monorepo lab stuff, init zen

This commit is contained in:
iofq 2025-12-27 22:26:02 -06:00
parent cfc15bba89
commit 645e09f9dd
54 changed files with 67498 additions and 406 deletions

6
clusters/lab/.sops.yaml Normal file
View file

@ -0,0 +1,6 @@
---
# SOPS configuration: encrypt everything except the listed top-level keys
# with the age recipient anchored as &t14.
keys:
  - &t14 age14e2d2y8e2avzfrsyxg9dudxd36svm24t7skw6e969n0c42znlp3shffdtg
creation_rules:
  - unencrypted_regex: "^(apiVersion|metadata|kind|type)$"
    age: *t14

View file

@ -0,0 +1,92 @@
---
# AdGuard Home: LoadBalancer Service exposing the web UI, first-run setup
# port, and DNS (TCP+UDP), backed by a Deployment with separate work/conf PVCs.
apiVersion: v1
kind: Service
metadata:
  name: adguard-svc
  namespace: adguard
spec:
  selector:
    app: adguard
  ports:
    - protocol: TCP
      port: 8082
      targetPort: 3000
      name: http-init
    - protocol: TCP
      port: 8081
      targetPort: 80
      name: http
    - protocol: TCP
      port: 53
      targetPort: 53
      name: dns-tcp
    - protocol: UDP
      port: 53
      targetPort: 53
      name: dns-udp
  type: LoadBalancer
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: adguard
  namespace: adguard
spec:
  selector:
    matchLabels:
      app: adguard
  # NOTE(review): replicas is 0, so the workload is scaled down — presumably
  # intentional (kustomization entry is also commented out); confirm.
  replicas: 0
  template:
    metadata:
      labels:
        app: adguard
    spec:
      containers:
        - name: adguard
          image: adguard/adguardhome:latest
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 80
              name: http
            - containerPort: 53
              name: dns
            - containerPort: 3000
              name: init
          volumeMounts:
            - name: adguard-data
              mountPath: /opt/adguardhome/work
            - name: adguard-conf
              mountPath: /opt/adguardhome/conf
      volumes:
        - name: adguard-data
          persistentVolumeClaim:
            claimName: adguard-pvc-data
        - name: adguard-conf
          persistentVolumeClaim:
            claimName: adguard-pvc-conf
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: adguard-pvc-conf
  namespace: adguard
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: local-path
  resources:
    requests:
      storage: 1Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: adguard-pvc-data
  namespace: adguard
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: local-path
  resources:
    requests:
      storage: 1Gi

View file

@ -0,0 +1,6 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: adguard
# All entries are currently disabled. An explicit empty list keeps
# `resources` a valid sequence — a bare `resources:` parses as null,
# which kustomize rejects.
resources: []
# - adguard-deployment.yaml

View file

@ -0,0 +1,6 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: kube-system
resources:
  - sealed-secrets-release.yaml

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,11 @@
---
# k3s HelmChart CR installing the Bitnami sealed-secrets controller.
# fullnameOverride pins the controller name kubeseal looks up by default.
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
  name: sealed-secrets-controller
  namespace: kube-system
spec:
  repo: https://bitnami-labs.github.io/sealed-secrets
  chart: sealed-secrets
  valuesContent: |-
    fullnameOverride: sealed-secrets-controller

View file

@ -0,0 +1,10 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: eth
# All entries are currently disabled. An explicit empty list keeps
# `resources` a valid sequence — a bare `resources:` parses as null,
# which kustomize rejects.
resources: []
# - namespace.yaml
# - nethermind-release.yaml
# - nimbus-release.yaml
# - besu-release.yaml
# - mev-boost.yaml

View file

@ -0,0 +1,45 @@
---
# Flashbots mev-boost sidecar: relay multiplexer for the consensus client,
# exposed in-cluster on 18550.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mev-boost
  namespace: eth
spec:
  selector:
    matchLabels:
      app.kubernetes.io/app: mev-boost
  replicas: 1
  template:
    metadata:
      labels:
        app.kubernetes.io/app: mev-boost
    spec:
      containers:
        - name: mev-boost
          image: "flashbots/mev-boost:1.8"
          imagePullPolicy: Always
          ports:
            - containerPort: 18550
          args:
            - "--addr"
            - "0.0.0.0:18550"
            - "--min-bid"
            - "0.05"
            - "--relay-check"
            - "--relays"
            - "https://0xa15b52576bcbf1072f4a011c0f99f9fb6c66f3e1ff321f11f461d15e31b1cb359caa092c71bbded0bae5b5ea401aab7e@aestus.live,https://0xa7ab7a996c8584251c8f925da3170bdfd6ebc75d50f5ddc4050a6fdc77f2a3b5fce2cc750d0865e05d7228af97d69561@agnostic-relay.net"
            - "--debug"
---
apiVersion: v1
kind: Service
metadata:
  name: mev-boost
  namespace: eth
spec:
  type: ClusterIP
  selector:
    app.kubernetes.io/app: mev-boost
  ports:
    - protocol: TCP
      port: 18550
      targetPort: 18550

View file

@ -0,0 +1,5 @@
---
apiVersion: v1
kind: Namespace
metadata:
  name: eth

View file

@ -0,0 +1,54 @@
---
# Nethermind execution client (mainnet): PVC for chain data, k3s HelmChart
# deployed into the eth namespace, and a LoadBalancer Service for JSON-RPC.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nethermind-mainnet-pvc
  namespace: eth
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: local-path
  resources:
    requests:
      storage: 1200Gi
---
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
  name: nethermind-mainnet
  # HelmChart CRs live in kube-system; the release targets `eth` below.
  namespace: kube-system
spec:
  targetNamespace: eth
  repo: https://ethpandaops.github.io/ethereum-helm-charts/
  chart: nethermind
  valuesContent: |-
    replicas: 1
    image:
      pullPolicy: "Always"
      tag: 1.31.10
    extraArgs:
      - "--Network.MaxActivePeers 20"
      - "--Pruning.CacheMb 4096"
      - "--Pruning.FullPruningTrigger VolumeFreeSpace"
      - "--Pruning.FullPruningCompletionBehavior AlwaysShutdown"
      - "--Init.MemoryHint 4096000000"
    p2pNodePort:
      enabled: true
      port: 30303
    persistence:
      enabled: true
      existingClaim: nethermind-mainnet-pvc
---
apiVersion: v1
kind: Service
metadata:
  name: nethermind-http-rpc
  namespace: eth
spec:
  type: LoadBalancer
  selector:
    app.kubernetes.io/instance: nethermind-mainnet
  ports:
    - protocol: TCP
      port: 8545
      targetPort: 8545

View file

@ -0,0 +1,57 @@
---
# Nimbus consensus client (mainnet): PVC, k3s HelmChart wired to the
# Nethermind engine API and mev-boost, and a LoadBalancer for the REST API.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nimbus-mainnet-pvc
  namespace: eth
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: local-path
  resources:
    requests:
      storage: 500Gi
---
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
  name: nimbus-mainnet
  # HelmChart CRs live in kube-system; the release targets `eth` below.
  namespace: kube-system
spec:
  targetNamespace: eth
  repo: https://ethpandaops.github.io/ethereum-helm-charts/
  chart: nimbus
  valuesContent: |-
    replicas: 1
    image:
      pullPolicy: "Always"
      tag: "multiarch-v25.5.0"
    extraArgs:
      - "--web3-url=http://nethermind-mainnet.eth.svc.cluster.local:8551"
      - "--payload-builder=true"
      - "--payload-builder-url=http://mev-boost.eth.svc.cluster.local:18550"
      - "--max-peers=100"
    p2pNodePort:
      enabled: true
      port: 30001
    persistence:
      enabled: true
      existingClaim: nimbus-mainnet-pvc
    checkpointSync:
      enabled: true
      network: mainnet
      url: https://mainnet-checkpoint-sync.attestant.io
---
apiVersion: v1
kind: Service
metadata:
  name: nimbus-http-rpc
  namespace: eth
spec:
  type: LoadBalancer
  selector:
    app.kubernetes.io/instance: nimbus-mainnet
  ports:
    - protocol: TCP
      port: 5052
      targetPort: 5052

View file

@ -0,0 +1,14 @@
---
# Root kustomization: aggregates every app directory in the lab cluster.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
generatorOptions:
  labels:
    type: generated
resources:
  - crds/
  - minecraft/
  - soft-serve/
  - eth/
  - unifi/
  - adguard/
  - smokeping/

View file

@ -0,0 +1,98 @@
---
# Secondary ("kiki") vanilla Minecraft server via the itzg chart, with its
# own world PVC and a LoadBalancer on the non-default port 25566.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: kiki-mc-world
  namespace: minecraft
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: local-path
  resources:
    requests:
      storage: 5Gi
---
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
  name: kiki-minecraft
  namespace: kube-system
spec:
  targetNamespace: minecraft
  repo: https://itzg.github.io/minecraft-server-charts/
  chart: minecraft
  valuesContent: |-
    image:
      repository: itzg/minecraft-server
      tag: latest
      pullPolicy: Always
    replicaCount: 1
    resources:
      requests:
        memory: 2000Mi
        cpu: 1000m
    strategyType: Recreate
    nodeSelector: {}
    tolerations: []
    affinity: {}
    securityContext:
      runAsUser: 1000
      fsGroup: 1000
    livenessProbe:
      command:
        - mc-health
      initialDelaySeconds: 30
      periodSeconds: 5
      failureThreshold: 20
      successThreshold: 1
      timeoutSeconds: 1
    readinessProbe:
      command:
        - mc-health
      initialDelaySeconds: 30
      periodSeconds: 5
      failureThreshold: 20
      successThreshold: 1
      timeoutSeconds: 1
    startupProbe:
      command:
        - mc-health
      enabled: false
      failureThreshold: 30
      periodSeconds: 10
    extraVolumes: []
    minecraftServer:
      eula: "TRUE"
      # fix: chart key is `version`, not `wersion` — the typo was silently
      # ignored and the chart fell back to its default version.
      version: "latest"
      type: "VANILLA"
      difficulty: normal
      whitelist: cjriddz,k359
      ops: cjriddz,k359
      maxWorldSize: 15000
      viewDistance: 16
      motd: "good morning :)"
      pvp: false
      levelType: DEFAULT
      worldSaveName: world
      forceReDownload: false
      memory: 2000M
      serviceAnnotations: {}
      serviceType: LoadBalancer
      servicePort: 25566
      clusterIP:
      loadBalancerIP:
      externalIPs:
      query:
        enabled: false
        port: 25566
      rcon:
        enabled: true
        withGeneratedPassword: true
    envFrom: []
    persistence:
      annotations: {}
      # storageClass is moot while existingClaim is set, but kept for parity
      # with the chart defaults.
      storageClass: "longhorn"
      dataDir:
        enabled: true
        existingClaim: kiki-mc-world

View file

@ -0,0 +1,9 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: minecraft
resources:
  - minecraft-helm.yaml
  - kiki-minecraft-helm.yaml
# - minecraft-restic-backup.yaml
# - minecraft-restic-secrets.yaml.enc

View file

@ -0,0 +1,163 @@
---
# Primary Fabric Minecraft server: namespace, world PVC, and the itzg chart
# release with Modrinth-managed mods and RCON startup gamerules.
apiVersion: v1
kind: Namespace
metadata:
  name: minecraft
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mc-world
  namespace: minecraft
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: local-path
  resources:
    requests:
      storage: 5Gi
---
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
  name: minecraft
  namespace: kube-system
spec:
  targetNamespace: minecraft
  repo: https://itzg.github.io/minecraft-server-charts/
  chart: minecraft
  valuesContent: |-
    image:
      repository: itzg/minecraft-server
      tag: java21
      pullPolicy: Always
    replicaCount: 1
    resources:
      requests:
        memory: 3000Mi
        cpu: 1000m
    strategyType: Recreate
    nodeSelector: {}
    tolerations: []
    affinity: {}
    securityContext:
      runAsUser: 1000
      fsGroup: 1000
    livenessProbe:
      command:
        - mc-health
      initialDelaySeconds: 30
      periodSeconds: 5
      failureThreshold: 20
      successThreshold: 1
      timeoutSeconds: 1
    readinessProbe:
      command:
        - mc-health
      initialDelaySeconds: 30
      periodSeconds: 5
      failureThreshold: 20
      successThreshold: 1
      timeoutSeconds: 1
    startupProbe:
      command:
        - mc-health
      enabled: false
      failureThreshold: 30
      periodSeconds: 10
    extraVolumes: []
    minecraftServer:
      eula: "TRUE"
      # fix: chart key is `version`, not `wersion` — the typo was silently
      # ignored and the chart fell back to its default version.
      version: "latest"
      type: "FABRIC"
      difficulty: normal
      whitelist: cjriddz,k359,yessorre,ZaltyPretzel,Yessorre,aemdryr
      ops: cjriddz,k359,yessorre,ZaltyPretzel,Yessorre,aemdryr
      maxWorldSize: 15000
      viewDistance: 16
      motd: "good morning :)"
      pvp: false
      levelType: DEFAULT
      worldSaveName: world-gims-7
      forceReDownload: false
      memory: 3000M
      serviceAnnotations: {}
      serviceType: LoadBalancer
      servicePort: 25565
      clusterIP:
      loadBalancerIP:
      externalIPs:
      query:
        enabled: false
        port: 25565
      rcon:
        enabled: true
        withGeneratedPassword: true
      extraEnv:
        # https://fabricmc.net/use/server/
        # fix: quoted — container env values must be strings; an unquoted
        # `true` is a YAML boolean and fails Pod validation.
        VERSION_FROM_MODRINTH_PROJECTS: "true"
        RCON_CMDS_STARTUP: |-
          gamerule playersSleepingPercentage 19
          gamerule doInsomnia false
          gamerule mobGriefing false
        # deprecated mods
        # incendium:alpha
        # nullscape
        # true-ending
        # upgraded-mobs
        # spellbound-weapons
        # neoenchant
        # lukis-grand-capitals
        # lukis-crazy-chambers
        # lukis-ancient-cities
        # towns-and-towers
        # dungeons-and-taverns-jungle-temple-overhaul
        # dungeons-and-taverns-ocean-monument-overhaul
        # dungeons-and-taverns-woodland-mansion-replacement
        # dungeons-and-taverns-nether-fortress-overhaul
        # dungeons-and-taverns-stronghold-overhaul
        # structory
        # structory-towers
        # yggdrasil-structure
        # hostile-mobs-improve-over-time
        # beyondenchant
        # expanded-axe-enchanting
        # expanded-weapon-enchanting
        # expanded-bow-enchanting
        # expanded-armor-enchanting
        # expanded-trident-enchanting
        # infinite-trading
        # healing-campfire
        # fabric-language-kotlin
        # cloth-config
        # owo-lib
        # cristel-lib
        # ct-overhaul-village
        # tectonic
        # terralith
        # portfolio
        # tree-harvester
        # chunky
        # ferrite-core
        # scalablelux
        # appleskin
        # inventory-sorting
        # datapack:health-indicator
        MODRINTH_PROJECTS: |-
          fabric-api
          collective
          cloth-config
          lithium
          c2me-fabric:alpha
          your-items-are-safe
          datapack:geophilic
          dungeons-and-taverns
          more-mobs
    envFrom: []
    persistence:
      annotations: {}
      # storageClass is moot while existingClaim is set, but kept for parity
      # with the chart defaults.
      storageClass: "longhorn"
      dataDir:
        enabled: true
        existingClaim: mc-world

View file

@ -0,0 +1,6 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: smokeping
resources:
  - smokeping-helm.yaml

View file

@ -0,0 +1,40 @@
---
# Smokeping latency grapher (linuxserver image) via nicholaswilde's chart.
apiVersion: v1
kind: Namespace
metadata:
  name: smokeping
---
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
  name: smokeping
  namespace: kube-system
spec:
  targetNamespace: smokeping
  repo: https://nicholaswilde.github.io/helm-charts/
  chart: smokeping
  valuesContent: |-
    image:
      repository: ghcr.io/linuxserver/smokeping
      pullPolicy: IfNotPresent
    env:
      # fix: "America/Chigaco" is not a valid IANA zone name; the container
      # would fall back to UTC.
      TZ: "America/Chicago"
    ingress:
      enabled: false
    persistence:
      config:
        enabled: true
        emptyDir: false
        mountPath: /config
        storageClass: local-path
        accessMode: ReadWriteOnce
        size: 1Gi
        skipuninstall: false
      data:
        enabled: true
        emptyDir: false
        mountPath: /data
        storageClass: local-path
        accessMode: ReadWriteOnce
        size: 1Gi
        skipuninstall: false

View file

@ -0,0 +1,6 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: soft-serve
# All entries are currently disabled. An explicit empty list keeps
# `resources` a valid sequence — a bare `resources:` parses as null,
# which kustomize rejects.
resources: []
# - ss-deployment.yaml

View file

@ -0,0 +1,64 @@
---
# Soft Serve git server: namespace, data PVC, SSH exposed on port 22 via a
# LoadBalancer mapped to the container's 23231, and the Deployment itself.
apiVersion: v1
kind: Namespace
metadata:
  name: soft-serve
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: soft-serve-pvc
  namespace: soft-serve
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: local-path
  resources:
    requests:
      storage: 5Gi
---
apiVersion: v1
kind: Service
metadata:
  name: soft-serve-svc
  namespace: soft-serve
spec:
  selector:
    app: soft-serve
  ports:
    - protocol: TCP
      port: 22
      targetPort: 23231
  type: LoadBalancer
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: soft-serve
  namespace: soft-serve
spec:
  selector:
    matchLabels:
      app: soft-serve
  replicas: 1
  template:
    metadata:
      labels:
        app: soft-serve
    spec:
      containers:
        - name: soft-serve
          image: charmcli/soft-serve:v0.10.0
          imagePullPolicy: Always
          ports:
            - containerPort: 23231
          volumeMounts:
            - name: soft-serve-data
              mountPath: /soft-serve
          env:
            # Public key only — safe to commit.
            - name: SOFT_SERVE_INITIAL_ADMIN_KEYS
              value: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHM4Zr0PFN7QdOG2aJ+nuzRCK6caulrpY6bphA1Ppl8Y e@t14"
      volumes:
        - name: soft-serve-data
          persistentVolumeClaim:
            claimName: soft-serve-pvc

View file

@ -0,0 +1,6 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# fix: was `namespace: adguard` (copy-paste from the adguard kustomization),
# which would have forced unifi resources into the wrong namespace.
namespace: unifi
resources:
  - unifi-deployment.yaml

View file

@ -0,0 +1,75 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: unifi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: unifi-pvc
namespace: unifi
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-path
resources:
requests:
storage: 5Gi
---
apiVersion: v1
kind: Service
metadata:
name: unifi-svc
namespace: unifi
spec:
selector:
app: unifi
ports:
- protocol: TCP
port: 8443
targetPort: 8443
name: http
- protocol: UDP
port: 10001
targetPort: 10001
name: ap-disc
- protocol: TCP
port: 8080
targetPort: 8080
name: adopt
type: LoadBalancer
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: unifi
namespace: unifi
spec:
selector:
matchLabels:
app: unifi
replicas: 1
template:
metadata:
labels:
app: unifi
spec:
containers:
- name: unifi
image: lscr.io/linuxserver/unifi-controller:latest
imagePullPolicy: Always
ports:
- containerPort: 8080
name: adopt
- containerPort: 10001
name: ap-disc
- containerPort: 8443
name: http
volumeMounts:
- name: unifi-data
mountPath: /config
volumes:
- name: unifi-data
persistentVolumeClaim:
claimName: unifi-pvc