Compare commits

...

10 Commits

31 changed files with 1381 additions and 62 deletions

24
CLAUDE.md Normal file
View File

@ -0,0 +1,24 @@
# CLAUDE.md
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
## Commands
- Validate YAML: `kubectl apply --dry-run=client -f <filename.yaml>`
- Validate syntax with YAML linter: `yamllint <filename.yaml>`
- Check Kubernetes resources: `kubectl get <resource-type> --namespace <namespace>`
## Style Guidelines
1. YAML Formatting:
- Use 2-space indentation
- Use camelCase for resource keys (Kubernetes API convention)
- Keep line length under 100 characters
2. Kubernetes Resources:
- Include resource limits/requests in all deployments
- Add appropriate labels and annotations
- Group related resources in the same file
- Use namespaces to organize resources by service
3. Documentation:
- Add comments for non-obvious configuration choices
- Document environment-specific variables clearly

View File

@ -2,9 +2,18 @@
## TODO
### Media
[ ] sabnzbd ini config map
[ ] tdarr
[ ] subtitle extractor
[ ] intro-skipper
[x] private registry
[x] secrets
[x] ntr-cv static containers
[x] check mnmlgg mail
[x] ufw
[x] grafana etc
[x] grafana etc
kubectl run -i --tty --rm debug --image=busybox --restart=Never -- nslookup openebs-etcd.openebs.svc.cluster.local

View File

@ -11,7 +11,7 @@ metadata:
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
spec:
ingressClassName: nginx
ingressClassName: haproxy
tls:
- hosts:
- crates.strix.systems
@ -27,6 +27,16 @@ spec:
name: crates-client
port:
number: 8080
- host: crates.ntwl.xyz
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: crates-client
port:
number: 8080
---
@ -35,12 +45,11 @@ kind: Ingress
metadata:
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
nginx.ingress.kubernetes.io/use-regex: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$2
haproxy.org/path-rewrite: "/api(/)?(.*) /$2"
name: crates-api
namespace: default
spec:
ingressClassName: nginx
ingressClassName: haproxy
tls:
- hosts:
- crates.strix.systems
@ -56,3 +65,13 @@ spec:
name: crates-api
port:
number: 41337
- host: crates.ntwl.xyz
http:
paths:
- path: /api(/|$)(.*)
pathType: ImplementationSpecific
backend:
service:
name: crates-api
port:
number: 41337

137
media/jellyfin.yaml Normal file
View File

@ -0,0 +1,137 @@
---
# Jellyfin media server, using the NVIDIA runtime class for hardware transcoding.
apiVersion: apps/v1
kind: Deployment
metadata:
name: jellyfin
namespace: media
spec:
selector:
matchLabels:
app: jellyfin
template:
metadata:
labels:
app: jellyfin
spec:
# USE GPU
# NOTE(review): no nvidia.com/gpu resource limit is requested here —
# presumably the image sees the GPU via the runtime class alone; confirm
# transcoding actually uses it (the commented benchmark pod in
# media/nvidia.yaml does set nvidia.com/gpu: 1).
runtimeClassName: nvidia
restartPolicy: Always
containers:
- image: ghcr.io/hotio/jellyfin
imagePullPolicy: Always
name: jellyfin
env:
- name: TZ
value: Australia/Melbourne
- name: PUID
value: '1000'
- name: PGID
value: '1000'
- name: UMASK
value: '002'
ports:
- containerPort: 8096
protocol: TCP
# NOTE(review): no resources requests/limits — CLAUDE.md asks for them on
# all deployments.
volumeMounts:
# media library is mounted read-only; only the *arr stack writes to it
- mountPath: /data
name: jellyfin-data
readOnly: true
- mountPath: /config
name: jellyfin-config
volumes:
- name: jellyfin-data
persistentVolumeClaim:
claimName: data
- name: jellyfin-config
persistentVolumeClaim:
claimName: jellyfin-config
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: jellyfin-config
namespace: media
spec:
storageClassName: zfs-fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 8Gi
---
apiVersion: v1
kind: Service
metadata:
labels:
app: jellyfin
name: jellyfin
namespace: media
spec:
ports:
- name: web
port: 8096
protocol: TCP
targetPort: 8096
selector:
app: jellyfin
type: ClusterIP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: jellyfin-ingress
namespace: media
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
spec:
ingressClassName: haproxy
tls:
- secretName: jellyfin-ingress
hosts:
- jf.ntwl.xyz
rules:
- host: jf.ntwl.xyz
http:
paths:
- backend:
service:
name: jellyfin
port:
name: web
path: /
pathType: Prefix
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: jellyfin-lan-ingress
namespace: media
annotations:
haproxy.org/ssl-redirect: "false"
haproxy.org/ssl-certificate: "default/tls-secret"
spec:
ingressClassName: haproxy
rules:
- host: jfl.ntwl.xyz
http:
paths:
- backend:
service:
name: jellyfin
port:
name: web
path: /
pathType: Prefix

108
media/jellyseerr.yaml Normal file
View File

@ -0,0 +1,108 @@
---
# Jellyseerr request manager for the media stack.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: jellyseerr
  namespace: media
spec:
  selector:
    matchLabels:
      app: jellyseerr
  template:
    metadata:
      labels:
        app: jellyseerr
    spec:
      restartPolicy: Always
      containers:
        - image: ghcr.io/hotio/jellyseerr
          imagePullPolicy: Always
          name: jellyseerr
          env:
            - name: TZ
              value: Australia/Melbourne
            # hotio images drop privileges to PUID/PGID; set them explicitly
            # for consistency with the other media deployments
            # (jellyfin, sonarr, radarr, prowlarr, sabnzbd)
            - name: PUID
              value: '1000'
            - name: PGID
              value: '1000'
            - name: UMASK
              value: '002'
          ports:
            - containerPort: 5055
              name: web
              protocol: TCP
          volumeMounts:
            - mountPath: /app/config
              name: jellyseerr-config
      volumes:
        - name: jellyseerr-config
          persistentVolumeClaim:
            claimName: jellyseerr-config
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: jellyseerr-config
namespace: media
spec:
storageClassName: zfs-fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: Service
metadata:
labels:
app: jellyseerr
name: jellyseerr
namespace: media
spec:
ports:
- name: web
port: 5055
protocol: TCP
targetPort: 5055
selector:
app: jellyseerr
type: ClusterIP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: jellyseerr-ingress
namespace: media
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
spec:
ingressClassName: haproxy
tls:
- secretName: jellyseerr-ingress
hosts:
- jellyseerr.ntwl.xyz
- get.ntwl.xyz
rules:
- host: jellyseerr.ntwl.xyz
http:
paths:
- backend:
service:
name: jellyseerr
port:
name: web
path: /
pathType: Prefix
- host: get.ntwl.xyz
http:
paths:
- backend:
service:
name: jellyseerr
port:
name: web
path: /
pathType: Prefix

21
media/media.yaml Normal file
View File

@ -0,0 +1,21 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: media
---
# Shared media-library volume mounted by jellyfin/radarr/sonarr/sabnzbd.
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: data
namespace: media
spec:
storageClassName: slow
accessModes:
# NOTE(review): ReadWriteOnce but this claim is mounted by several
# deployments — that only works while every pod schedules onto the same
# node (single-node cluster?). Confirm, or switch to a shared/RWX class.
- ReadWriteOnce
resources:
requests:
storage: 4Ti

42
media/nvidia.yaml Normal file
View File

@ -0,0 +1,42 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: gpu-operator
---
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: gpu-operator
namespace: gpu-operator
spec:
repo: https://helm.ngc.nvidia.com/nvidia
chart: gpu-operator
targetNamespace: gpu-operator
version: 24.9.2
---
# apiVersion: v1
# kind: Pod
# metadata:
# name: nbody-gpu-benchmark
# namespace: default
# spec:
# restartPolicy: OnFailure
# runtimeClassName: nvidia
# containers:
# - name: cuda-container
# image: nvcr.io/nvidia/k8s/cuda-sample:nbody
# args: ["nbody", "-gpu", "-benchmark"]
# resources:
# limits:
# nvidia.com/gpu: 1
# env:
# - name: NVIDIA_VISIBLE_DEVICES
# value: all
# - name: NVIDIA_DRIVER_CAPABILITIES
# value: all

95
media/prowlarr.yaml Normal file
View File

@ -0,0 +1,95 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: prowlarr
namespace: media
spec:
selector:
matchLabels:
app: prowlarr
template:
metadata:
labels:
app: prowlarr
spec:
restartPolicy: Always
containers:
- image: ghcr.io/hotio/prowlarr
imagePullPolicy: Always
name: prowlarr
env:
- name: TZ
value: Australia/Melbourne
- name: PUID
value: '1000'
- name: PGID
value: '1000'
- name: UMASK
value: '002'
ports:
- containerPort: 9696
name: web
protocol: TCP
volumeMounts:
- mountPath: /config
name: prowlarr-config
volumes:
- name: prowlarr-config
persistentVolumeClaim:
claimName: prowlarr-config
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: prowlarr-config
namespace: media
spec:
storageClassName: zfs-fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: Service
metadata:
labels:
app: prowlarr
name: prowlarr
namespace: media
spec:
ports:
- name: web
port: 9696
protocol: TCP
targetPort: 9696
selector:
app: prowlarr
type: ClusterIP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: prowlarr-ingress
namespace: media
spec:
ingressClassName: haproxy
rules:
- host: prowlarr.ntwl.xyz
http:
paths:
- backend:
service:
name: prowlarr
port:
name: web
path: /
pathType: Prefix

100
media/radarr.yaml Normal file
View File

@ -0,0 +1,100 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: radarr
namespace: media
spec:
selector:
matchLabels:
app: radarr
template:
metadata:
labels:
app: radarr
spec:
restartPolicy: Always
containers:
- image: ghcr.io/hotio/radarr
imagePullPolicy: Always
name: radarr
env:
- name: TZ
value: Australia/Melbourne
- name: PUID
value: '1000'
- name: PGID
value: '1000'
- name: UMASK
value: '002'
ports:
- containerPort: 7878
name: web
protocol: TCP
volumeMounts:
- mountPath: /data
name: radarr-data
- mountPath: /config
name: radarr-config
volumes:
- name: radarr-data
persistentVolumeClaim:
claimName: data
- name: radarr-config
persistentVolumeClaim:
claimName: radarr-config
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: radarr-config
namespace: media
spec:
storageClassName: zfs-fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: Service
metadata:
labels:
app: radarr
name: radarr
namespace: media
spec:
ports:
- name: web
port: 7878
protocol: TCP
targetPort: 7878
selector:
app: radarr
type: ClusterIP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: radarr-ingress
namespace: media
spec:
ingressClassName: haproxy
rules:
- host: radarr.ntwl.xyz
http:
paths:
- backend:
service:
name: radarr
port:
name: web
path: /
pathType: Prefix

109
media/sabnzbd.yaml Normal file
View File

@ -0,0 +1,109 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: sabnzbd
namespace: media
spec:
selector:
matchLabels:
app: sabnzbd
template:
metadata:
labels:
app: sabnzbd
spec:
restartPolicy: Always
containers:
- image: ghcr.io/hotio/sabnzbd
imagePullPolicy: Always
name: sabnzbd
env:
- name: TZ
value: Australia/Melbourne
- name: PUID
value: '1000'
- name: PGID
value: '1000'
- name: UMASK
value: '002'
- name: WEBUI_PORTS
value: '8080/tcp,8080/udp'
- name: ARGS
value: ''
ports:
- containerPort: 8080
name: web
protocol: TCP
volumeMounts:
- mountPath: /data
name: sabnzbd-data
- mountPath: /config
name: sabnzbd-config
volumes:
- name: sabnzbd-data
persistentVolumeClaim:
claimName: data
- name: sabnzbd-config
persistentVolumeClaim:
claimName: sabnzbd-config
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: sabnzbd-config
namespace: media
spec:
storageClassName: zfs-fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: Service
metadata:
labels:
app: sabnzbd
name: sabnzbd
namespace: media
spec:
ports:
- name: web
port: 8080
protocol: TCP
targetPort: 8080
- name: web-udp
port: 8080
protocol: UDP
targetPort: 8080
selector:
app: sabnzbd
type: ClusterIP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: sabnzbd-ingress
namespace: media
spec:
ingressClassName: haproxy
rules:
- host: sabnzbd.ntwl.xyz
http:
paths:
- backend:
service:
name: sabnzbd
port:
name: web
path: /
pathType: Prefix

101
media/sonarr.yaml Normal file
View File

@ -0,0 +1,101 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: sonarr
namespace: media
spec:
selector:
matchLabels:
app: sonarr
template:
metadata:
labels:
app: sonarr
spec:
restartPolicy: Always
containers:
- image: ghcr.io/hotio/sonarr
imagePullPolicy: Always
name: sonarr
env:
- name: TZ
value: Australia/Melbourne
- name: PUID
value: '1000'
- name: PGID
value: '1000'
- name: UMASK
value: '002'
ports:
- containerPort: 8989
name: web
protocol: TCP
volumeMounts:
- mountPath: /data
name: sonarr-data
- mountPath: /config
name: sonarr-config
volumes:
- name: sonarr-data
persistentVolumeClaim:
claimName: data
- name: sonarr-config
persistentVolumeClaim:
claimName: sonarr-config
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: sonarr-config
namespace: media
spec:
storageClassName: zfs-fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: Service
metadata:
labels:
app: sonarr
name: sonarr
namespace: media
spec:
ports:
- name: web
port: 8989
protocol: TCP
targetPort: 8989
selector:
app: sonarr
type: ClusterIP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: sonarr-ingress
namespace: media
spec:
ingressClassName: haproxy
rules:
- host: sonarr.ntwl.xyz
http:
paths:
- backend:
service:
name: sonarr
port:
name: web
path: /
pathType: Prefix

View File

@ -5,11 +5,13 @@ metadata:
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
spec:
ingressClassName: nginx
ingressClassName: haproxy
tls:
- hosts:
- memestream.strix.systems
- search.memestream.strix.systems
- memestream.ntwl.xyz
- search.memestream.ntwl.xyz
secretName: memestream-strix-systems-tls
rules:
- host: search.memestream.strix.systems
@ -22,7 +24,27 @@ spec:
name: memestream-meili
port:
number: 7700
- host: search.memestream.ntwl.xyz
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: memestream-meili
port:
number: 7700
- host: memestream.strix.systems
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: memestream-client
port:
number: 8080
- host: memestream.ntwl.xyz
http:
paths:
- path: /

View File

@ -45,7 +45,7 @@ metadata:
spec:
storageClassName: ""
capacity:
storage: 2Gi
storage: 5Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
@ -68,7 +68,7 @@ spec:
- ReadWriteOnce
resources:
requests:
storage: 2Gi
storage: 5Gi
---
@ -76,6 +76,30 @@ spec:
# DEPLOYMENTS
# -----------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
name: memestream-archiver
spec:
replicas: 1
selector:
matchLabels:
app: memestream-archiver
template:
metadata:
labels:
app: memestream-archiver
spec:
containers:
- name: memestream-archiver
image: registry.strix.systems/memestream-archiver
imagePullPolicy: Always
envFrom:
- secretRef:
name: memestream-archiver
---
apiVersion: apps/v1
kind: Deployment
metadata:

View File

@ -10,7 +10,7 @@ metadata:
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
spec:
ingressClassName: nginx
ingressClassName: haproxy
tls:
- hosts:
- mnml.gg
@ -36,9 +36,9 @@ metadata:
namespace: default
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
nginx.org/websocket-services: "mnml-ws"
haproxy.org/websocket-support: "true"
spec:
ingressClassName: nginx
ingressClassName: haproxy
tls:
- hosts:
- mnml.gg
@ -63,11 +63,10 @@ metadata:
name: mnml-ws
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
nginx.org/proxy-read-timeout: "3600"
nginx.org/proxy-send-timeout: "3600"
nginx.org/websocket-services: mnml-ws
haproxy.org/websocket-support: "true"
haproxy.org/timeout-tunnel: "3600s"
spec:
ingressClassName: nginx
ingressClassName: haproxy
tls:
- hosts:
- mnml.gg

View File

@ -1,6 +1,21 @@
# using the same issuer for everything
# ntr@strix is the big boss
---
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
# NOTE(review): this HelmChart CR lives in "cert-manager" but installs the
# chart into "networking" (targetNamespace). The cert-manager namespace is
# never created in this file — confirm it exists and the split is intended.
name: cert-manager
namespace: cert-manager
spec:
repo: https://charts.jetstack.io
chart: cert-manager
targetNamespace: networking
valuesContent: |-
crds:
enabled: true
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
@ -19,7 +34,7 @@ spec:
solvers:
- http01:
ingress:
ingressClassName: nginx
ingressClassName: haproxy
---
@ -40,4 +55,5 @@ spec:
solvers:
- http01:
ingress:
ingressClassName: nginx
ingressClassName: haproxy

19
networking/haproxy.yaml Normal file
View File

@ -0,0 +1,19 @@
---
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: haproxy-kubernetes-ingress
namespace: kube-system
spec:
repo: https://haproxytech.github.io/helm-charts
chart: kubernetes-ingress
targetNamespace: networking
valuesContent: |-
controller:
kind: DaemonSet
daemonset:
useHostPort: true
config:
ssl-redirect-port: "443"
# - --https-bind-port=443

View File

@ -0,0 +1,6 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: networking

168
networking/pihole.yaml Normal file
View File

@ -0,0 +1,168 @@
---
# -----------------------------------------------------------------------
# DEPLOYMENTS
# -----------------------------------------------------------------------
# Pi-hole DNS ad-blocker, configured entirely via FTLCONF_* env vars.
apiVersion: apps/v1
kind: Deployment
metadata:
name: pihole
namespace: networking
spec:
replicas: 1
selector:
matchLabels:
app: pihole
template:
metadata:
labels:
app: pihole
spec:
containers:
- name: pihole
# NOTE(review): :latest tag with IfNotPresent means nodes can drift onto
# different versions — consider pinning an image tag.
image: pihole/pihole:latest
imagePullPolicy: IfNotPresent
env:
- name: TZ
value: "Australia/Melbourne"
# NOTE(review): admin password committed in plaintext — move it into a
# Secret and reference it with valueFrom.secretKeyRef.
- name: FTLCONF_webserver_api_password
value: grepgrepgrep
# allow it to respond to devices outside cluster
- name: FTLCONF_dns_listeningMode
value: single
# resolve wildcards
- name: FTLCONF_misc_dnsmasq_lines
value: address=/nightowl.strix.systems/192.168.1.88
ports:
- containerPort: 53
protocol: TCP
- containerPort: 53
protocol: UDP
- containerPort: 67
protocol: UDP
- containerPort: 80
protocol: TCP
- containerPort: 443
protocol: TCP
# NOTE(review): with the volume mounts commented out, /etc/pihole is
# ephemeral — all settings/adlists are lost on every pod restart and only
# the env-var config above survives. Confirm that is intended.
# volumeMounts:
# - name: etc
# mountPath: /etc/pihole
# - name: dnsmasq
# mountPath: /etc/dnsmasq.d
resources:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 2Gi
cpu: 1
# volumes:
# - name: etc
# hostPath:
# path: /data/pihole/etc
# type: Directory
# - name: dnsmasq
# hostPath:
# path: /data/pihole/dnsmasq.d
# type: Directory
# -----------------------------------------------------------------------
# SERVICES
# -----------------------------------------------------------------------
---
kind: Service
apiVersion: v1
metadata:
name: pihole
namespace: networking
spec:
selector:
app: pihole
ports:
- name: web
port: 80
targetPort: 80
- name: dns-tcp
port: 53
targetPort: 53
protocol: TCP
- name: dns-udp
port: 53
targetPort: 53
protocol: UDP
---
apiVersion: v1
kind: Service
metadata:
name: pihole-dns-udp
namespace: networking
spec:
selector:
app: pihole
ports:
- name: dns-udp
port: 53
protocol: UDP
externalTrafficPolicy: Local
type: LoadBalancer
---
apiVersion: v1
kind: Service
metadata:
name: pihole-dns-tcp
namespace: networking
spec:
selector:
app: pihole
ports:
- name: dns-tcp
port: 53
protocol: TCP
externalTrafficPolicy: Local
type: LoadBalancer
---
# Temporary LoadBalancer exposure of the Pi-hole admin web UI on port 9980
# (bypasses the ingress); remove once debugging is done.
apiVersion: v1
kind: Service
metadata:
  name: pihole-web-debug
  namespace: networking
spec:
  selector:
    app: pihole
  ports:
    # was mis-named "dns-tcp" (copy-paste from the DNS services above);
    # this forwards 9980 -> 80, i.e. the web UI, not DNS
    - name: web
      port: 9980
      targetPort: 80
      protocol: TCP
  externalTrafficPolicy: Local
  type: LoadBalancer
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: pihole
namespace: networking
spec:
ingressClassName: haproxy
rules:
- host: "pihole.nightowl.strix.systems"
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: pihole
port:
name: web

View File

@ -7,10 +7,11 @@ metadata:
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
spec:
ingressClassName: nginx
ingressClassName: haproxy
tls:
- hosts:
- strix.systems
- ntr.ntwl.xyz
secretName: strix-systems-tls
rules:
- host: strix.systems
@ -23,3 +24,13 @@ spec:
name: ntr-cv
port:
number: 8080
- host: ntr.ntwl.xyz
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: ntr-cv
port:
number: 8080

9
registry/generate_auth.sh → ops/generate_auth.sh Executable file → Normal file
View File

@ -11,7 +11,7 @@ htpasswd() {
htpasswd -Bbn ${1} ${2} | head -n 1 2> /dev/null\""
}
K3S_HOST=strix.systems
K3S_HOST=ntwl.xyz
REGISTRY_ADMIN=ntr
REGISTRY_PASSWORD=$(gen_password)
REGISTRY_AUTH=$(htpasswd ${REGISTRY_ADMIN} ${REGISTRY_PASSWORD})
@ -24,6 +24,7 @@ echo REGISTRY_AUTH is ${REGISTRY_AUTH}
echo REGISTRY_HTTP_SECRET is ${REGISTRY_HTTP_SECRET}
kubectl create secret generic registry \
-n ops \
--from-literal=REGISTRY_ADMIN=${REGISTRY_ADMIN} \
--from-literal=REGISTRY_PASSWORD=${REGISTRY_PASSWORD} \
--from-literal=REGISTRY_HTTP_SECRET=${REGISTRY_HTTP_SECRET} \
@ -31,11 +32,11 @@ kubectl create secret generic registry \
# cat <<EOF | ssh root@strix tee /etc/rancher/k3s/registries.yaml
# mirrors:
# registry.strix.systems:
# registry.ntwl.xyz:
# endpoint:
# - "https://registry.strix.systems"
# - "https://registry.ntwl.xyz"
# configs:
# "registry.strix.systems":
# "registry.ntwl.xyz":
# auth:
# username: ntr
# password: pw

130
ops/git.yaml Normal file
View File

@ -0,0 +1,130 @@
---
apiVersion: v1
kind: Secret
metadata:
name: gitea-admin-secret
namespace: ops
type: Opaque
# NOTE(review): admin credentials are committed to VCS in plaintext — anyone
# with repo read access can log in as the Gitea admin. Prefer creating this
# secret out-of-band (kubectl create secret / sealed-secrets) and rotating
# the password.
stringData:
username: ntr
password: "ghastly ghouls"
email: "ntr@strix.systems"
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: gitea-data
namespace: ops
spec:
storageClassName: fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: gitea-redis
namespace: ops
spec:
storageClassName: fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 4Gi
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: gitea-pg
namespace: ops
spec:
storageClassName: fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 4Gi
---
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: gitea
namespace: ops
spec:
repo: https://dl.gitea.com/charts/
chart: gitea
targetNamespace: ops
valuesContent: |-
ingress:
enabled: true
className: haproxy
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
tls:
- secretName: git-tls
hosts:
- git.ntwl.xyz
# - git.strix.systems
hosts:
- host: git.ntwl.xyz
paths:
- path: /
pathType: Prefix
- host: git.strix.systems
paths:
- path: /
pathType: Prefix
service:
ssh:
type: LoadBalancer
port: 60022
externalTrafficPolicy: Local
redis-cluster:
enabled: false
redis:
enabled: true
persistence:
enabled: true
existingClaim: gitea-redis
postgresql:
enabled: true
persistence:
enabled: true
existingClaim: gitea-pg
postgresql-ha:
enabled: false
persistence:
enabled: true
existingClaim: gitea-data
gitea:
config:
database:
DB_TYPE: postgres
indexer:
ISSUE_INDEXER_TYPE: bleve
REPO_INDEXER_ENABLED: true
server:
SSH_PORT: 60022
admin:
existingSecret: gitea-admin-secret

View File

@ -8,10 +8,11 @@ metadata:
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
spec:
ingressClassName: nginx
ingressClassName: haproxy
tls:
- hosts:
- monitor.strix.systems
- monitor.ntwl.xyz
secretName: monitor-strix-systems-tls
rules:
- host: monitor.strix.systems
@ -24,3 +25,13 @@ spec:
name: kube-prometheus-stack-grafana
port:
number: 80
- host: monitor.ntwl.xyz
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: kube-prometheus-stack-grafana
port:
number: 80

6
ops/ops.yaml Normal file
View File

@ -0,0 +1,6 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: ops

View File

@ -5,13 +5,14 @@ kind: Ingress
metadata:
name: registry
annotations:
nginx.ingress.kubernetes.io/proxy-body-size: 2g
haproxy.org/proxy-body-size-limit: "2g"
cert-manager.io/cluster-issuer: "letsencrypt-prod"
spec:
ingressClassName: nginx
ingressClassName: haproxy
tls:
- hosts:
- registry.strix.systems
- registry.ntwl.xyz
secretName: registry-strix-systems-tls
rules:
- host: registry.strix.systems
@ -24,3 +25,13 @@ spec:
name: registry
port:
number: 5000
- host: registry.ntwl.xyz
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: registry
port:
number: 5000

View File

@ -1,37 +1,17 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: registry-pv
spec:
storageClassName: local-path
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
claimRef:
namespace: default
name: registry-pvc
hostPath:
path: "/var/lib/rancher/k3s/storage/registry-pv"
type: DirectoryOrCreate
---
apiVersion: v1
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: registry-pvc
name: registry-storage
namespace: ops
spec:
volumeName: registry-pv
storageClassName: zfs-fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storageClassName: local-path
---
@ -39,6 +19,7 @@ apiVersion: v1
kind: Service
metadata:
name: registry
namespace: ops
spec:
ports:
- name: web
@ -53,6 +34,7 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: registry
namespace: ops
labels:
app: registry
spec:
@ -88,7 +70,7 @@ spec:
name: registry
key: REGISTRY_HTTP_SECRET
volumeMounts:
- name: registry-pvc
- name: registry-storage
mountPath: /var/lib/registry
- name: registry-auth
mountPath: /auth
@ -105,9 +87,9 @@ spec:
- name: registry-config
configMap:
name: registry
- name: registry-pvc
- name: registry-storage
persistentVolumeClaim:
claimName: registry-pvc
claimName: registry-storage
---
@ -115,7 +97,7 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: registry
# namespace: registry
namespace: ops
data:
config.yml: |
version: 0.1

View File

@ -5,11 +5,9 @@ metadata:
name: spacerace-api
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
# don't do this again
nginx.ingress.kubernetes.io/use-regex: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$2
haproxy.org/path-rewrite: "/api(/)?(.*) /$2"
spec:
ingressClassName: nginx
ingressClassName: haproxy
tls:
- hosts:
- spacerace.strix.systems

View File

@ -7,10 +7,11 @@ metadata:
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
spec:
ingressClassName: nginx
ingressClassName: haproxy
tls:
- hosts:
- minio.strix.systems
- minio.ntwl.xyz
secretName: minio.strix.systems
rules:
- host: minio.strix.systems
@ -23,6 +24,16 @@ spec:
name: minio-service
port:
number: 9090
- host: minio.ntwl.xyz
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: minio-service
port:
number: 9090
---
@ -34,12 +45,13 @@ metadata:
app: minio
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
nginx.ingress.kubernetes.io/proxy-body-size: 1g
haproxy.org/proxy-body-size-limit: "1g"
spec:
ingressClassName: nginx
ingressClassName: haproxy
tls:
- hosts:
- storage.strix.systems
- storage.ntwl.xyz
secretName: storage.strix.systems
rules:
- host: storage.strix.systems
@ -52,3 +64,13 @@ spec:
name: minio-service
port:
number: 9000
- host: storage.ntwl.xyz
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: minio-service
port:
number: 9000

View File

@ -11,7 +11,7 @@ metadata:
spec:
storageClassName: ""
capacity:
storage: 10Gi
storage: 20Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
@ -34,7 +34,7 @@ spec:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storage: 20Gi
---

128
storage/openebs.yaml Normal file
View File

@ -0,0 +1,128 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: openebs
---
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: openebs
namespace: openebs
spec:
repo: https://openebs.github.io/openebs/
chart: openebs
targetNamespace: openebs
valuesContent: |-
engines:
replicated:
mayastor:
enabled: false
local:
lvm:
enabled: false
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: zfs-fast
parameters:
poolname: "fast/k8s"
recordsize: "128k"
compression: "off"
dedup: "off"
fstype: "zfs"
# NOTE(review): no reclaimPolicy/volumeBindingMode given, so this class gets
# the defaults (Delete / Immediate), while the hostpath classes below set
# Retain / WaitForFirstConsumer — confirm that deleting a PVC on this class
# is meant to destroy the underlying ZFS dataset.
provisioner: zfs.csi.openebs.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: zfs-slow
parameters:
poolname: "slow/k8s"
# https://www.zfshandbook.com/docs/advanced-zfs/performance-tuning/
recordsize: "1M"
compression: "off"
dedup: "off"
fstype: "zfs"
provisioner: zfs.csi.openebs.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: zfs-fast-shared
parameters:
poolname: "fast/k8s"
recordsize: "128k"
shared: "yes"
compression: "off"
dedup: "off"
fstype: "zfs"
provisioner: zfs.csi.openebs.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: zfs-slow-shared
parameters:
poolname: "slow/k8s"
# https://www.zfshandbook.com/docs/advanced-zfs/performance-tuning/
recordsize: "1M"
shared: "yes"
compression: "off"
dedup: "off"
fstype: "zfs"
provisioner: zfs.csi.openebs.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: fast
annotations:
cas.openebs.io/config: |
- name: StorageType
value: "hostpath"
- name: BasePath
value: "/fast/k8s/"
openebs.io/cas-type: local
provisioner: openebs.io/local
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: slow
annotations:
cas.openebs.io/config: |
- name: StorageType
value: "hostpath"
- name: BasePath
value: "/slow/k8s/"
openebs.io/cas-type: local
provisioner: openebs.io/local
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer