commit 5910b2dde5
Author: nomad
Date: 2025-05-26 04:16:11 +03:00

26 changed files with 1351 additions and 0 deletions

alaskartvChart (new submodule)
@@ -0,0 +1 @@
Subproject commit e7d94bb2fb3c6b5cc570fbe113d0a86db46bf543

backups/cronjobs.yaml (new file)
@@ -0,0 +1,95 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: backups-nfs
namespace: backups
spec:
schedule: "0 2 * * *" # Runs daily at 2 AM
successfulJobsHistoryLimit: 1
failedJobsHistoryLimit: 1
jobTemplate:
spec:
template:
spec:
containers:
- name: nfs-backup
image: alpine:3.18
command:
- /bin/sh
- -c
- |
apk add --no-cache rsync
# Backup /SSD/configs/ to backups/alaskartv
rsync -av --delete /configs/ /backups/alaskartv/
# Backup Forgejo instance and DB
mkdir -p /backups/git
rsync -av --delete /git/forgejo-instance/ /backups/git/forgejo-instance/
rsync -av --delete /git/forgejo-db/ /backups/git/forgejo-db/
volumeMounts:
- name: nfs-configs
mountPath: /configs
- name: nfs-git
mountPath: /git
- name: nfs-backups
mountPath: /backups
securityContext:
runAsUser: 0
runAsGroup: 0
privileged: true
restartPolicy: OnFailure
volumes:
- name: nfs-configs
persistentVolumeClaim:
claimName: nfs-ssd-configs-pvc
- name: nfs-git
persistentVolumeClaim:
claimName: nfs-nas-git-pvc
- name: nfs-backups
persistentVolumeClaim:
claimName: nfs-nas-backups-pvc
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-ssd-configs-pvc
namespace: backups
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 3Ti
storageClassName: ""
volumeName: nfs-ssd-configs
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-nas-git-pvc
namespace: backups
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 3Ti
storageClassName: ""
volumeName: nfs-nas-git
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-nas-backups-pvc
namespace: backups
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 3Ti
storageClassName: ""
volumeName: nfs-nas-backups
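
To smoke-test the backup without waiting for the 02:00 schedule, a one-off Job can be created from the CronJob and its logs followed (the job name here is arbitrary):

    kubectl create job backups-nfs-manual --from=cronjob/backups-nfs -n backups
    kubectl logs -n backups job/backups-nfs-manual -f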

backups/pv.yaml (new file)
@@ -0,0 +1,67 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: nfs-ssd-configs
namespace: backups
spec:
capacity:
storage: 3Ti
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
csi:
driver: nfs.csi.k8s.io
volumeHandle: nfs-ssd
volumeAttributes:
server: 192.168.0.200
share: /SSD/media/configs
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: nfs-nas-git
namespace: backups
spec:
capacity:
storage: 3Ti
accessModes:
- ReadWriteMany
mountOptions:
- rw
- sync
- noatime
- vers=4.2
persistentVolumeReclaimPolicy: Retain
csi:
driver: nfs.csi.k8s.io
volumeHandle: nfs-nas-git
volumeAttributes:
server: 192.168.0.100
share: /git
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: nfs-nas-backups
namespace: backups
spec:
capacity:
storage: 3Ti
accessModes:
- ReadWriteMany
mountOptions:
- rw
- sync
- noatime
- vers=4.2
persistentVolumeReclaimPolicy: Retain
csi:
driver: nfs.csi.k8s.io
volumeHandle: nfs-nas-backups
volumeAttributes:
server: 192.168.0.100
share: /backups

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: csi-rclone

@@ -0,0 +1,66 @@
# This YAML file contains RBAC API objects that are necessary to run external
# CSI attacher for rclone adapter
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-controller-rclone
namespace: csi-rclone
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-controller-rclone
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "patch", "update", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["csi.storage.k8s.io"]
resources: ["csinodeinfos"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update", "create", "delete"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "create", "update"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-attacher-role-rclone
subjects:
- kind: ServiceAccount
name: csi-controller-rclone
namespace: csi-rclone
roleRef:
kind: ClusterRole
name: external-controller-rclone
apiGroup: rbac.authorization.k8s.io

@@ -0,0 +1,69 @@
# This YAML file contains attacher & csi driver API objects that are necessary
# to run external CSI attacher for rclone
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: csi-controller-rclone
namespace: csi-rclone
spec:
serviceName: "csi-controller-rclone"
replicas: 1
selector:
matchLabels:
app: csi-controller-rclone
template:
metadata:
labels:
app: csi-controller-rclone
spec:
serviceAccountName: csi-controller-rclone
containers:
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v5.0.2
args:
- "--csi-address=$(ADDRESS)"
- "--extra-create-metadata"
# - "--leader-election"
- "--v=1"
env:
- name: ADDRESS
value: /plugin/csi.sock
imagePullPolicy: "Always"
volumeMounts:
- name: socket-dir
mountPath: /plugin
- name: csi-attacher
image: k8s.gcr.io/sig-storage/csi-attacher:v3.4.0
args:
- "--csi-address=$(ADDRESS)"
- "--v=1"
# - "--leader-election"
env:
- name: ADDRESS
value: /plugin/csi.sock
imagePullPolicy: "Always"
volumeMounts:
- name: socket-dir
mountPath: /plugin
- name: rclone
image: wunderio/csi-rclone:v3.0.0
args:
- "/bin/csi-rclone-plugin"
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--v=1"
env:
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix://plugin/csi.sock
imagePullPolicy: "Always"
volumeMounts:
- name: socket-dir
mountPath: /plugin
volumes:
- name: socket-dir
emptyDir: {}

@@ -0,0 +1,8 @@
# this should be deregistered once the controller stops
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: csi-rclone
spec:
attachRequired: true
podInfoOnMount: true

@@ -0,0 +1,40 @@
# This YAML defines all API objects to create RBAC roles for CSI node plugin
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-nodeplugin-rclone
namespace: csi-rclone
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-nodeplugin-rclone
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["secrets","secret"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-nodeplugin-rclone
subjects:
- kind: ServiceAccount
name: csi-nodeplugin-rclone
namespace: csi-rclone
roleRef:
kind: ClusterRole
name: csi-nodeplugin-rclone
apiGroup: rbac.authorization.k8s.io

@@ -0,0 +1,83 @@
# This YAML file contains driver-registrar & csi driver nodeplugin API objects
# that are necessary to run CSI nodeplugin for rclone
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: csi-nodeplugin-rclone
namespace: csi-rclone
spec:
selector:
matchLabels:
app: csi-nodeplugin-rclone
template:
metadata:
labels:
app: csi-nodeplugin-rclone
spec:
serviceAccountName: csi-nodeplugin-rclone
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: node-driver-registrar
image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0
lifecycle:
preStop:
exec:
command: ["/bin/sh", "-c", "rm -rf /registration/csi-rclone /registration/csi-rclone-reg.sock"]
args:
- --v=1
- --csi-address=/plugin/csi.sock
- --kubelet-registration-path=/var/lib/kubelet/plugins/csi-rclone/csi.sock
env:
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: plugin-dir
mountPath: /plugin
- name: registration-dir
mountPath: /registration
- name: rclone
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: wunderio/csi-rclone:v3.0.0
args:
- "/bin/csi-rclone-plugin"
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--v=1"
env:
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix://plugin/csi.sock
imagePullPolicy: "Always"
lifecycle:
postStart:
exec:
command: ["/bin/sh", "-c", "mount -t fuse.rclone | while read -r mount; do umount $(echo $mount | awk '{print $3}') || true ; done"]
volumeMounts:
- name: plugin-dir
mountPath: /plugin
- name: pods-mount-dir
mountPath: /var/lib/kubelet/pods
mountPropagation: "Bidirectional"
volumes:
- name: plugin-dir
hostPath:
path: /var/lib/kubelet/plugins/csi-rclone
type: DirectoryOrCreate
- name: pods-mount-dir
hostPath:
path: /var/lib/kubelet/pods
type: Directory
- hostPath:
path: /var/lib/kubelet/plugins_registry
type: DirectoryOrCreate
name: registration-dir
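
Once the controller StatefulSet and this DaemonSet are up, a quick sanity check (not part of the manifests) is to confirm the pods are running and the driver object is registered:

    kubectl get pods -n csi-rclone
    kubectl get csidriver csi-rclone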

@@ -0,0 +1,69 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: rclone
provisioner: csi-rclone
parameters:
remote: "webdav"
remotePath: "/"
webdav-url: "http://192.168.0.200:30999/dav/"
allow-other: "true"
allow-non-empty: "true"
async-read: "true"
buffer-size: "48M"
dir-cache-time: "15s"
cache-dir: "/mnt/rclone-cache"
dir-permissions: "0775"
file-permissions: "0664"
gid: "1000"
log-level: "INFO"
poll-interval: "15s"
timeout: "10m"
uid: "1000"
use-mmap: "true"
vfs-cache-max-age: "672h"
vfs-cache-max-size: "5G"
vfs-cache-mode: "full"
vfs-cache-poll-interval: "15s"
vfs-fast-fingerprint: "true"
vfs-read-ahead: "96M"
vfs-read-chunk-size: "32M"
vfs-read-chunk-size-limit: "32M"
vfs-refresh: "true"
#pathPattern: "${.PVC.namespace}/${.PVC.annotations.csi-rclone/storage-path}"
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: rclone-tor
provisioner: csi-rclone
parameters:
remote: "webdav"
remotePath: "/"
webdav-url: "http://192.168.0.200:30999/dav/"
webdav-user: "binaskar9@gmail.com"
webdav-pass: "u06OwwL0ujAhTlUrAGR8o9cFjbcC5LOwjCLZaQ"
allow-other: "true"
allow-non-empty: "true"
async-read: "true"
buffer-size: "48M"
dir-cache-time: "15s"
cache-dir: "/mnt/rclone-cache-tor"
dir-permissions: "0775"
file-permissions: "0664"
gid: "1000"
log-level: "INFO"
poll-interval: "15s"
timeout: "10m"
uid: "1000"
use-mmap: "true"
vfs-cache-max-age: "672h"
vfs-cache-max-size: "5G"
vfs-cache-mode: "full"
vfs-cache-poll-interval: "15s"
vfs-fast-fingerprint: "true"
vfs-read-ahead: "96M"
vfs-read-chunk-size: "32M"
vfs-read-chunk-size-limit: "32M"
vfs-refresh: "true"
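
A workload consumes one of these classes by requesting a claim against it; a minimal sketch, with a placeholder claim name, namespace, and size:

    kubectl apply -f - <<EOF
    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: example-rclone-pvc
      namespace: default
    spec:
      accessModes:
        - ReadWriteMany
      storageClassName: rclone
      resources:
        requests:
          storage: 10Gi
    EOF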

@@ -0,0 +1,122 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: forgejo-runner
name: forgejo-runner
namespace: development
spec:
replicas: 1
selector:
matchLabels:
app: forgejo-runner
strategy: {}
template:
metadata:
creationTimestamp: null
labels:
app: forgejo-runner
spec:
nodeSelector:
role: worker
restartPolicy: Always
volumes:
- name: docker-certs
emptyDir: {}
- name: docker-storage
hostPath:
path: /dockerImages
- name: runner-data
persistentVolumeClaim:
claimName: nfs-git-claim
initContainers:
- name: runner-register
securityContext:
runAsUser: 1001
runAsGroup: 1001
image: code.forgejo.org/forgejo/runner:5.0.4
command:
- sh
- -c
- |
forgejo-runner register \
--no-interactive \
--token ${RUNNER_SECRET} \
--name ${RUNNER_NAME} \
--instance ${FORGEJO_INSTANCE_URL} \
--labels "host:host,docker:docker://node:20-bullseye";
env:
- name: RUNNER_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: RUNNER_SECRET
valueFrom:
secretKeyRef:
name: forgejo-secrets
key: token
- name: FORGEJO_INSTANCE_URL
value: https://git.askar.tv
resources:
limits:
cpu: "0.50"
memory: "64Mi"
volumeMounts:
- name: runner-data
mountPath: /data
subPath: runner-data
containers:
- name: runner
image: code.forgejo.org/forgejo/runner:5.0.4
securityContext:
runAsUser: 0
runAsGroup: 0
command:
- sh
- -c
- |
apk add --no-cache docker nodejs && echo "Docker Installer";
while ! nc -z localhost 2376 </dev/null; do
echo 'Waiting for Docker daemon...';
sleep 5;
done;
# while ! docker version >/dev/null 2>&1; do
# echo 'Docker CLI is installed but the daemon is not ready yet...';
# sleep 5;
# done;
echo 'Docker daemon is ready!';
docker context create multiarch || echo "Context already exists" && \
docker buildx create multiarch --use || echo "Buildx already set up";
forgejo-runner daemon;
env:
- name: DOCKER_HOST
value: tcp://localhost:2376
- name: DOCKER_CERT_PATH
value: /certs/client
- name: DOCKER_TLS_VERIFY
value: "1"
volumeMounts:
- name: docker-certs
mountPath: /certs
- name: runner-data
mountPath: /data
subPath: runner-data
- name: daemon
image: docker:27.4.1-dind
env:
- name: DOCKER_TLS_CERTDIR
value: /certs
securityContext:
runAsUser: 0
runAsGroup: 0
privileged: true
volumeMounts:
- name: docker-certs
mountPath: /certs
- name: docker-storage
mountPath: /var/lib/docker
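
Whether registration succeeded and the runner connected to the instance can be checked from the container logs (container names as defined above):

    kubectl logs deployment/forgejo-runner -n development -c runner-register
    kubectl logs deployment/forgejo-runner -n development -c runner -f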

development/deployment.yaml (new file)
@@ -0,0 +1,104 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: forgejo
namespace: development
spec:
replicas: 1
selector:
matchLabels:
app: forgejo
template:
metadata:
labels:
app: forgejo
spec:
# nodeSelector:
# node-role.kubernetes.io/master: "true"
containers:
- name: forgejo
image: codeberg.org/forgejo/forgejo:9.0.3
ports:
- containerPort: 3000
- containerPort: 22
volumeMounts:
- name: forgejo-data
mountPath: /data
subPath: forgejo-instance
env:
- name: FORGEJO_ADMIN_USER
valueFrom:
secretKeyRef:
name: forgejo-secrets
key: admin-user
- name: FORGEJO_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: forgejo-secrets
key: admin-password
- name: FORGEJO_SECRET
valueFrom:
secretKeyRef:
name: forgejo-secrets
key: secret
volumes:
- name: forgejo-data
persistentVolumeClaim:
claimName: nfs-git-claim
---
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: development
labels:
io.kompose.service: forgejo-db
name: forgejo-db
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: forgejo-db
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: forgejo-db
spec:
containers:
- env:
- name: POSTGRES_DB
value: forgejo
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: database-secrets
key: postgres-forgejo-password
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
name: database-secrets
key: postgres-user
- name: TZ
value: Asia/Kuwait
- name: PGDATA
value: /var/lib/postgresql/data
- name: PGID
value: "1000"
- name: PUID
value: "1000"
image: postgres:15.2
name: forgejo-db
securityContext:
runAsUser: 999
runAsGroup: 999
volumeMounts:
- mountPath: /var/lib/postgresql/data
name: git
subPath: forgejo-db
restartPolicy: Always
volumes:
- name: git
persistentVolumeClaim:
claimName: nfs-git-claim
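
These Deployments (and the runner above) reference Secrets that are not part of this commit: forgejo-secrets and database-secrets must exist in the development namespace before the pods can start. A sketch with placeholder values:

    kubectl create secret generic forgejo-secrets -n development \
      --from-literal=admin-user=REPLACE_ME \
      --from-literal=admin-password=REPLACE_ME \
      --from-literal=secret=REPLACE_ME \
      --from-literal=token=REPLACE_ME
    kubectl create secret generic database-secrets -n development \
      --from-literal=postgres-user=REPLACE_ME \
      --from-literal=postgres-forgejo-password=REPLACE_ME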

development/ingress.yaml (new file)
@@ -0,0 +1,36 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: development-ingress
namespace: development
spec:
entryPoints:
- web
- websecure
routes:
- match: Host(`git.askar.tv`)
kind: Rule
middlewares:
- name: https-redirect
namespace: default
- name: analytics
namespace: umami
services:
- name: forgejo
port: 3003
tls:
certResolver: le
---
apiVersion: traefik.io/v1alpha1
kind: IngressRouteTCP
metadata:
name: development-ssh
namespace: development
spec:
entryPoints:
- ssh
routes:
- match: HostSNI(`*`)
services:
- name: forgejo
port: 2222
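
With the ssh entrypoint exposed on the Traefik service (see the patch script and Helm values later in this commit), repositories can be cloned over SSH through this TCP route; a hedged example, assuming external port 22 reaches the entrypoint and that owner/repo is a placeholder:

    git clone ssh://git@git.askar.tv/owner/repo.git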

development/pv.yaml (new file)
@@ -0,0 +1,35 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: nfs-git-volume
spec:
capacity:
storage: 3Ti
accessModes:
- ReadWriteMany
mountOptions:
- rw
- sync
- noatime
- vers=4.2
persistentVolumeReclaimPolicy: Retain
csi:
driver: nfs.csi.k8s.io
volumeHandle: nfs-git-volume
volumeAttributes:
server: 192.168.0.100
share: /git
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-git-claim
namespace: development
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 3Ti
volumeName: nfs-git-volume
storageClassName: ""

development/service.yaml (new file)
@@ -0,0 +1,36 @@
apiVersion: v1
kind: Service
metadata:
name: forgejo
namespace: development
spec:
selector:
app: forgejo
ports:
- port: 3003
targetPort: 3000
name: instance
- port: 2222
targetPort: 22
name: ssh
---
apiVersion: v1
kind: Service
metadata:
name: forgejo-db
namespace: development
labels:
io.kompose.service: forgejo-db
spec:
type: ClusterIP
ports:
- port: 5434
targetPort: 5432
protocol: TCP
selector:
io.kompose.service: forgejo-db
---

@@ -0,0 +1,57 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: external-services
namespace: external-services
spec:
entryPoints:
- web
- websecure
routes:
- match: Host(`az.askar.tv`)
kind: Rule
services:
- name: motomo
port: 8080
- match: Host(`i.askar.tv`)
kind: Rule
services:
- name: immich
port: 2283
- match: Host(`ai.askar.tv`)
kind: Rule
middlewares:
- name: analytics
namespace: umami
services:
- name: aichat
port: 3002
- match: Host(`offline.askar.tv`)
kind: Rule
middlewares:
- name: analytics
namespace: umami
services:
- name: offline
namespace: default
port: 7000
- match: Host(`n8n.askar.tv`)
kind: Rule
middlewares:
- name: analytics
namespace: umami
services:
- name: n8n
port: 5678
- match: Host(`dlg.askar.tv`)
kind: Rule
middlewares:
- name: analytics
namespace: umami
services:
- name: qbitorrent
port: 1235
tls:
certResolver: le

@@ -0,0 +1,138 @@
apiVersion: v1
kind: Service
metadata:
name: motomo
namespace: external-services
spec:
type: ClusterIP
ports:
- port: 8080
targetPort: 8080
---
apiVersion: v1
kind: Endpoints
metadata:
name: motomo
namespace: external-services
subsets:
- addresses:
- ip: 192.168.0.200
ports:
- port: 8080
---
apiVersion: v1
kind: Service
metadata:
name: immich
namespace: external-services
spec:
type: ClusterIP
ports:
- port: 2283
targetPort: 2283
---
apiVersion: v1
kind: Endpoints
metadata:
name: immich
namespace: external-services
subsets:
- addresses:
- ip: 192.168.0.200
ports:
- port: 2283
---
apiVersion: v1
kind: Service
metadata:
name: aichat
namespace: external-services
spec:
type: ClusterIP
ports:
- port: 3002
targetPort: 3002
---
apiVersion: v1
kind: Endpoints
metadata:
name: aichat
namespace: external-services
subsets:
- addresses:
- ip: 192.168.0.200
ports:
- port: 3002
---
apiVersion: v1
kind: Service
metadata:
name: offline
namespace: external-services
spec:
type: ClusterIP
ports:
- port: 7000
targetPort: 7000
---
apiVersion: v1
kind: Endpoints
metadata:
name: offline
namespace: external-services
subsets:
- addresses:
- ip: 192.168.0.200
ports:
- port: 7000
---
apiVersion: v1
kind: Service
metadata:
name: n8n
namespace: external-services
spec:
type: ClusterIP
ports:
- port: 5678
targetPort: 5678
---
apiVersion: v1
kind: Endpoints
metadata:
name: n8n
namespace: external-services
subsets:
- addresses:
- ip: 192.168.0.200
ports:
- port: 5678
---
apiVersion: v1
kind: Service
metadata:
name: qbitorrent
namespace: external-services
spec:
type: ClusterIP
ports:
- port: 1235
targetPort: 1235
---
apiVersion: v1
kind: Endpoints
metadata:
name: qbitorrent
namespace: external-services
subsets:
- addresses:
- ip: 192.168.0.100
ports:
- port: 1235

keel/deployment.yaml (new file)
@@ -0,0 +1,153 @@
apiVersion: v1
kind: Namespace
metadata:
name: keel
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: keel
namespace: keel
labels:
app: keel
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: keel
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- watch
- list
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- watch
- list
- apiGroups:
- ""
- extensions
- apps
- batch
resources:
- pods
- replicasets
- replicationcontrollers
- statefulsets
- deployments
- daemonsets
- jobs
- cronjobs
verbs:
- get
- delete
- watch
- list
- update
- apiGroups:
- ""
resources:
- configmaps
- pods/portforward
verbs:
- get
- create
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: keel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: keel
subjects:
- kind: ServiceAccount
name: keel
namespace: keel
---
apiVersion: v1
kind: Service
metadata:
name: keel
namespace: keel
labels:
app: keel
spec:
type: LoadBalancer
ports:
- port: 9300
targetPort: 9300
protocol: TCP
name: keel
selector:
app: keel
sessionAffinity: None
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: keel
namespace: keel
labels:
app: keel
spec:
replicas: 1
selector:
matchLabels:
app: keel
template:
metadata:
labels:
app: keel
spec:
serviceAccountName: keel
containers:
- name: keel
image: "keelhq/keel:0.20.0"
imagePullPolicy: Always
command: ["/bin/keel"]
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: WEBHOOK_ENDPOINT
value: "http://192.168.0.200:9090/notify"
- name: NOTIFICATION_LEVEL
value: "info"
ports:
- containerPort: 9300
livenessProbe:
httpGet:
path: /healthz
port: 9300
initialDelaySeconds: 30
timeoutSeconds: 10
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 50m
memory: 64Mi
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: keel
namespace: keel
spec:
maxUnavailable: 1
selector:
matchLabels:
app: keel
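
Keel only updates workloads that opt in through annotations; a hedged example of enabling poll-based minor-version updates for the Forgejo Deployment from this commit (the policy and schedule values are illustrative):

    kubectl annotate deployment forgejo -n development \
      keel.sh/policy=minor \
      keel.sh/trigger=poll \
      keel.sh/pollSchedule="@every 6h"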

@@ -0,0 +1,21 @@
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: extract-device-id
namespace: default
spec:
headers:
customRequestHeaders:
X-Device-Id: "{regexExtract:Authorization,MediaBrowser Client=\"[^\"]+\", Device=\"[^\"]+\", DeviceId=\"([^\"]+)\"}"
---
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: set-device-cookie
namespace: default
spec:
headers:
customResponseHeaders:
Set-Cookie: "jellyfin-session={req.X-Device-Id}; Path=/; Max-Age=86400; HttpOnly; Secure"

@@ -0,0 +1,9 @@
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: digest-auth
namespace: default
spec:
digestAuth:
realm: "PublishAPI"
secret: traefik-digest-auth
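
This middleware expects a Secret named traefik-digest-auth whose users key holds an htdigest-formatted file; a sketch, assuming the htdigest tool (apache2-utils) is available and the user name is a placeholder:

    htdigest -c users.htdigest PublishAPI publisher
    kubectl create secret generic traefik-digest-auth -n default --from-file=users=users.htdigest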

@@ -0,0 +1,9 @@
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: https-redirect
namespace: default
spec:
redirectScheme:
scheme: https
permanent: true

@@ -0,0 +1,11 @@
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: lan-only
namespace: default
spec:
ipAllowList:
sourceRange:
- 192.168.0.0/24
- 127.0.0.1/32

@@ -0,0 +1,10 @@
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: offline
namespace: default
spec:
redirectRegex:
regex: "^.*"
replacement: "https://offline.askar.tv/"
permanent: false

@@ -0,0 +1,31 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: nfs-ssd-traefik
spec:
capacity:
storage: 3Ti
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
csi:
driver: nfs.csi.k8s.io
volumeHandle: nfs-ssd-traefik
volumeAttributes:
server: 192.168.0.200
share: /SSD/media
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-ssd-traefik-pvc
namespace: traefik
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 3Ti
volumeName: nfs-ssd-traefik
storageClassName: ""

@@ -0,0 +1,15 @@
#!/usr/bin/env bash
kubectl patch svc traefik -n traefik --type='json' -p='[
{
"op": "add",
"path": "/spec/ports/-",
"value": {
"name": "ssh",
"port": 22,
"targetPort": 22,
"nodePort": 30222,
"protocol": "TCP"
}
}
]'

@@ -0,0 +1,62 @@
ports:
web:
port: 80
targetPort: 80
nodePort: 30808
websecure:
port: 443
targetPort: 443
nodePort: 30443
ssh:
port: 22
targetPort: 2222
nodePort: 30222
service:
spec:
externalTrafficPolicy: Local
nodeSelector:
kubernetes.io/hostname: alaskarserver
additionalArguments:
- "--entrypoints.web.forwardedHeaders.trustedIPs=0.0.0.0/0"
- "--entrypoints.websecure.forwardedHeaders.trustedIPs=0.0.0.0/0"
- "--entrypoints.ssh.address=:22"
- "--providers.kubernetescrd.allowCrossNamespace=true"
- "--log.level=INFO"
ingressClass:
enabled: true
isDefaultClass: true
name: traefik-ingress
persistence:
enabled: true
existingClaim: nfs-ssd-traefik-pvc # Persistent storage for ACME certificates
subPath: certs
initContainers:
- name: volume-permissions
image: busybox:latest
command: ["sh", "-c", "mkdir -p /data && touch /data/acme.json && chmod 600 /data/acme.json"]
volumeMounts:
- name: data
mountPath: /data
certificatesResolvers:
le:
acme:
email: sager@alaskar.dev
storage: /data/acme.json
httpChallenge:
entryPoint: web
experimental:
plugins:
traefik-umami-feeder:
moduleName: "github.com/astappiev/traefik-umami-feeder"
version: "v1.2.0"
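
These overrides are meant to be passed to the Traefik Helm chart; assuming the file is saved as values.yaml and the official chart repository is used:

    helm repo add traefik https://traefik.github.io/charts
    helm upgrade --install traefik traefik/traefik -n traefik --create-namespace -f values.yaml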