Init Ahoy

This commit is contained in:
nomadics9 2025-01-11 05:40:20 +03:00
commit 19db3f90f4
51 changed files with 2184 additions and 0 deletions

3
.gitignore vendored Normal file
View file

@ -0,0 +1,3 @@
secrets
loadBalancer/middlewares/umami/
docker-compose.yml

View file

@ -0,0 +1,77 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
keel.sh/policy: minor
keel.sh/trigger: poll
keel.sh/pollSchedule: "@every 6h"
labels:
io.kompose.service: jellyfin
name: jellyfin
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: jellyfin
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: jellyfin
spec:
nodeSelector:
role: master
containers:
- env:
- name: PGID
value: "1000"
- name: PUID
value: "1000"
- name: TZ
value: Asia/Kuwait
- name: NVIDIA_VISIBLE_DEVICES
value: all # Make all GPUs visible
- name: NVIDIA_DRIVER_CAPABILITIES
value: all # Required for Jellyfin hardware acceleration
image: nomadics/alaskarfin:latest
name: jellyfin
ports:
- containerPort: 8096
protocol: TCP
resources:
limits:
nvidia.com/gpu: 1
requests:
nvidia.com/gpu: 1
volumeMounts:
- mountPath: /config
name: ssd
subPath: configs/jellyarr/jellyfin_config
- mountPath: /data/tvshows
name: ssd
subPath: tvshows
- mountPath: /data/movies
name: ssd
subPath: movies
- mountPath: /data/anime
name: ssd
subPath: anime
- mountPath: /data/books/audiobooks
name: ssd
subPath: books/audiobooks
- mountPath: /data/books/ebooks
name: ssd
subPath: books/ebooks
- mountPath: /data/HDD/media
name: hdd
restartPolicy: Always
runtimeClassName: nvidia
volumes:
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-pvc
- name: hdd
persistentVolumeClaim:
claimName: nfs-hdd-pvc

View file

@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
labels:
io.kompose.service: jellyfin
name: jellyfin
spec:
type: ClusterIP
ports:
- name: "8096"
port: 8096
targetPort: 8096
selector:
io.kompose.service: jellyfin

View file

@ -0,0 +1,51 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
keel.sh/policy: minor
keel.sh/trigger: poll
keel.sh/pollSchedule: "@every 1m"
labels:
io.kompose.service: jellyseerr
name: jellyseerr
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: jellyseerr
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: jellyseerr
spec:
containers:
- env:
- name: JELLYFIN_TYPE
value: jellyfin
- name: LOG_LEVEL
value: debug
- name: PGID
value: "1000"
- name: PUID
value: "1000"
- name: TZ
value: Asia/Kuwait
image: nomadics/alaskarseer:latest
name: jellyseerr
ports:
- containerPort: 5055
protocol: TCP
volumeMounts:
- mountPath: /app/config
name: ssd
subPath: configs/jellyarr/jellyseerr_config
restartPolicy: Always
volumes:
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-pvc
- name: hdd
persistentVolumeClaim:
claimName: nfs-hdd-pvc

View file

@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
labels:
io.kompose.service: jellyseerr
name: jellyseerr
spec:
type: ClusterIP
ports:
- name: "5055"
port: 5055
targetPort: 5055
selector:
io.kompose.service: jellyseerr

View file

@ -0,0 +1,47 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: api-server
labels:
app: api-server
spec:
replicas: 1
selector:
matchLabels:
app: api-server
template:
metadata:
labels:
app: api-server
spec:
nodeSelector:
role: master
containers:
- name: api-server
image: docker.io/nomadics/api-forge:latest
imagePullPolicy: Never
ports:
- containerPort: 8080
env:
- name: GIT_USERNAME
valueFrom:
secretKeyRef:
name: api-server-secrets
key: GIT_USERNAME
- name: GIT_EMAIL
valueFrom:
secretKeyRef:
name: api-server-secrets
key: GIT_EMAIL
volumeMounts:
- name: alaskartv-data
mountPath: /data/alaskartv-forge
- name: alaskartv-ssh
mountPath: /home/appuser/.ssh
volumes:
- name: alaskartv-data
hostPath:
path: /home/sager/alaskartv-forge
- name: alaskartv-ssh
hostPath:
path: /home/sager/.ssh

View file

@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: api-server
labels:
app: api-server
spec:
type: LoadBalancer
ports:
- port: 9090
targetPort: 8080
protocol: TCP
selector:
app: api-server

View file

@ -0,0 +1,16 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: postgres-init-script
data:
  # Mounted at /docker-entrypoint-initdb.d by the arr-db Deployment; runs
  # once, only on first startup with an empty data directory.
  create-databases.sh: |
    #!/bin/bash
    # -v ON_ERROR_STOP=1: abort on the first failed statement instead of
    # silently continuing past errors.
    psql -v ON_ERROR_STOP=1 -U postgres <<-EOSQL
    CREATE DATABASE "radarr-log";
    CREATE DATABASE "sonarr-main";
    CREATE DATABASE "sonarr-log";
    CREATE DATABASE "bazarr";
    CREATE DATABASE "prowlarr-main";
    CREATE DATABASE "prowlarr-log";
    EOSQL
---

View file

@ -0,0 +1,77 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
io.kompose.service: arr-db
name: arr-db
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: arr-db
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: arr-db
spec:
containers:
- env:
- name: POSTGRES_DB
value: radarr-main
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: postgres-secrets
key: postgres-password
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
name: postgres-secrets
key: postgres-user
- name: TZ
value: Asia/Kuwait
image: postgres:15.2
name: arr-db
volumeMounts:
- mountPath: /docker-entrypoint-initdb.d
name: init-script
- mountPath: /var/lib/postgresql/data
name: ssd
subPath: configs/databases/arrdatabase
restartPolicy: Always
volumes:
- name: init-script
configMap:
name: postgres-init-script
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-pvc
- name: hdd
persistentVolumeClaim:
claimName: nfs-hdd-pvc
---
# apiVersion: v1
# kind: Pod
# metadata:
# name: migration-pod
# labels:
# app: migration-pod
# spec:
# containers:
# - name: migration
# image: ubuntu:latest
# command: ["/bin/bash"]
# args: ["-c", "apt-get update && apt-get install -y pgloader && sleep infinity"]
# volumeMounts:
# - mountPath: /data/configs
# name: ssd
# subPath: configs
# volumes:
# - name: ssd
# persistentVolumeClaim:
# claimName: nfs-ssd-pvc
# restartPolicy: Never
#
---

View file

@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: arr-db
labels:
io.kompose.service: arr-db
spec:
type: ClusterIP
ports:
- port: 5433
targetPort: 5432
protocol: TCP
selector:
io.kompose.service: arr-db

View file

@ -0,0 +1,59 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: audiobookshelf
annotations:
keel.sh/policy: minor
keel.sh/trigger: poll
keel.sh/match-tag: "true"
keel.sh/pollSchedule: "@every 6h"
spec:
replicas: 1
selector:
matchLabels:
app: audiobookshelf
strategy:
type: Recreate
template:
metadata:
labels:
app: audiobookshelf
spec:
nodeSelector:
role: master
containers:
- name: audiobookshelf
image: ghcr.io/advplyr/audiobookshelf:latest
ports:
- containerPort: 80
env:
- name: PGID
value: "1000"
- name: PUID
value: "1000"
- name: TZ
value: Asia/Kuwait
volumeMounts:
- mountPath: /config
name: ssd
subPath: configs/audiobookshelf/config
- mountPath: /metadata
name: ssd
subPath: configs/audiobookshelf/metadata
- mountPath: /audiobooks
subPath: books/audiobooks
name: ssd
- mountPath: /books
subPath: books/books
name: ssd
- mountPath: /podcasts
subPath: books/podcasts
name: ssd
restartPolicy: Always
volumes:
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-pvc
- name: hdd
persistentVolumeClaim:
claimName: nfs-hdd-pvc

View file

@ -0,0 +1,10 @@
apiVersion: v1
kind: Service
metadata:
name: audiobookshelf
spec:
selector:
app: audiobookshelf
ports:
- port: 13378
targetPort: 80

View file

@ -0,0 +1,64 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
keel.sh/policy: minor
keel.sh/trigger: poll
keel.sh/pollSchedule: "@every 6h"
labels:
io.kompose.service: bazarr
name: bazarr
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: bazarr
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: bazarr
spec:
containers:
- env:
- name: DOCKER_MODS
value: wayller/bazarr-mod-subsync:latest
- name: PGID
value: "1000"
- name: PUID
value: "1000"
- name: TZ
value: Asia/Kuwait
image: linuxserver/bazarr:latest
name: bazarr
ports:
- containerPort: 6767
protocol: TCP
lifecycle:
postStart:
exec:
command: ["/bin/sh", "-c", "cp /config/batch_sync.py / && pip install colorama tqdm autosubsync"]
volumeMounts:
- mountPath: /config
name: ssd
subPath: configs/trackers/bazarr_config
- mountPath: /tvshows
name: ssd
subPath: tvshows
- mountPath: /movies
name: ssd
subPath: movies
- mountPath: /anime
name: ssd
subPath: anime
- mountPath: /data/HDD/media
name: hdd
restartPolicy: Always
volumes:
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-pvc
- name: hdd
persistentVolumeClaim:
claimName: nfs-hdd-pvc

View file

@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
labels:
io.kompose.service: bazarr
name: bazarr
spec:
type: ClusterIP
ports:
- name: "6767"
port: 6767
targetPort: 6767
selector:
io.kompose.service: bazarr

View file

@ -0,0 +1,61 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
io.kompose.service: transmission
name: transmission
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: transmission
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: transmission
spec:
containers:
- env:
- name: PASS
valueFrom:
secretKeyRef:
name: dl-secrets
key: password
- name: PGID
value: "1000"
- name: PUID
value: "1000"
- name: TZ
value: Asia/Kuwait
- name: USER
valueFrom:
secretKeyRef:
name: dl-secrets
key: user
image: lscr.io/linuxserver/transmission:latest
name: transmission
ports:
- containerPort: 9091
protocol: TCP
- containerPort: 51413
protocol: TCP
- containerPort: 51413
protocol: UDP
volumeMounts:
- mountPath: /config
name: ssd
subPath: configs/download-stack/transmission
- mountPath: /downloads
name: hdd
subPath: transmission/downloads
restartPolicy: Always
volumes:
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-pvc
- name: hdd
persistentVolumeClaim:
claimName: nfs-hdd-pvc

View file

@ -0,0 +1,22 @@
apiVersion: v1
kind: Service
metadata:
labels:
io.kompose.service: transmission
name: transmission
spec:
type: ClusterIP
ports:
- name: "http-9091"
port: 9091
targetPort: 9091
- name: "tcp-51413"
port: 51413
targetPort: 51413
- name: 51413-udp
port: 51413
protocol: UDP
targetPort: 51413
selector:
io.kompose.service: transmission

View file

@ -0,0 +1,87 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
io.kompose.service: unpackerr
name: unpackerr
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: unpackerr
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: unpackerr
spec:
nodeSelector:
role: master
containers:
- env:
- name: TZ
value: Asia/Kuwait
- name: UN_DEBUG
value: "false"
- name: UN_DIR_MODE
value: "0755"
- name: UN_FILE_MODE
value: "0644"
- name: UN_INTERVAL
value: 2m
- name: UN_LOG_FILE
- name: UN_LOG_FILES
value: "10"
- name: UN_LOG_FILE_MB
value: "10"
- name: UN_MAX_RETRIES
value: "3"
- name: UN_PARALLEL
value: "1"
- name: UN_RADARR_0_API_KEY
value: 5bdad7cc4f33443bafefb5e185b3e0e7
- name: UN_RADARR_0_DELETE_DELAY
value: 5m
- name: UN_RADARR_0_DELETE_ORIG
value: "false"
- name: UN_RADARR_0_PATHS_0
value: /downloads
- name: UN_RADARR_0_PROTOCOLS
value: torrent
- name: UN_RADARR_0_TIMEOUT
value: 10s
- name: UN_RADARR_0_URL
value: http://radarr:7878
- name: UN_RETRY_DELAY
value: 5m
- name: UN_SONARR_0_API_KEY
value: d973448580d041b7ba7e576a7aed9b11
- name: UN_SONARR_0_DELETE_DELAY
value: 5m
- name: UN_SONARR_0_DELETE_ORIG
value: "false"
- name: UN_SONARR_0_PATHS_0
value: /downloads
- name: UN_SONARR_0_PROTOCOLS
value: torrent
- name: UN_SONARR_0_TIMEOUT
value: 10s
- name: UN_SONARR_0_URL
value: http://sonarr:8989
- name: UN_START_DELAY
value: 1m
image: golift/unpackerr
name: unpackerr
volumeMounts:
- mountPath: /downloads
name: hdd
subPath: transmission/downloads
restartPolicy: Always
volumes:
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-pvc
- name: hdd
persistentVolumeClaim:
claimName: nfs-hdd-pvc

View file

@ -0,0 +1,51 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
keel.sh/policy: minor
keel.sh/trigger: poll
keel.sh/match-tag: "true"
keel.sh/pollSchedule: "@every 6h"
labels:
io.kompose.service: diun
name: diun
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: diun
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: diun
spec:
containers:
- args:
- serve
env:
- name: LOG_JSON
value: "false"
- name: LOG_LEVEL
value: info
- name: TZ
value: Asia/Kuwait
image: crazymax/diun:latest
name: diun
volumeMounts:
- mountPath: /data
name: ssd
subPath: configs/diun/data
- mountPath: /diun.yml
name: ssd
subPath: configs/diun/data/diun.yml
readOnly: true
restartPolicy: Always
volumes:
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-pvc
- name: hdd
persistentVolumeClaim:
claimName: nfs-hdd-pvc

View file

@ -0,0 +1,47 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
keel.sh/policy: minor
keel.sh/trigger: poll
keel.sh/pollSchedule: "@every 6h"
labels:
io.kompose.service: searcharr
name: searcharr
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: searcharr
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: searcharr
spec:
containers:
- env:
- name: TZ
value: Asia/Kuwait
image: toddrob/searcharr:latest
name: searcharr
volumeMounts:
- mountPath: /app/data
name: ssd
subPath: configs/searcharr/data
- mountPath: /app/logs
name: ssd
subPath: configs/searcharr/logs
- mountPath: /app/settings.py
name: ssd
subPath: configs/searcharr/settings.py
restartPolicy: Always
volumes:
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-pvc
- name: hdd
persistentVolumeClaim:
claimName: nfs-hdd-pvc

View file

@ -0,0 +1,41 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
keel.sh/policy: minor
keel.sh/trigger: poll
keel.sh/pollSchedule: "@every 6h"
labels:
io.kompose.service: suggestarr
name: suggestarr
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: suggestarr
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: suggestarr
spec:
containers:
- image: ciuse99/suggestarr:latest
name: suggestarr
ports:
- containerPort: 5000
protocol: TCP
volumeMounts:
- mountPath: /app/config/config_files
name: ssd
subPath: configs/searcharr/config_files
restartPolicy: Always
volumes:
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-pvc
- name: hdd
persistentVolumeClaim:
claimName: nfs-hdd-pvc

View file

@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
labels:
io.kompose.service: suggestarr
name: suggestarr
spec:
type: NodePort
ports:
- name: "5000"
port: 5000
targetPort: 5000
nodePort: 30005
selector:
io.kompose.service: suggestarr

106
alaskarTV/ingress.yaml Normal file
View file

@ -0,0 +1,106 @@
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: media-services
  namespace: default
spec:
  entryPoints:
    - web
    - websecure
  routes:
    # Public-facing routes carry the umami analytics middleware; the
    # admin/tracker routes (prowlarr, radarr, sonarr) do not.
    - match: Host(`askar.tv`) || Host(`www.askar.tv`)
      kind: Rule
      middlewares:
        - name: https-redirect
        - name: analytics
          namespace: umami
      services:
        - name: jellyfin
          port: 8096
    - match: Host(`r.askar.tv`)
      kind: Rule
      middlewares:
        - name: https-redirect
        - name: analytics
          namespace: umami
      services:
        - name: jellyseerr
          port: 5055
    - match: Host(`stats.askar.tv`)
      kind: Rule
      middlewares:
        - name: https-redirect
        - name: analytics
          namespace: umami
      services:
        - name: jellystat
          port: 3000
    - match: Host(`sync.askar.tv`)
      kind: Rule
      middlewares:
        - name: https-redirect
        - name: analytics
          namespace: umami
      services:
        - name: bazarr
          port: 6767
    - match: Host(`user.askar.tv`)
      kind: Rule
      middlewares:
        - name: https-redirect
        - name: analytics
          namespace: umami
      services:
        - name: jfa-go
          port: 8056
    - match: Host(`pr.askar.tv`)
      kind: Rule
      middlewares:
        - name: https-redirect
      services:
        - name: prowlarr
          port: 9696
    - match: Host(`rr.askar.tv`)
      kind: Rule
      middlewares:
        - name: https-redirect
      services:
        - name: radarr
          port: 7878
    - match: Host(`sr.askar.tv`)
      kind: Rule
      middlewares:
        - name: https-redirect
      services:
        - name: sonarr
          port: 8989
    - match: Host(`dl.askar.tv`)
      kind: Rule
      middlewares:
        - name: https-redirect
        - name: analytics
          namespace: umami
      services:
        - name: transmission
          port: 9091
    - match: Host(`books.askar.tv`)
      kind: Rule
      middlewares:
        - name: https-redirect
        - name: analytics
          namespace: umami
      services:
        - name: audiobookshelf
          port: 13378
    - match: Host(`apo.askar.tv`)
      kind: Rule
      middlewares:
        - name: https-redirect
          namespace: default
        - name: digest-auth
      services:
        - name: api-server
          port: 9090
  tls:
    certResolver: le

View file

@ -0,0 +1,47 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
keel.sh/policy: minor
keel.sh/trigger: poll
keel.sh/pollSchedule: "@every 6h"
labels:
io.kompose.service: jfa-go
name: jfa-go
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: jfa-go
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: jfa-go
spec:
containers:
- image: hrfee/jfa-go:latest
name: jfa-go
ports:
- containerPort: 8056
protocol: TCP
volumeMounts:
- mountPath: /data
name: ssd
subPath: configs/jfa-go/config/jfa-go
- mountPath: /jf
name: ssd
subPath: configs/jellyarr/jellyfin_config
- mountPath: /etc/localtime
name: ssd
subPath: configs/jfa-go/localtime
readOnly: true
restartPolicy: Always
volumes:
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-pvc
- name: hdd
persistentVolumeClaim:
claimName: nfs-hdd-pvc

View file

@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
labels:
io.kompose.service: jfa-go
name: jfa-go
spec:
type: ClusterIP
ports:
- name: "8056"
port: 8056
targetPort: 8056
selector:
io.kompose.service: jfa-go

64
alaskarTV/pv.yaml Normal file
View file

@ -0,0 +1,64 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: nfs-ssd
spec:
capacity:
storage: 3Ti
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
csi:
driver: nfs.csi.k8s.io
volumeHandle: nfs-ssd
volumeAttributes:
server: 192.168.0.200
share: /SSD/media
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: nfs-hdd
spec:
capacity:
storage: 6Ti
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
csi:
driver: nfs.csi.k8s.io
volumeHandle: nfs-hdd
volumeAttributes:
server: 192.168.0.200
share: /HDD/media
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-ssd-pvc
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 3Ti
volumeName: nfs-ssd
storageClassName: ""
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-hdd-pvc
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 6Ti
volumeName: nfs-hdd
storageClassName: ""
---

View file

@ -0,0 +1,114 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
keel.sh/policy: minor
keel.sh/trigger: poll
keel.sh/pollSchedule: "@every 6h"
labels:
io.kompose.service: jellystat
name: jellystat
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: jellystat
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: jellystat
spec:
containers:
- env:
- name: JWT_SECRET
valueFrom:
secretKeyRef:
name: database-secrets
key: jwt
- name: POSTGRES_IP
value: jellystat-db
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: database-secrets
key: postgres-stats-password
- name: POSTGRES_PORT
value: "5432"
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
name: database-secrets
key: postgres-user
- name: TZ
value: Asia/Kuwait
image: cyfershepard/jellystat:unstable
name: jellystat
ports:
- containerPort: 3000
protocol: TCP
volumeMounts:
- mountPath: /app/backend/backup-data
name: ssd
subPath: configs/jellystat/backup-data
restartPolicy: Always
volumes:
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-pvc
- name: hdd
persistentVolumeClaim:
claimName: nfs-hdd-pvc
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
io.kompose.service: jellystat-db
name: jellystat-db
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: jellystat-db
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: jellystat-db
spec:
containers:
- env:
- name: POSTGRES_DB
value: jfstat
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: database-secrets
key: postgres-stats-password
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
name: database-secrets
key: postgres-user
- name: TZ
value: Asia/Kuwait
image: postgres:15.2
name: jellystat-db
volumeMounts:
- mountPath: /var/lib/postgresql/data
name: ssd
subPath: configs/jellystat/postgres-data
restartPolicy: Always
volumes:
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-pvc
- name: hdd
persistentVolumeClaim:
claimName: nfs-hdd-pvc

View file

@ -0,0 +1,30 @@
apiVersion: v1
kind: Service
metadata:
labels:
io.kompose.service: jellystat
name: jellystat
spec:
type: ClusterIP
ports:
- name: "3000"
port: 3000
targetPort: 3000
selector:
io.kompose.service: jellystat
---
apiVersion: v1
kind: Service
metadata:
name: jellystat-db
labels:
io.kompose.service: jellystat-db
spec:
type: ClusterIP
ports:
- port: 5432
targetPort: 5432
protocol: TCP
selector:
io.kompose.service: jellystat-db

View file

@ -0,0 +1,50 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
keel.sh/policy: minor
keel.sh/trigger: poll
keel.sh/pollSchedule: "@every 6h"
labels:
io.kompose.service: prowlarr
name: prowlarr
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: prowlarr
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: prowlarr
spec:
containers:
- env:
- name: PGID
value: "1000"
- name: PUID
value: "1000"
- name: TZ
value: Asia/Kuwait
- name: UMASK
value: "002"
image: ghcr.io/hotio/prowlarr
name: prowlarr
ports:
- containerPort: 9696
protocol: TCP
volumeMounts:
- mountPath: /config
name: ssd
subPath: configs/trackers/prowlarr_config
restartPolicy: Always
volumes:
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-pvc
- name: hdd
persistentVolumeClaim:
claimName: nfs-hdd-pvc

View file

@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
labels:
io.kompose.service: prowlarr
name: prowlarr
spec:
type: ClusterIP
ports:
- name: "9696"
port: 9696
targetPort: 9696
selector:
io.kompose.service: prowlarr

View file

@ -0,0 +1,56 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
keel.sh/policy: minor
keel.sh/trigger: poll
keel.sh/pollSchedule: "@every 6h"
labels:
io.kompose.service: radarr
name: radarr
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: radarr
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: radarr
spec:
containers:
- env:
- name: PGID
value: "1000"
- name: PUID
value: "1000"
- name: TZ
value: Asia/Kuwait
image: linuxserver/radarr:latest
name: radarr
ports:
- containerPort: 7878
protocol: TCP
volumeMounts:
- mountPath: /config
name: ssd
subPath: configs/trackers/radarr_config
- mountPath: /movies
name: ssd
subPath: movies
- mountPath: /downloads/complete
name: hdd
subPath: transmission/downloads/complete
- mountPath: /data/HDD/media
name: hdd
restartPolicy: Always
volumes:
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-pvc
- name: hdd
persistentVolumeClaim:
claimName: nfs-hdd-pvc

View file

@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
labels:
io.kompose.service: radarr
name: radarr
spec:
type: ClusterIP
ports:
- name: "7878"
port: 7878
targetPort: 7878
selector:
io.kompose.service: radarr

View file

@ -0,0 +1,54 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
keel.sh/policy: minor
keel.sh/trigger: poll
keel.sh/pollSchedule: "@every 6h"
labels:
io.kompose.service: readarr
name: readarr
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: readarr
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: readarr
spec:
containers:
- env:
- name: PGID
value: "1000"
- name: PUID
value: "1000"
- name: TZ
value: Asia/Kuwait
image: lscr.io/linuxserver/readarr:nightly
name: readarr
ports:
- containerPort: 8787
protocol: TCP
volumeMounts:
- mountPath: /config
name: ssd
subPath: configs/jellyarr/readarr_config
- mountPath: /data/ebooks
name: ssd
subPath: books/ebooks
- mountPath: /downloads/complete
name: hdd
subPath: transmission/downloads/complete
restartPolicy: Always
volumes:
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-pvc
- name: hdd
persistentVolumeClaim:
claimName: nfs-hdd-pvc

View file

@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
labels:
io.kompose.service: readarr
name: readarr
spec:
type: NodePort
ports:
- name: "8585"
port: 8585
targetPort: 8787
nodePort: 30787
selector:
io.kompose.service: readarr

View file

@ -0,0 +1,55 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
keel.sh/policy: minor
keel.sh/trigger: poll
keel.sh/pollSchedule: "@every 6h"
labels:
io.kompose.service: sonarr
name: sonarr
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: sonarr
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: sonarr
spec:
containers:
- env:
- name: PUID
value: "1000"
image: linuxserver/sonarr:latest
name: sonarr
ports:
- containerPort: 8989
protocol: TCP
volumeMounts:
- mountPath: /config
name: ssd
subPath: configs/trackers/sonarr_config
- mountPath: /tvshows
name: ssd
subPath: tvshows
- mountPath: /anime
name: ssd
subPath: anime
- mountPath: /downloads/complete
name: hdd
subPath: transmission/downloads/complete
- mountPath: /data/HDD/media
name: hdd
restartPolicy: Always
volumes:
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-pvc
- name: hdd
persistentVolumeClaim:
claimName: nfs-hdd-pvc

View file

@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
labels:
io.kompose.service: sonarr
name: sonarr
spec:
type: ClusterIP
ports:
- name: "8989"
port: 8989
targetPort: 8989
selector:
io.kompose.service: sonarr

View file

@ -0,0 +1,54 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
keel.sh/policy: minor
keel.sh/trigger: poll
keel.sh/pollSchedule: "@every 6h"
labels:
io.kompose.service: speakarr
name: speakarr
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: speakarr
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: speakarr
spec:
containers:
- env:
- name: PGID
value: "1000"
- name: PUID
value: "1000"
- name: TZ
value: Asia/Kuwait
image: lscr.io/linuxserver/readarr:nightly
name: speakarr
ports:
- containerPort: 8787
protocol: TCP
volumeMounts:
- mountPath: /config
name: ssd
subPath: configs/jellyarr/speakarr_config
- mountPath: /data/audiobooks
name: ssd
subPath: books/audiobooks
- mountPath: /downloads/complete
name: hdd
subPath: transmission/downloads/complete
restartPolicy: Always
volumes:
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-pvc
- name: hdd
persistentVolumeClaim:
claimName: nfs-hdd-pvc

View file

@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
labels:
io.kompose.service: speakarr
name: speakarr
spec:
type: NodePort
ports:
- name: "8787"
port: 8787
targetPort: 8787
nodePort: 30887
selector:
io.kompose.service: speakarr

View file

@ -0,0 +1,91 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
keel.sh/policy: minor
keel.sh/trigger: poll
keel.sh/match-tag: "true"
keel.sh/pollSchedule: "@every 6h"
labels:
io.kompose.service: tdarr
name: tdarr
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: tdarr
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: tdarr
spec:
nodeSelector:
role: master
containers:
- env:
- name: NVIDIA_DRIVER_CAPABILITIES
value: all
- name: NVIDIA_VISIBLE_DEVICES
value: all
- name: PGID
value: "1000"
- name: PUID
value: "1000"
- name: TZ
value: Asia/Kuwait
- name: UMASK_SET
value: "002"
- name: ffmpegVersion
value: "6"
- name: inContainer
value: "true"
- name: internalNode
value: "true"
- name: nodeName
value: MasterNode
- name: serverIP
value: tdarr-server
- name: serverPort
value: "8266"
- name: webUIPort
value: "8265"
image: ghcr.io/haveagitgat/tdarr:latest
name: tdarr
resources:
limits:
nvidia.com/gpu: 1
requests:
nvidia.com/gpu: 1
ports:
- containerPort: 8265
protocol: TCP
- containerPort: 8266
protocol: TCP
volumeMounts:
- mountPath: /app/server
name: ssd
subPath: configs/transcoding/docker/tdarr/server
- mountPath: /app/logs
name: ssd
subPath: configs/transcoding/docker/tdarr/logs
- mountPath: /app/configs
name: ssd
subPath: configs/transcoding/docker/tdarr/configs
- mountPath: /media
name: ssd
- mountPath: /data/HDD/media
name: hdd
- mountPath: /temp
name: ssd
subPath: configs/transcoding/transcode_cache
restartPolicy: Always
runtimeClassName: nvidia
volumes:
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-pvc
- name: hdd
persistentVolumeClaim:
claimName: nfs-hdd-pvc

View file

@ -0,0 +1,30 @@
apiVersion: v1
kind: Service
metadata:
labels:
io.kompose.service: tdarr
name: tdarr
spec:
type: NodePort
ports:
- name: "webui-tdarr"
port: 8265
targetPort: 8265
nodePort: 30265
selector:
io.kompose.service: tdarr
---
apiVersion: v1
kind: Service
metadata:
labels:
io.kompose.service: tdarr
name: tdarr-server
spec:
type: ClusterIP
ports:
- name: "server-tdarr"
port: 8266
targetPort: 8266
selector:
io.kompose.service: tdarr

View file

@ -0,0 +1,120 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: forgejo-runner
name: forgejo-runner
namespace: development
spec:
replicas: 1
selector:
matchLabels:
app: forgejo-runner
strategy: {}
template:
metadata:
creationTimestamp: null
labels:
app: forgejo-runner
spec:
restartPolicy: Always
volumes:
- name: docker-certs
emptyDir: {}
- name: docker-storage
hostPath:
path: /dockerImages
- name: runner-data
persistentVolumeClaim:
claimName: nfs-git-claim
initContainers:
- name: runner-register
securityContext:
runAsUser: 1001
runAsGroup: 1001
image: code.forgejo.org/forgejo/runner:5.0.4
command:
- sh
- -c
- |
forgejo-runner register \
--no-interactive \
--token ${RUNNER_SECRET} \
--name ${RUNNER_NAME} \
--instance ${FORGEJO_INSTANCE_URL} \
--labels "host:host,docker:docker://node:20-bullseye";
env:
- name: RUNNER_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: RUNNER_SECRET
valueFrom:
secretKeyRef:
name: forgejo-secrets
key: token
- name: FORGEJO_INSTANCE_URL
value: https://git.askar.tv
resources:
limits:
cpu: "0.50"
memory: "64Mi"
volumeMounts:
- name: runner-data
mountPath: /data
subPath: runner-data
containers:
- name: runner
image: code.forgejo.org/forgejo/runner:5.0.4
securityContext:
runAsUser: 0
runAsGroup: 0
command:
- sh
- -c
- |
apk add --no-cache docker nodejs && echo "Docker Installer";
while ! nc -z localhost 2376 </dev/null; do
echo 'Waiting for Docker daemon...';
sleep 5;
done;
# while ! docker version >/dev/null 2>&1; do
# echo 'Docker CLI is installed but the daemon is not ready yet...';
# sleep 5;
# done;
echo 'Docker daemon is ready!';
docker context create multiarch || echo "Context already exists" && \
docker buildx create multiarch --use || echo "Buildx already set up";
forgejo-runner daemon;
env:
- name: DOCKER_HOST
value: tcp://localhost:2376
- name: DOCKER_CERT_PATH
value: /certs/client
- name: DOCKER_TLS_VERIFY
value: "1"
volumeMounts:
- name: docker-certs
mountPath: /certs
- name: runner-data
mountPath: /data
subPath: runner-data
- name: daemon
image: docker:27.4.1-dind
env:
- name: DOCKER_TLS_CERTDIR
value: /certs
securityContext:
runAsUser: 0
runAsGroup: 0
privileged: true
volumeMounts:
- name: docker-certs
mountPath: /certs
- name: docker-storage
mountPath: /var/lib/docker

102
development/deployment.yaml Normal file
View file

@ -0,0 +1,102 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: forgejo
namespace: development
spec:
replicas: 1
selector:
matchLabels:
app: forgejo
template:
metadata:
labels:
app: forgejo
spec:
# nodeSelector:
# node-role.kubernetes.io/master: "true"
containers:
- name: forgejo
image: codeberg.org/forgejo/forgejo:9.0.3
ports:
- containerPort: 3000
- containerPort: 22
volumeMounts:
- name: forgejo-data
mountPath: /data
subPath: forgejo-instance
env:
- name: FORGEJO_ADMIN_USER
valueFrom:
secretKeyRef:
name: forgejo-secrets
key: admin-user
- name: FORGEJO_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: forgejo-secrets
key: admin-password
- name: FORGEJO_SECRET
valueFrom:
secretKeyRef:
name: forgejo-secrets
key: secret
volumes:
- name: forgejo-data
persistentVolumeClaim:
claimName: nfs-git-claim
---
# PostgreSQL backing database for Forgejo.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: forgejo-db
  namespace: development
  labels:
    io.kompose.service: forgejo-db
spec:
  replicas: 1
  selector:
    matchLabels:
      io.kompose.service: forgejo-db
  strategy:
    # Recreate: never run two postgres pods against the same NFS data dir.
    type: Recreate
  template:
    metadata:
      labels:
        io.kompose.service: forgejo-db
    spec:
      containers:
        - name: forgejo-db
          image: postgres:15.2
          env:
            - name: POSTGRES_DB
              value: forgejo
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: database-secrets
                  key: postgres-forgejo-password
            - name: POSTGRES_USER
              valueFrom:
                secretKeyRef:
                  name: database-secrets
                  key: postgres-user
            - name: TZ
              value: Asia/Kuwait
            # Keep data one level below the mount point, as recommended for
            # postgres on mounted volumes.
            - name: PGDATA
              value: /var/lib/postgresql/data
            # NOTE(review): PGID/PUID are linuxserver.io image conventions; the
            # official postgres image does not read them — confirm they are
            # intentional or remove.
            - name: PGID
              value: "1000"
            - name: PUID
              value: "1000"
          volumeMounts:
            - name: git
              mountPath: /var/lib/postgresql
              subPath: forgejo-db
      restartPolicy: Always
      volumes:
        - name: git
          persistentVolumeClaim:
            claimName: nfs-git-claim

36
development/ingress.yaml Normal file
View file

@ -0,0 +1,36 @@
---
# HTTP(S) routing for git.askar.tv -> forgejo web UI.
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: development-ingress
  namespace: development
spec:
  entryPoints:
    - web
    - websecure
  routes:
    - match: Host(`git.askar.tv`)
      kind: Rule
      middlewares:
        - name: https-redirect
          namespace: default
        - name: analytics
          namespace: umami
      services:
        - name: forgejo
          port: 3003
  tls:
    certResolver: le
---
# Raw-TCP routing for git-over-SSH on the "ssh" entrypoint.
# HostSNI(`*`) is required for non-TLS TCP routes.
apiVersion: traefik.io/v1alpha1
kind: IngressRouteTCP
metadata:
  name: development-ssh
  namespace: development
spec:
  entryPoints:
    - ssh
  routes:
    - match: HostSNI(`*`)
      services:
        - name: forgejo
          port: 2222

37
development/pv.yaml Normal file
View file

@ -0,0 +1,37 @@
---
# Statically provisioned NFS volume for git data (CSI NFS driver).
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-git-volume
spec:
  capacity:
    storage: 3Ti
  accessModes:
    - ReadWriteMany
  mountOptions:
    - rw
    - sync
    - noatime
    - vers=4.2
  persistentVolumeReclaimPolicy: Retain
  csi:
    driver: nfs.csi.k8s.io
    volumeHandle: nfs-git-volume
    volumeAttributes:
      server: 192.168.0.100
      share: /git
      uid: "1000"
      gid: "1000"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-git-claim
  namespace: development
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 3Ti
  volumeName: nfs-git-volume
  # Empty storageClassName disables dynamic provisioning so the claim binds
  # only to the static PV above.
  storageClassName: ""

36
development/service.yaml Normal file
View file

@ -0,0 +1,36 @@
---
# Cluster-internal endpoints for the Forgejo instance.
apiVersion: v1
kind: Service
metadata:
  name: forgejo
  namespace: development
spec:
  selector:
    app: forgejo
  ports:
    - name: instance
      port: 3003
      targetPort: 3000
    - name: ssh
      port: 2222
      targetPort: 22
---
apiVersion: v1
kind: Service
metadata:
  name: forgejo-db
  namespace: development
  labels:
    io.kompose.service: forgejo-db
spec:
  type: ClusterIP
  ports:
    - port: 5434
      targetPort: 5432
      protocol: TCP
  selector:
    io.kompose.service: forgejo-db

View file

@ -0,0 +1,30 @@
---
# Routes for services running outside the cluster (backed by the
# selector-less Services / manual Endpoints in externalservices.yaml).
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: external-services
  namespace: default
spec:
  entryPoints:
    - web
    - websecure
  routes:
    - match: Host(`az.askar.tv`)
      kind: Rule
      services:
        - name: motomo
          port: 8080
    - match: Host(`i.askar.tv`)
      kind: Rule
      services:
        - name: immich
          port: 2283
    - match: Host(`ai.askar.tv`)
      kind: Rule
      middlewares:
        - name: analytics
          namespace: umami
      services:
        - name: aichat
          port: 3002
  tls:
    certResolver: le

View file

@ -0,0 +1,61 @@
---
# Selector-less Services pointing at the external host 192.168.0.200.
# Each Endpoints object must share its Service's name to be picked up.
# NOTE(review): the Endpoints API is deprecated in favor of EndpointSlice
# in newer Kubernetes releases — confirm against the cluster version.
apiVersion: v1
kind: Service
metadata:
  name: motomo
spec:
  type: ClusterIP
  ports:
    - port: 8080
      targetPort: 8080
---
apiVersion: v1
kind: Endpoints
metadata:
  name: motomo
subsets:
  - addresses:
      - ip: 192.168.0.200
    ports:
      - port: 8080
---
apiVersion: v1
kind: Service
metadata:
  name: immich
spec:
  type: ClusterIP
  ports:
    - port: 2283
      targetPort: 2283
---
apiVersion: v1
kind: Endpoints
metadata:
  name: immich
subsets:
  - addresses:
      - ip: 192.168.0.200
    ports:
      - port: 2283
---
apiVersion: v1
kind: Service
metadata:
  name: aichat
spec:
  type: ClusterIP
  ports:
    - port: 3002
      targetPort: 3002
---
apiVersion: v1
kind: Endpoints
metadata:
  name: aichat
subsets:
  - addresses:
      - ip: 192.168.0.200
    ports:
      - port: 3002

View file

@ -0,0 +1,8 @@
---
# Digest-auth middleware; credentials live in Secret "traefik-digest-auth".
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: digest-auth
spec:
  digestAuth:
    realm: "PublishAPI"
    secret: traefik-digest-auth

View file

@ -0,0 +1,9 @@
---
# Permanent (301) HTTP -> HTTPS redirect middleware.
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: https-redirect
  namespace: default
spec:
  redirectScheme:
    scheme: https
    permanent: true

View file

@ -0,0 +1,10 @@
---
# Restrict a route to LAN clients and localhost.
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: lan-only
spec:
  ipAllowList:
    sourceRange:
      - 192.168.0.0/24
      - 127.0.0.1/32

View file

@ -0,0 +1,63 @@
# Helm values for the Traefik chart.
ports:
  web:
    port: 80
    targetPort: 80
    nodePort: 30808
  websecure:
    port: 443
    targetPort: 443
    nodePort: 30443
  ssh:
    port: 2222
    # Fixed "targetport" -> "targetPort": chart values are camelCase,
    # matching the web/websecure entries above.
    targetPort: 2222
    nodePort: 30222
service:
  type: LoadBalancer
  # The chart nests extra Service fields under service.spec.
  spec:
    # Local keeps the real client source IP (no SNAT via other nodes).
    externalTrafficPolicy: Local
nodeSelector:
  kubernetes.io/hostname: alaskarserver
additionalArguments:
  # NOTE(review): trusting X-Forwarded-* headers from 0.0.0.0/0 lets any
  # client spoof its source IP/scheme — confirm this is intended.
  - "--entrypoints.web.forwardedHeaders.trustedIPs=0.0.0.0/0"
  - "--entrypoints.websecure.forwardedHeaders.trustedIPs=0.0.0.0/0"
  - "--entrypoints.ssh.address=:2222"
  - "--providers.kubernetescrd.allowCrossNamespace=true"
  - "--log.level=INFO"
ingressClass:
  enabled: true
  isDefaultClass: true
  name: traefik-ingress
persistence:
  enabled: true
  existingClaim: nfs-ssd-traefik-pvc  # persistent storage for ACME certificates
  subPath: certs
# NOTE(review): recent chart versions expect this under deployment.initContainers
# — confirm against the chart version in use.
initContainers:
  # acme.json must exist with 0600 permissions or Traefik refuses to start.
  - name: volume-permissions
    image: busybox:latest
    command: ["sh", "-c", "mkdir -p /data && touch /data/acme.json && chmod 600 /data/acme.json"]
    volumeMounts:
      - name: data
        mountPath: /data
certificatesResolvers:
  le:
    acme:
      email: sager@alaskar.dev
      storage: /data/acme.json
      httpChallenge:
        entryPoint: web
experimental:
  plugins:
    traefik-umami-feeder:
      moduleName: "github.com/astappiev/traefik-umami-feeder"
      version: "v1.2.0"

View file

@ -0,0 +1,31 @@
---
# Static NFS volume backing Traefik's ACME certificate storage
# (mounted with subPath "certs" via the chart's persistence settings).
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-ssd-traefik
spec:
  capacity:
    storage: 3Ti
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  csi:
    driver: nfs.csi.k8s.io
    volumeHandle: nfs-ssd-traefik
    volumeAttributes:
      server: 192.168.0.200
      share: /SSD/media
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-ssd-traefik-pvc
  namespace: traefik
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 3Ti
  volumeName: nfs-ssd-traefik
  # Empty storageClassName disables dynamic provisioning so the claim binds
  # only to the static PV above.
  storageClassName: ""