Compare commits


2 commits

Author     SHA1        Message               Date
nomadics9  8050884f51  debrid syslinks base  2025-02-01 22:13:16 +03:00
nomadics9  ffc9d0ebd1  debrid syslinks base  2025-02-01 22:13:08 +03:00
41 changed files with 1372 additions and 232 deletions

.gitignore vendored

@@ -1,3 +1,4 @@
secrets
loadBalancer/middlewares/umami/
docker-compose.yml
dockerCompose/debrid-syslinks/torRclone/rclone.conf
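
A quick check that the new ignore rule actually matches, run from the repository root (git check-ignore reports which pattern caught the path):

git check-ignore -v dockerCompose/debrid-syslinks/torRclone/rclone.conf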


@@ -0,0 +1,26 @@
#!/bin/bash
echo "Do you want to copy:"
echo "1) Into worker"
echo "2) From worker"
read -p "Enter your choice (1 or 2): " choice
if [[ "$choice" == "1" ]]; then
echo "Copying into worker..."
sudo cp -r /SSD/media/configs/jellyarr/jellyfin_config/* /SSD/media/configs/jellyarr/jellyfin_worker_config/
echo "Finished!"
elif [[ "$choice" == "2" ]]; then
read -p "Are you sure you want to copy FROM worker? (y/n): " confirm
if [[ "$confirm" == "y" || "$confirm" == "Y" ]]; then
echo "Copying from worker..."
sudo cp -r /SSD/media/configs/jellyarr/jellyfin_worker_config/* /SSD/media/configs/jellyarr/jellyfin_config/
echo "Finished!"
else
echo "Operation canceled."
fi
else
echo "Invalid choice. Please enter 1 or 2."
fi
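
A hedged usage sketch: Jellyfin keeps SQLite databases inside its config directory, so it is safer to stop the worker before copying over it. The script name and the scale commands are assumptions, based on the jellyfin-worker Deployment defined further down:

kubectl scale deployment/jellyfin-worker --replicas=0
sudo bash copy_config.sh   # choose option 1 to copy into the worker
kubectl scale deployment/jellyfin-worker --replicas=1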


@@ -0,0 +1,86 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
keel.sh/policy: minor
keel.sh/trigger: poll
keel.sh/pollSchedule: "@every 6h"
labels:
io.kompose.service: jellyfin-worker
name: jellyfin-worker
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: jellyfin-worker
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: jellyfin-worker
spec:
nodeSelector:
role: worker
securityContext:
supplementalGroups:
- 105
runAsUser: 1000
runAsGroup: 1000
containers:
- env:
- name: PGID
value: "1000"
- name: PUID
value: "1000"
- name: TZ
value: Asia/Kuwait
securityContext:
privileged: true
image: nomadics/alaskarfin:latest
name: jellyfin
ports:
- containerPort: 8096
protocol: TCP
volumeMounts:
- mountPath: /config
name: ssd
subPath: configs/jellyarr/jellyfin_worker_config
- mountPath: /data/tvshows
name: ssd
subPath: tvshows
- mountPath: /data/movies
name: ssd
subPath: movies
- mountPath: /data/anime
name: ssd
subPath: anime
- mountPath: /data/books/audiobooks
name: ssd
subPath: books/audiobooks
- mountPath: /data/books/ebooks
name: ssd
subPath: books/ebooks
- mountPath: /HDD
name: hdd
- mountPath: /mnt/zurg/__all__
name: rclone-data
subPath: __all__
- name: render-d128
mountPath: /dev/dri/renderD128
restartPolicy: Always
volumes:
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-pvc
- name: hdd
persistentVolumeClaim:
claimName: nfs-hdd-pvc
- name: rclone-data
persistentVolumeClaim:
claimName: rclone-data-pvc
- name: render-d128
hostPath:
path: /dev/dri/renderD128
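
A minimal rollout check for this Deployment; the manifest file name is an assumption:

kubectl apply -f jellyfin-worker-deployment.yaml
kubectl rollout status deployment/jellyfin-worker
kubectl get pod -l io.kompose.service=jellyfin-worker -o wide   # should land on a node labeled role=worker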


@@ -22,6 +22,10 @@ spec:
spec:
nodeSelector:
role: master
securityContext:
supplementalGroups:
- 109
- 44
containers:
- env:
- name: PGID
@@ -31,9 +35,11 @@ spec:
- name: TZ
value: Asia/Kuwait
- name: NVIDIA_VISIBLE_DEVICES
value: all # Make all GPUs visible
value: all
- name: NVIDIA_DRIVER_CAPABILITIES
value: all # Required for Jellyfin hardware acceleration
value: all
securityContext:
privileged: true
image: nomadics/alaskarfin:latest
name: jellyfin
ports:
@@ -63,11 +69,13 @@ spec:
- mountPath: /data/books/ebooks
name: ssd
subPath: books/ebooks
- mountPath: /data/HDD/media
- mountPath: /HDD
name: hdd
- mountPath: /data/rclone/__all__
- mountPath: /mnt/zurg/__all__
name: rclone-data
path: __all__
subPath: __all__
- name: render-d128
mountPath: /dev/dri/renderD128
restartPolicy: Always
runtimeClassName: nvidia
volumes:
@@ -80,4 +88,9 @@ spec:
- name: rclone-data
persistentVolumeClaim:
claimName: rclone-data-pvc
- name: render-d128
hostPath:
path: /dev/dri/renderD128


@@ -12,3 +12,21 @@ spec:
targetPort: 8096
selector:
io.kompose.service: jellyfin
---
apiVersion: v1
kind: Service
metadata:
labels:
io.kompose.service: jellyfin-worker
name: jellyfin-worker
spec:
type: ClusterIP
ports:
- name: "8096"
port: 8096
targetPort: 8096
selector:
io.kompose.service: jellyfin-worker
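
Both Services are ClusterIP, so a port-forward is one way to smoke-test the worker from outside the cluster; recent Jellyfin builds answer on /health:

kubectl port-forward svc/jellyfin-worker 8096:8096 &
curl -fsS http://localhost:8096/health   # expect "Healthy"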


@@ -7,8 +7,12 @@ data:
#!/bin/bash
psql -U postgres <<-EOSQL
CREATE DATABASE "radarr-log";
CREATE DATABASE "radarr4k-main";
CREATE DATABASE "radarr4k-log";
CREATE DATABASE "sonarr-main";
CREATE DATABASE "sonarr4k-main";
CREATE DATABASE "sonarr-log";
CREATE DATABASE "sonarr4k-log";
    CREATE DATABASE "bazarr";
CREATE DATABASE "prowlarr-main";
CREATE DATABASE "prowlarr-log";
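
A hedged way to confirm the init script actually created the new databases, assuming the Postgres Deployment is named postgres:

kubectl exec -it deploy/postgres -- psql -U postgres -c '\l'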


@@ -20,6 +20,8 @@ spec:
labels:
io.kompose.service: bazarr
spec:
nodeSelector:
role: master
containers:
- env:
- name: DOCKER_MODS
@@ -54,7 +56,7 @@ spec:
subPath: anime
- mountPath: /HDD
name: hdd
- mountPath: /data/rclone/__all__
- mountPath: /mnt/zurg/__all__
name: rclone-data
subPath: __all__
restartPolicy: Always
@@ -69,3 +71,76 @@ spec:
persistentVolumeClaim:
claimName: rclone-data-pvc
---
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
keel.sh/policy: minor
keel.sh/trigger: poll
keel.sh/pollSchedule: "@every 6h"
labels:
io.kompose.service: bazarr4k
name: bazarr4k
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: bazarr4k
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: bazarr4k
spec:
nodeSelector:
role: worker
containers:
- env:
- name: DOCKER_MODS
value: wayller/bazarr-mod-subsync:latest
- name: PGID
value: "1000"
- name: PUID
value: "1000"
- name: TZ
value: Asia/Kuwait
image: linuxserver/bazarr:latest
name: bazarr
ports:
- containerPort: 6767
protocol: TCP
# lifecycle:
# postStart:
# exec:
# command: ["/bin/sh", "-c", "cp /config/batch_sync.py / && pip install colorama tqdm autosubsync"]
volumeMounts:
- mountPath: /config
name: ssd
subPath: configs/trackers/bazarr4k_config
- mountPath: /tvshows
name: ssd
subPath: tvshows
- mountPath: /movies
name: ssd
subPath: movies
- mountPath: /anime
name: ssd
subPath: anime
- mountPath: /HDD
name: hdd
- mountPath: /mnt/zurg/__all__
name: rclone-data
subPath: __all__
restartPolicy: Always
volumes:
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-pvc
- name: hdd
persistentVolumeClaim:
claimName: nfs-hdd-pvc
- name: rclone-data
persistentVolumeClaim:
claimName: rclone-data-pvc


@@ -12,3 +12,19 @@ spec:
targetPort: 6767
selector:
io.kompose.service: bazarr
---
apiVersion: v1
kind: Service
metadata:
labels:
io.kompose.service: bazarr4k
name: bazarr4k
spec:
type: ClusterIP
ports:
- name: "6767"
port: 6767
targetPort: 6767
selector:
io.kompose.service: bazarr4k


@@ -1,138 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: zurg
spec:
replicas: 1
selector:
matchLabels:
app: zurg
template:
metadata:
labels:
app: zurg
spec:
containers:
- name: zurg
image: ghcr.io/debridmediamanager/zurg-testing:latest
ports:
- containerPort: 9999
volumeMounts:
- name: ssd
mountPath: /app/jellyfin_update.sh
subPath: configs/zurg/app/jellyfin_update.sh
- name: ssd
mountPath: /app/config.yml
subPath: configs/zurg/app/config.yml
- name: rclone-data
mountPath: /app/data
volumes:
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-pvc
- name: rclone-data
persistentVolumeClaim:
claimName: rclone-data-pvc
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: rclone
spec:
replicas: 1
selector:
matchLabels:
app: rclone
template:
metadata:
labels:
app: rclone
spec:
containers:
- name: rclone
image: rclone/rclone:latest
env:
- name: TZ
value: Asia/Kuwait
securityContext:
capabilities:
add: ["SYS_ADMIN"]
privileged: true
volumeMounts:
- name: rclone-data
mountPath: /data
- name: ssd
mountPath: /config/rclone/rclone.conf
subPath: configs/rclone/rclone.conf
- name: dev-fuse
mountPath: /dev/fuse
command: ["rclone", "mount", "zurg:", "/data", "--allow-other", "--umask=0022", "--uid=1000", "--gid=1000", "--dir-perms=0775", "--file-perms=0664", "--poll-interval", "30s", "--allow-non-empty", "--allow-root", "--vfs-cache-mode", "full", "--dir-cache-time", "10s", "--cache-dir", "/SSD/media/configs/rclone/cache"]
volumes:
- name: dev-fuse
hostPath:
path: /dev/fuse
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-pvc
- name: rclone-data
persistentVolumeClaim:
claimName: rclone-data-pvc
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: rdtclient
spec:
replicas: 1
selector:
matchLabels:
app: rdtclient
template:
metadata:
labels:
app: rdtclient
spec:
containers:
- name: rdtclient
image: rogerfar/rdtclient
env:
- name: PUID
value: "1000"
- name: PGID
value: "1000"
- name: TZ
value: Asia/Kuwait
ports:
- containerPort: 6500
volumeMounts:
- name: hdd
mountPath: /data/downloads
subPath: media/transmission/downloads/complete/
- name: ssd
mountPath: /data/db
subPath: configs/rdtDB
- name: rclone-data
mountPath: /data/rclone/__all__
subPath: __all__
livenessProbe:
httpGet:
path: /
port: 6500
initialDelaySeconds: 30
periodSeconds: 30
timeoutSeconds: 30
failureThreshold: 3
volumes:
- name: hdd
persistentVolumeClaim:
claimName: nfs-hdd-pvc
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-pvc
- name: rclone-data
persistentVolumeClaim:
claimName: rclone-data-pvc


@@ -1,33 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
labels:
app: zurg
name: zurg
spec:
type: ClusterIP
ports:
- name: "9999"
port: 9999
targetPort: 9999
selector:
app: zurg
---
apiVersion: v1
kind: Service
metadata:
labels:
app: rdtclient
name: rdtclient
spec:
type: ClusterIP
ports:
- name: "6500"
port: 6500
targetPort: 6500
selector:
app: rdtclient
---


@@ -9,6 +9,15 @@ spec:
- websecure
routes:
- match: Host(`askar.tv`) || Host(`www.askar.tv`)
kind: Rule
middlewares:
        - name: https-redirect
        - name: analytics
          namespace: umami
services:
- name: jellyfin-worker
port: 8096
- match: Host(`gpu.askar.tv`)
kind: Rule
middlewares:
        - name: https-redirect
@@ -44,6 +53,15 @@ spec:
services:
- name: bazarr
port: 6767
- match: Host(`sync4k.askar.tv`)
kind: Rule
middlewares:
        - name: https-redirect
        - name: analytics
          namespace: umami
services:
- name: bazarr4k
port: 6767
- match: Host(`user.askar.tv`)
kind: Rule
middlewares:
@@ -67,6 +85,13 @@ spec:
services:
- name: radarr
port: 7878
- match: Host(`rr4k.askar.tv`)
kind: Rule
middlewares:
        - name: https-redirect
services:
- name: radarr4k
port: 7878
- match: Host(`sr.askar.tv`)
kind: Rule
middlewares:
@@ -74,6 +99,13 @@ spec:
services:
- name: sonarr
port: 8989
- match: Host(`sr4k.askar.tv`)
kind: Rule
middlewares:
        - name: https-redirect
services:
- name: sonarr4k
port: 8989
- match: Host(`dl.askar.tv`)
kind: Rule
middlewares:
@@ -113,4 +145,29 @@ spec:
tls:
certResolver: le
---
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
name: media-services-internal
namespace: default
spec:
entryPoints:
- web
- websecure
routes:
- match: Host(`internal.askar.tv`)
kind: Rule
middlewares:
- name: lan-only
services:
- name: jellyfin-worker
port: 8096
- match: Host(`internal2.askar.tv`)
kind: Rule
middlewares:
- name: lan-only
services:
- name: jellyseerr
port: 5055
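
To verify the new internal routes, one option is to query Traefik directly with a spoofed Host header (the load-balancer address is a placeholder, and the lan-only middleware must admit the caller's source IP):

kubectl get ingressroute media-services-internal -n default
curl -k -H 'Host: internal.askar.tv' https://<traefik-lb-ip>/   # should reach jellyfin-worker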


@@ -9,32 +9,11 @@ spec:
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
csi:
driver: nfs.csi.k8s.io
volumeHandle: nfs-ssd
volumeAttributes:
server: 192.168.0.200
share: /SSD/media
nfs:
server: 192.168.0.200
path: /SSD/media
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: rclone-data
spec:
capacity:
storage: 3Ti
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
csi:
driver: nfs.csi.k8s.io
volumeHandle: rclone-data
volumeAttributes:
server: 192.168.0.200
share: /SSD/media/rclone-data
---
---
apiVersion: v1
kind: PersistentVolume
metadata:
@@ -45,26 +24,55 @@ spec:
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
csi:
driver: nfs.csi.k8s.io
volumeHandle: nfs-hdd
volumeAttributes:
server: 192.168.0.200
share: /HDD
nfs:
server: 192.168.0.200
path: /HDD
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: alaskarserver-rclone
name: rclone-data
spec:
capacity:
storage: 3Ti
storage: 6Ti
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
hostPath:
path: /mnt/zurg/__all__
csi:
driver: csi-rclone
volumeHandle: rclone-data
volumeAttributes:
remote: "webdav"
remotePath: "/"
webdav-url: "http://192.168.0.200:30999/dav/"
allow-other: "true"
umask: "0022"
uid: "1000"
gid: "1000"
dir-perms: "0777"
file-perms: "0666"
poll-interval: "30s"
allow-non-empty: "true"
vfs-cache-mode: "full"
dir-cache-time: "10s"
# vfs-cache-max-age: "5m"
cache-dir: "/HDD/rclone-cache"
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: mnt
spec:
capacity:
storage: 1Ti
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
hostPath:
path: /mnt
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
@@ -88,7 +96,7 @@ spec:
- ReadWriteMany
resources:
requests:
storage: 3Ti
storage: 6Ti
volumeName: rclone-data
storageClassName: ""
@@ -109,12 +117,13 @@ spec:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: alaskarserver-rclone-pvc
name: mnt-pvc
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 3Ti
volumeName: alaskarserver-rclone
storage: 1Ti
volumeName: mnt
storageClassName: ""
---
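
Before relying on the rclone-data PV, the WebDAV endpoint it targets can be probed with an on-the-fly rclone remote, and the bindings checked; a sketch:

rclone lsd :webdav: --webdav-url http://192.168.0.200:30999/dav/ --webdav-vendor other
kubectl get pv rclone-data mnt
kubectl get pvc rclone-data-pvc mnt-pvc   # both should report Bound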


@@ -20,6 +20,11 @@ spec:
labels:
io.kompose.service: radarr
spec:
securityContext:
runAsUser: 1000
runAsGroup: 1000
nodeSelector:
role: master
containers:
- env:
- name: PGID
@@ -33,6 +38,8 @@ spec:
ports:
- containerPort: 7878
protocol: TCP
securityContext:
privileged: true
volumeMounts:
- mountPath: /config
name: ssd
@@ -40,16 +47,13 @@ spec:
- mountPath: /movies
name: ssd
subPath: movies
# - mountPath: /downloads/complete
# name: hdd
# subPath: transmission/downloads/complete/
# - mountPath: /data/HDD/media
# name: hdd
- mountPath: /HDD
name: hdd
- mountPath: /data/rclone/__all__
name: rclone-data
subPath: __all__
        - mountPath: /data/downloads
name: ssd
subPath: downloads
- mountPath: /mnt
name: mnt
restartPolicy: Always
volumes:
- name: ssd
@@ -61,4 +65,79 @@ spec:
- name: rclone-data
persistentVolumeClaim:
claimName: rclone-data-pvc
- name: mnt
persistentVolumeClaim:
claimName: mnt-pvc
---
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
keel.sh/policy: minor
keel.sh/trigger: poll
keel.sh/pollSchedule: "@every 6h"
labels:
io.kompose.service: radarr4k
name: radarr4k
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: radarr4k
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: radarr4k
spec:
securityContext:
runAsUser: 1000
runAsGroup: 1000
nodeSelector:
role: master
containers:
- env:
- name: PGID
value: "1000"
- name: PUID
value: "1000"
- name: TZ
value: Asia/Kuwait
image: linuxserver/radarr:latest
name: radarr
ports:
- containerPort: 7878
protocol: TCP
securityContext:
privileged: true
volumeMounts:
- mountPath: /config
name: ssd
subPath: configs/trackers/radarr4k_config
- mountPath: /movies
name: ssd
subPath: movies
- mountPath: /HDD
name: hdd
        - mountPath: /data/downloads
name: ssd
subPath: downloads
- mountPath: /mnt
name: mnt
restartPolicy: Always
volumes:
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-pvc
- name: hdd
persistentVolumeClaim:
claimName: nfs-hdd-pvc
- name: rclone-data
persistentVolumeClaim:
claimName: rclone-data-pvc
- name: mnt
persistentVolumeClaim:
claimName: mnt-pvc


@@ -13,3 +13,19 @@ spec:
selector:
io.kompose.service: radarr
---
apiVersion: v1
kind: Service
metadata:
labels:
io.kompose.service: radarr4k
name: radarr4k
spec:
type: ClusterIP
ports:
- name: "7878"
port: 7878
targetPort: 7878
selector:
io.kompose.service: radarr4k


@@ -20,10 +20,16 @@ spec:
labels:
io.kompose.service: sonarr
spec:
nodeSelector:
role: master
containers:
- env:
- name: PGID
value: "1000"
- name: PUID
value: "1000"
- name: TZ
value: Asia/Kuwait
image: linuxserver/sonarr:latest
name: sonarr
ports:
@@ -39,14 +45,13 @@ spec:
- mountPath: /anime
name: ssd
subPath: anime
- mountPath: /downloads/complete
name: hdd
subPath: transmission/downloads/complete
- mountPath: /HDD
name: hdd
- mountPath: /data/rclone/__all__
name: rclone-data
subPath: __all__
        - mountPath: /data/downloads/complete
name: hdd
subPath: media/transmission/downloads/complete
- mountPath: /mnt
name: mnt
restartPolicy: Always
volumes:
- name: ssd
@@ -55,7 +60,74 @@ spec:
- name: hdd
persistentVolumeClaim:
claimName: nfs-hdd-pvc
- name: rclone-data
- name: mnt
persistentVolumeClaim:
claimName: rclone-data-pvc
claimName: mnt-pvc
---
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
keel.sh/policy: minor
keel.sh/trigger: poll
keel.sh/pollSchedule: "@every 6h"
labels:
io.kompose.service: sonarr4k
name: sonarr4k
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: sonarr4k
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: sonarr4k
spec:
nodeSelector:
role: master
containers:
- env:
- name: PGID
value: "1000"
- name: PUID
value: "1000"
- name: TZ
value: Asia/Kuwait
image: linuxserver/sonarr:latest
name: sonarr
ports:
- containerPort: 8989
protocol: TCP
volumeMounts:
- mountPath: /config
name: ssd
subPath: configs/trackers/sonarr4k_config
- mountPath: /tvshows
name: ssd
subPath: tvshows
- mountPath: /anime
name: ssd
subPath: anime
- mountPath: /HDD
name: hdd
        - mountPath: /data/downloads/complete
name: hdd
subPath: media/transmission/downloads/complete
- mountPath: /mnt
name: mnt
restartPolicy: Always
volumes:
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-pvc
- name: hdd
persistentVolumeClaim:
claimName: nfs-hdd-pvc
- name: mnt
persistentVolumeClaim:
claimName: mnt-pvc


@@ -13,3 +13,19 @@ spec:
selector:
io.kompose.service: sonarr
---
apiVersion: v1
kind: Service
metadata:
labels:
io.kompose.service: sonarr4k
name: sonarr4k
spec:
type: ClusterIP
ports:
- name: "8989"
port: 8989
targetPort: 8989
selector:
io.kompose.service: sonarr4k
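
A quick sanity check that the whole 4K stack came up and its Services found their pods:

kubectl get deploy radarr4k sonarr4k bazarr4k
kubectl get endpoints radarr4k sonarr4k bazarr4k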


@@ -0,0 +1,11 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: rclone-config
data:
rclone.conf: |
[webdav]
type = webdav
url = http://192.168.0.200:30999/dav/
vendor = other


@@ -0,0 +1,33 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: zurg
spec:
replicas: 1
selector:
matchLabels:
app: zurg
template:
metadata:
labels:
app: zurg
spec:
containers:
- name: zurg
image: ghcr.io/debridmediamanager/zurg-testing:latest
ports:
- containerPort: 9999
volumeMounts:
- name: ssd
mountPath: /app/jellyfin_update.sh
subPath: configs/zurg/app/jellyfin_update.sh
- name: ssd
mountPath: /app/config.yml
subPath: configs/zurg/app/config.yml
- name: ssd
mountPath: /app/data
subPath: configs/zurg/app/data
volumes:
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-pvc


@@ -0,0 +1,16 @@
---
apiVersion: v1
kind: Service
metadata:
labels:
app: zurg
name: zurg
spec:
type: NodePort
ports:
- name: "9999"
port: 9999
targetPort: 9999
nodePort: 30999
selector:
app: zurg
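
With the NodePort in place, the WebDAV tree zurg serves should be reachable from any node's IP; a hedged probe (zurg normally returns a browsable listing at /dav/):

curl -fsS http://192.168.0.200:30999/dav/ | head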


@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: csi-rclone


@@ -0,0 +1,66 @@
# This YAML file contains RBAC API objects that are necessary to run external
# CSI attacher for rclone adapter
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-controller-rclone
namespace: csi-rclone
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-controller-rclone
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "patch", "update", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["csi.storage.k8s.io"]
resources: ["csinodeinfos"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update", "create", "delete"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "create", "update"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-attacher-role-rclone
subjects:
- kind: ServiceAccount
name: csi-controller-rclone
namespace: csi-rclone
roleRef:
kind: ClusterRole
name: external-controller-rclone
apiGroup: rbac.authorization.k8s.io


@@ -0,0 +1,69 @@
# This YAML file contains attacher & csi driver API objects that are necessary
# to run external CSI attacher for rclone
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: csi-controller-rclone
namespace: csi-rclone
spec:
serviceName: "csi-controller-rclone"
replicas: 1
selector:
matchLabels:
app: csi-controller-rclone
template:
metadata:
labels:
app: csi-controller-rclone
spec:
serviceAccountName: csi-controller-rclone
containers:
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v5.0.2
args:
- "--csi-address=$(ADDRESS)"
- "--extra-create-metadata"
# - "--leader-election"
- "--v=1"
env:
- name: ADDRESS
value: /plugin/csi.sock
imagePullPolicy: "Always"
volumeMounts:
- name: socket-dir
mountPath: /plugin
- name: csi-attacher
image: k8s.gcr.io/sig-storage/csi-attacher:v3.4.0
args:
- "--csi-address=$(ADDRESS)"
- "--v=1"
# - "--leader-election"
env:
- name: ADDRESS
value: /plugin/csi.sock
imagePullPolicy: "Always"
volumeMounts:
- name: socket-dir
mountPath: /plugin
- name: rclone
image: wunderio/csi-rclone:v3.0.0
          args:
- "/bin/csi-rclone-plugin"
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--v=1"
env:
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix://plugin/csi.sock
imagePullPolicy: "Always"
volumeMounts:
- name: socket-dir
mountPath: /plugin
volumes:
- name: socket-dir
emptyDir: {}


@@ -0,0 +1,8 @@
# this should be deregistered once the controller stops
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: csi-rclone
spec:
attachRequired: true
podInfoOnMount: true


@@ -0,0 +1,40 @@
# This YAML defines all API objects to create RBAC roles for CSI node plugin
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-nodeplugin-rclone
namespace: csi-rclone
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-nodeplugin-rclone
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["secrets","secret"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-nodeplugin-rclone
subjects:
- kind: ServiceAccount
name: csi-nodeplugin-rclone
namespace: csi-rclone
roleRef:
kind: ClusterRole
name: csi-nodeplugin-rclone
apiGroup: rbac.authorization.k8s.io


@@ -0,0 +1,83 @@
# This YAML file contains driver-registrar & csi driver nodeplugin API objects
# that are necessary to run CSI nodeplugin for rclone
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: csi-nodeplugin-rclone
namespace: csi-rclone
spec:
selector:
matchLabels:
app: csi-nodeplugin-rclone
template:
metadata:
labels:
app: csi-nodeplugin-rclone
spec:
serviceAccountName: csi-nodeplugin-rclone
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: node-driver-registrar
image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0
lifecycle:
preStop:
exec:
command: ["/bin/sh", "-c", "rm -rf /registration/csi-rclone /registration/csi-rclone-reg.sock"]
args:
- --v=1
- --csi-address=/plugin/csi.sock
- --kubelet-registration-path=/var/lib/kubelet/plugins/csi-rclone/csi.sock
env:
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: plugin-dir
mountPath: /plugin
- name: registration-dir
mountPath: /registration
- name: rclone
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: wunderio/csi-rclone:v3.0.0
args:
- "/bin/csi-rclone-plugin"
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--v=1"
env:
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix://plugin/csi.sock
imagePullPolicy: "Always"
lifecycle:
postStart:
exec:
command: ["/bin/sh", "-c", "mount -t fuse.rclone | while read -r mount; do umount $(echo $mount | awk '{print $3}') || true ; done"]
volumeMounts:
- name: plugin-dir
mountPath: /plugin
- name: pods-mount-dir
mountPath: /var/lib/kubelet/pods
mountPropagation: "Bidirectional"
volumes:
- name: plugin-dir
hostPath:
path: /var/lib/kubelet/plugins/csi-rclone
type: DirectoryOrCreate
- name: pods-mount-dir
hostPath:
path: /var/lib/kubelet/pods
type: Directory
- hostPath:
path: /var/lib/kubelet/plugins_registry
type: DirectoryOrCreate
name: registration-dir


@@ -0,0 +1,8 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: rclone
# You will need to delete storageclass to update this field
provisioner: csi-rclone
# parameters:
# pathPattern: "${.PVC.namespace}/${.PVC.annotations.csi-rclone/storage-path}"
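
After applying the csi-rclone namespace, RBAC, controller, node plugin, and StorageClass, a short verification that the driver registered:

kubectl -n csi-rclone get pods
kubectl get csidriver csi-rclone
kubectl get storageclass rclone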


@@ -0,0 +1,151 @@
#------------------------------------------------------#
# ███████╗ ██████╗██████╗ ██╗██████╗ ████████╗███████╗ #
# ██╔════╝██╔════╝██╔══██╗██║██╔══██╗╚══██╔══╝██╔════╝ #
# ███████╗██║ ██████╔╝██║██████╔╝ ██║ ███████╗ #
# ╚════██║██║ ██╔══██╗██║██╔═══╝ ██║ ╚════██║ #
# ███████║╚██████╗██║ ██║██║██║ ██║ ███████║ #
# ╚══════╝ ╚═════╝╚═╝ ╚═╝╚═╝╚═╝ ╚═╝ ╚══════╝ #
#------------------------------------------------------#
#--------#
# SERVER #
#--------#
SERVER_DOMAIN=askar.tv
#-------------------------------------------------------------------#
# PLEX - WATCHLIST, PLEX AUTHENTICATION, PLEX REQUEST, PLEX REFRESH #
#-------------------------------------------------------------------#
# PLEX_HOST="https://askar.tv/"
# PLEX_METADATA_HOST="https://metadata.provider.plex.tv/"
# PLEX_SERVER_HOST=<plex_server_host>
# PLEX_SERVER_MACHINE_ID=<plex_server_machine_id>
# PLEX_SERVER_API_KEY=<plex_server_api_key>
# PLEX_SERVER_MOVIE_LIBRARY_ID=<plex_server_movie_library_id>
# PLEX_SERVER_TV_SHOW_LIBRARY_ID=<plex_server_tv_show_library_id>
# PLEX_SERVER_PATH=<plex_server_path>
#-------------------------------------------------------------------------#
# OVERSEERR - WATCHLIST, PLEX AUTHENTICATION, PLEX REQUEST, RECLAIM SPACE #
#-------------------------------------------------------------------------#
OVERSEERR_HOST=r.askar.tv
OVERSEERR_API_KEY=d61488fd81c24cea9b465013e105f783
#------------------------------------------------------------------------------------#
# SONARR - BLACKHOLE, REPAIR, IMPORT TORRENT FOLDER, RECLAIM SPACE, ADD NEXT EPISODE #
#------------------------------------------------------------------------------------#
SONARR_HOST=sr.askar.tv
SONARR_API_KEY=d973448580d041b7ba7e576a7aed9b11
SONARR_ROOT_FOLDER=/mnt/unionfs/Media/TV
SONARR_HOST_4K=sr.askar.tv
SONARR_API_KEY_4K=d973448580d041b7ba7e576a7aed9b11
SONARR_ROOT_FOLDER_4K=/mnt/unionfs/Media/TV
SONARR_HOST_ANIME=sr.askar.tv
SONARR_API_KEY_ANIME=d973448580d041b7ba7e576a7aed9b11
SONARR_ROOT_FOLDER_ANIME=/mnt/unionfs/Media/Anime
# SONARR_HOST_MUX=<sonarr_host_mux>
# SONARR_API_KEY_MUX=<sonarr_api_key_mux>
# SONARR_ROOT_FOLDER_MUX=<sonarr_root_folder_mux>
#------------------------------------------------------------------#
# RADARR - BLACKHOLE, REPAIR, IMPORT TORRENT FOLDER, RECLAIM SPACE #
#------------------------------------------------------------------#
RADARR_HOST=rr.askar.tv
RADARR_API_KEY=5bdad7cc4f33443bafefb5e185b3e0e7
RADARR_ROOT_FOLDER=/mnt/unionfs/Media/Movies
RADARR_HOST_4K=rr.askar.tv
RADARR_API_KEY_4K=5bdad7cc4f33443bafefb5e185b3e0e7
RADARR_ROOT_FOLDER_4K=/mnt/unionfs/Media/Movies
#
# RADARR_HOST_ANIME=<radarr_host_anime>
# RADARR_API_KEY_ANIME=<radarr_api_key_anime>
# RADARR_ROOT_FOLDER_ANIME=<radarr_root_folder_anime>
#
# RADARR_HOST_MUX=<radarr_host_mux>
# RADARR_API_KEY_MUX=<radarr_api_key_mux>
# RADARR_ROOT_FOLDER_MUX=<radarr_root_folder_mux>
#--------------------------#
# TAUTULLI - RECLAIM SPACE #
#--------------------------#
# TAUTULLI_HOST=<tautulli_host>
# TAUTULLI_API_KEY=<tautulli_api_key>
#-------------------------------#
# REALDEBRID - BLACKHOLE, REPAIR #
#-------------------------------#
# REALDEBRID_ENABLED=false
# REALDEBRID_HOST="https://api.real-debrid.com/rest/1.0/"
# REALDEBRID_API_KEY=<realdebrid_api_key>
# REALDEBRID_MOUNT_TORRENTS_PATH=
#---------------------------#
# TORBOX - BLACKHOLE, REPAIR #
#---------------------------#
TORBOX_ENABLED=true
TORBOX_HOST="https://api.torbox.app/v1/api/"
TORBOX_API_KEY=bd719193-a038-47ba-b2af-c5aeb1593196
TORBOX_MOUNT_TORRENTS_PATH=/mnt/remote/torbox
#-----------------------#
# TRAKT - RECLAIM SPACE #
#-----------------------#
# TRAKT_API_KEY=<trakt_api_key>
#-------------------------------------#
# WATCHLIST - WATCHLIST, PLEX REQUEST #
#-------------------------------------#
# WATCHLIST_PLEX_PRODUCT="Plex Request Authentication"
# WATCHLIST_PLEX_VERSION="1.0.0"
# WATCHLIST_PLEX_CLIENT_IDENTIFIER="576101fc-b425-4685-91cb-5d3c1671fd2b"
#-----------------------#
# BLACKHOLE - BLACKHOLE #
#-----------------------#
BLACKHOLE_BASE_WATCH_PATH="/mnt/symlinks"
BLACKHOLE_RADARR_PATH="radarr"
BLACKHOLE_SONARR_PATH="sonarr"
BLACKHOLE_FAIL_IF_NOT_CACHED=true
BLACKHOLE_RD_MOUNT_REFRESH_SECONDS=200
BLACKHOLE_WAIT_FOR_TORRENT_TIMEOUT=60
BLACKHOLE_HISTORY_PAGE_SIZE=500
#-----------------------------------------------------------------------------------------------#
# DISCORD - BLACKHOLE, WATCHLIST, PLEX AUTHENTICATION, PLEX REQUEST, MONITOR RAM, RECLAIM SPACE #
#-----------------------------------------------------------------------------------------------#
#
# DISCORD_ENABLED=false
# DISCORD_UPDATE_ENABLED=false
# DISCORD_WEBHOOK_URL=<discord_webhook_url>
#-----------------#
# REPAIR - REPAIR #
#-----------------#
REPAIR_REPAIR_INTERVAL="10m"
REPAIR_RUN_INTERVAL="1d"
#-----------------------#
# GENERAL CONFIGURATION #
#-----------------------#
PYTHONUNBUFFERED=TRUE
PUID=1000
PGID=1000
UMASK=002
DOCKER_NETWORK="scripts_default"
DOCKER_NETWORK_EXTERNAL=false
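
This file carries plaintext API keys, so rather than baking it into an image, one option is to load it into the cluster as a Secret and inject it with envFrom; the Secret name and file name below are assumptions:

kubectl create secret generic blackhole-env --from-env-file=blackhole.env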

@@ -0,0 +1 @@
Subproject commit 8c92686ce6ed50a1683d61168be9b193d21e6e67


@@ -0,0 +1,5 @@
[zurg]
type = webdav
url = http://192.168.0.200:9999/dav
vendor = other
pacer_min_sleep = 0


@@ -0,0 +1,48 @@
zurg: v1
token: HLUV4VFMVEAO5FOYZFXMEVYJZFRP2KKIWZF3XHTNKLKJQCZKO3CA
host: "[::]"
port: 9999
#username: sager
#password: lol
# proxy:
# concurrent_workers: 20
check_for_changes_every_secs: 10
repair_every_mins: 60
ignore_renames: false
retain_rd_torrent_name: false
retain_folder_name_extension: false
enable_repair: false
auto_delete_rar_torrents: false
get_torrents_count: 5000
# api_timeout_secs: 15
# download_timeout_secs: 10
# enable_download_mount: false
# rate_limit_sleep_secs: 6
# retries_until_failed: 2
# network_buffer_size: 4194304 # 4MB
serve_from_rclone: true
# verify_download_link: false
# force_ipv6: false
on_library_update: sh jellyfin_update.sh "$@"
#for windows comment the line above and uncomment the line below:
#on_library_update: '& powershell -ExecutionPolicy Bypass -File .\plex_update.ps1 --% "$args"'
directories:
anime:
group_order: 10
group: media
filters:
- regex: /\b[a-fA-F0-9]{8}\b/
- any_file_inside_regex: /\b[a-fA-F0-9]{8}\b/
tvshows:
group_order: 20
group: media
filters:
- has_episodes: true
movies:
group_order: 30
group: media
only_show_the_biggest_file: true
filters:
- regex: /.*/
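
The on_library_update hook can be exercised by hand inside the zurg pod to confirm the notification path end to end; the sample media path is hypothetical:

kubectl exec deploy/zurg -- sh /app/jellyfin_update.sh "movies/Example.Movie.2024"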


@@ -0,0 +1,35 @@
#!/bin/bash
# JELLYFIN PARTIAL SCAN script or JELLYFIN UPDATE script
# When Zurg detects changes, it can trigger this script IF your config.yml contains:
# on_library_update: sh jellyfin_update.sh "$@"
jellyfin_url="https://askar.tv" # Replace with your Jellyfin server URL (e.g., http://127.0.0.1:8096)
api_key="0571ee7fc7cb4a31afd30bad268caff6" # Generate a personal API key in Jellyfin via Dashboard > API Keys
zurg_mount="/mnt/zurg" # Replace with your Zurg mount path as seen by the host
jellyfin_mount="/data/rclone" # Replace with the equivalent path seen by Jellyfin
# Function to convert paths
convert_path() {
local host_path="$1"
echo "${host_path/$zurg_mount/$jellyfin_mount}"
}
# Process each argument passed to the script
for arg in "$@"
do
parsed_arg="${arg//\\}"
echo "Detected update on: $parsed_arg"
# Convert the path to match Jellyfin's view
modified_arg=$(convert_path "$zurg_mount/$parsed_arg")
echo "Absolute path for Jellyfin: $modified_arg"
# Send the update notification to Jellyfin
curl -X POST -H "X-Emby-Token: $api_key" -H "Content-Type: application/json" \
-d "{\"ItemPath\": \"$modified_arg\"}" \
"$jellyfin_url/Library/Media/Updated"
done
echo "All updated libraries refreshed"
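
For debugging, the same refresh can be sent by hand; this mirrors the curl call in the loop above, with jellyfin_url and api_key set as above and a hypothetical item path:

curl -X POST -H "X-Emby-Token: $api_key" -H "Content-Type: application/json" \
  -d '{"ItemPath": "/data/rclone/movies/Example.Movie.2024"}' \
  "$jellyfin_url/Library/Media/Updated"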


@@ -0,0 +1,35 @@
#!/bin/bash
# PLEX PARTIAL SCAN script or PLEX UPDATE script
# When zurg detects changes, it can trigger this script IF your config.yml contains
# on_library_update: sh plex_update.sh "$@"
# docker compose exec zurg apk add libxml2-utils
# sudo apt install libxml2-utils
plex_url="http://<url>" # If you're using zurg inside a Docker container, by default it is 172.17.0.1:32400
token="<token>" # open Plex in a browser, open dev console and copy-paste this: window.localStorage.getItem("myPlexAccessToken")
zurg_mount="/mnt/zurg" # replace with your zurg mount path, ensure this is what Plex sees
# Get the list of section IDs
section_ids=$(curl -sLX GET "$plex_url/library/sections" -H "X-Plex-Token: $token" | xmllint --xpath "//Directory/@key" - | grep -o 'key="[^"]*"' | awk -F'"' '{print $2}')
for arg in "$@"
do
parsed_arg="${arg//\\}"
echo $parsed_arg
modified_arg="$zurg_mount/$parsed_arg"
echo "Detected update on: $arg"
echo "Absolute path: $modified_arg"
for section_id in $section_ids
do
echo "Section ID: $section_id"
curl -G -H "X-Plex-Token: $token" --data-urlencode "path=$modified_arg" $plex_url/library/sections/$section_id/refresh
done
done
echo "All updated sections refreshed"
# credits to godver3, wasabipls


@@ -0,0 +1,33 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: zurg
spec:
replicas: 1
selector:
matchLabels:
app: zurg
template:
metadata:
labels:
app: zurg
spec:
containers:
- name: zurg
image: ghcr.io/debridmediamanager/zurg-testing:latest
ports:
- containerPort: 9999
volumeMounts:
- name: ssd
mountPath: /app/jellyfin_update.sh
subPath: configs/zurg/app/jellyfin_update.sh
- name: ssd
mountPath: /app/config.yml
subPath: configs/zurg/app/config.yml
- name: ssd
mountPath: /app/data
subPath: configs/zurg/app/data
volumes:
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-pvc


@@ -0,0 +1,55 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: rdtclient
spec:
replicas: 1
selector:
matchLabels:
app: rdtclient
template:
metadata:
labels:
app: rdtclient
spec:
containers:
- name: rdtclient
image: rogerfar/rdtclient
env:
- name: PUID
value: "1000"
- name: PGID
value: "1000"
- name: TZ
value: Asia/Kuwait
ports:
- containerPort: 6500
volumeMounts:
- name: hdd
mountPath: /data/downloads
subPath: transmission/downloads/complete/
- name: ssd
mountPath: /data/db
subPath: configs/rdtDB
- name: rclone-data
mountPath: /data/rclone/__all__
livenessProbe:
httpGet:
path: /
port: 6500
initialDelaySeconds: 30
periodSeconds: 30
timeoutSeconds: 30
failureThreshold: 3
restartPolicy: Always
volumes:
- name: hdd
persistentVolumeClaim:
claimName: nfs-hdd-pvc
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-configs-pvc
- name: rclone-data
hostPath:
path: /mnt/zurg/__all__/
type: Directory


@@ -0,0 +1,5 @@
[zurg]
type = webdav
url = http://192.168.0.200:9999/dav
vendor = other
pacer_min_sleep = 0


@@ -0,0 +1,5 @@
[zurg]
type = webdav
url = http://192.168.0.200:9999/dav
vendor = other
pacer_min_sleep = 0


@@ -0,0 +1,35 @@
#!/bin/bash
# JELLYFIN PARTIAL SCAN script or JELLYFIN UPDATE script
# When Zurg detects changes, it can trigger this script IF your config.yml contains:
# on_library_update: sh jellyfin_update.sh "$@"
jellyfin_url="https://askar.tv" # Replace with your Jellyfin server URL (e.g., http://127.0.0.1:8096)
api_key="0571ee7fc7cb4a31afd30bad268caff6" # Generate a personal API key in Jellyfin via Dashboard > API Keys
zurg_mount="/mnt/zurg" # Replace with your Zurg mount path as seen by the host
jellyfin_mount="/data/rclone" # Replace with the equivalent path seen by Jellyfin
# Function to convert paths
convert_path() {
local host_path="$1"
echo "${host_path/$zurg_mount/$jellyfin_mount}"
}
# Process each argument passed to the script
for arg in "$@"
do
parsed_arg="${arg//\\}"
echo "Detected update on: $parsed_arg"
# Convert the path to match Jellyfin's view
modified_arg=$(convert_path "$zurg_mount/$parsed_arg")
echo "Absolute path for Jellyfin: $modified_arg"
# Send the update notification to Jellyfin
curl -X POST -H "X-Emby-Token: $api_key" -H "Content-Type: application/json" \
-d "{\"ItemPath\": \"$modified_arg\"}" \
"$jellyfin_url/Library/Media/Updated"
done
echo "All updated libraries refreshed"


@@ -0,0 +1,35 @@
#!/bin/bash
# PLEX PARTIAL SCAN script or PLEX UPDATE script
# When zurg detects changes, it can trigger this script IF your config.yml contains
# on_library_update: sh plex_update.sh "$@"
# docker compose exec zurg apk add libxml2-utils
# sudo apt install libxml2-utils
plex_url="http://<url>" # If you're using zurg inside a Docker container, by default it is 172.17.0.1:32400
token="<token>" # open Plex in a browser, open dev console and copy-paste this: window.localStorage.getItem("myPlexAccessToken")
zurg_mount="/mnt/zurg" # replace with your zurg mount path, ensure this is what Plex sees
# Get the list of section IDs
section_ids=$(curl -sLX GET "$plex_url/library/sections" -H "X-Plex-Token: $token" | xmllint --xpath "//Directory/@key" - | grep -o 'key="[^"]*"' | awk -F'"' '{print $2}')
for arg in "$@"
do
parsed_arg="${arg//\\}"
echo $parsed_arg
modified_arg="$zurg_mount/$parsed_arg"
echo "Detected update on: $arg"
echo "Absolute path: $modified_arg"
for section_id in $section_ids
do
echo "Section ID: $section_id"
curl -G -H "X-Plex-Token: $token" --data-urlencode "path=$modified_arg" $plex_url/library/sections/$section_id/refresh
done
done
echo "All updated sections refreshed"
# credits to godver3, wasabipls


@@ -26,5 +26,14 @@ spec:
services:
- name: aichat
port: 3002
- match: Host(`offline.askar.tv`)
kind: Rule
middlewares:
        - name: analytics
          namespace: umami
services:
- name: offline
namespace: default
port: 7000
tls:
certResolver: le


@@ -59,3 +59,26 @@ subsets:
- ip: 192.168.0.200
ports:
- port: 3002
---
apiVersion: v1
kind: Service
metadata:
name: offline
spec:
type: ClusterIP
ports:
- port: 7000
targetPort: 7000
---
apiVersion: v1
kind: Endpoints
metadata:
name: offline
subsets:
- addresses:
- ip: 192.168.0.200
ports:
- port: 7000


@@ -0,0 +1,10 @@
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: offline
namespace: default
spec:
redirectRegex:
regex: "^.*"
replacement: "https://offline.askar.tv/"
permanent: false
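
Because permanent is false, Traefik should answer with a temporary (302) redirect once this middleware is attached to a router; a hedged check:

curl -sI https://askar.tv/ | head -n 3   # expect HTTP 302 and Location: https://offline.askar.tv/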