debrid syslinks base

nomadics9 2025-02-01 22:13:16 +03:00
parent ffc9d0ebd1
commit 8050884f51
No known key found for this signature in database
23 changed files with 764 additions and 0 deletions

.gitignore vendored

@@ -1,3 +1,4 @@
secrets
loadBalancer/middlewares/umami/
docker-compose.yml
dockerCompose/debrid-syslinks/torRclone/rclone.conf


@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: csi-rclone


@@ -0,0 +1,66 @@
# This YAML file contains RBAC API objects that are necessary to run external
# CSI attacher for rclone adapter
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-controller-rclone
namespace: csi-rclone
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-controller-rclone
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "patch", "update", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["csi.storage.k8s.io"]
resources: ["csinodeinfos"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update", "create", "delete"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "create", "update"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-attacher-role-rclone
subjects:
- kind: ServiceAccount
name: csi-controller-rclone
namespace: csi-rclone
roleRef:
kind: ClusterRole
name: external-controller-rclone
apiGroup: rbac.authorization.k8s.io


@@ -0,0 +1,69 @@
# This YAML file contains attacher & csi driver API objects that are necessary
# to run external CSI attacher for rclone
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: csi-controller-rclone
namespace: csi-rclone
spec:
serviceName: "csi-controller-rclone"
replicas: 1
selector:
matchLabels:
app: csi-controller-rclone
template:
metadata:
labels:
app: csi-controller-rclone
spec:
serviceAccountName: csi-controller-rclone
containers:
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v5.0.2
args:
- "--csi-address=$(ADDRESS)"
- "--extra-create-metadata"
# - "--leader-election"
- "--v=1"
env:
- name: ADDRESS
value: /plugin/csi.sock
imagePullPolicy: "Always"
volumeMounts:
- name: socket-dir
mountPath: /plugin
- name: csi-attacher
image: k8s.gcr.io/sig-storage/csi-attacher:v3.4.0
args:
- "--csi-address=$(ADDRESS)"
- "--v=1"
# - "--leader-election"
env:
- name: ADDRESS
value: /plugin/csi.sock
imagePullPolicy: "Always"
volumeMounts:
- name: socket-dir
mountPath: /plugin
- name: rclone
image: wunderio/csi-rclone:v3.0.0
args:
- "/bin/csi-rclone-plugin"
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--v=1"
env:
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix://plugin/csi.sock
imagePullPolicy: "Always"
volumeMounts:
- name: socket-dir
mountPath: /plugin
volumes:
- name: socket-dir
emptyDir: {}


@@ -0,0 +1,8 @@
# this should be deregistered once the controller stops
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: csi-rclone
spec:
attachRequired: true
podInfoOnMount: true


@@ -0,0 +1,40 @@
# This YAML defines all API objects to create RBAC roles for CSI node plugin
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-nodeplugin-rclone
namespace: csi-rclone
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-nodeplugin-rclone
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["secrets","secret"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-nodeplugin-rclone
subjects:
- kind: ServiceAccount
name: csi-nodeplugin-rclone
namespace: csi-rclone
roleRef:
kind: ClusterRole
name: csi-nodeplugin-rclone
apiGroup: rbac.authorization.k8s.io


@@ -0,0 +1,83 @@
# This YAML file contains driver-registrar & csi driver nodeplugin API objects
# that are necessary to run CSI nodeplugin for rclone
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: csi-nodeplugin-rclone
namespace: csi-rclone
spec:
selector:
matchLabels:
app: csi-nodeplugin-rclone
template:
metadata:
labels:
app: csi-nodeplugin-rclone
spec:
serviceAccountName: csi-nodeplugin-rclone
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: node-driver-registrar
image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0
lifecycle:
preStop:
exec:
command: ["/bin/sh", "-c", "rm -rf /registration/csi-rclone /registration/csi-rclone-reg.sock"]
args:
- --v=1
- --csi-address=/plugin/csi.sock
- --kubelet-registration-path=/var/lib/kubelet/plugins/csi-rclone/csi.sock
env:
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: plugin-dir
mountPath: /plugin
- name: registration-dir
mountPath: /registration
- name: rclone
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: wunderio/csi-rclone:v3.0.0
args:
- "/bin/csi-rclone-plugin"
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--v=1"
env:
- name: NODE_ID
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_ENDPOINT
value: unix://plugin/csi.sock
imagePullPolicy: "Always"
lifecycle:
postStart:
exec:
command: ["/bin/sh", "-c", "mount -t fuse.rclone | while read -r mount; do umount $(echo $mount | awk '{print $3}') || true ; done"]
volumeMounts:
- name: plugin-dir
mountPath: /plugin
- name: pods-mount-dir
mountPath: /var/lib/kubelet/pods
mountPropagation: "Bidirectional"
volumes:
- name: plugin-dir
hostPath:
path: /var/lib/kubelet/plugins/csi-rclone
type: DirectoryOrCreate
- name: pods-mount-dir
hostPath:
path: /var/lib/kubelet/pods
type: Directory
- hostPath:
path: /var/lib/kubelet/plugins_registry
type: DirectoryOrCreate
name: registration-dir


@@ -0,0 +1,8 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: rclone
# You will need to delete storageclass to update this field
provisioner: csi-rclone
# parameters:
# pathPattern: "${.PVC.namespace}/${.PVC.annotations.csi-rclone/storage-path}"
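The commented pathPattern above shows how csi-rclone can derive a per-claim remote path from the claim's namespace and a csi-rclone/storage-path annotation. A minimal sketch of a claim against this class follows, assuming the rclone remote itself is supplied through the driver's secret or StorageClass parameters as described in the upstream wunderio/csi-rclone docs; the claim name, annotation value, access mode and size are illustrative, not part of this commit.

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zurg-media                     # hypothetical claim name
  annotations:
    csi-rclone/storage-path: media     # would feed the commented pathPattern above
spec:
  storageClassName: rclone
  accessModes:
    - ReadWriteMany                    # rclone FUSE mounts are typically shared
  resources:
    requests:
      storage: 10Gi                    # required by the API; nominal for an rclone-backed volume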


@@ -0,0 +1,151 @@
#------------------------------------------------------#
# ███████╗ ██████╗██████╗ ██╗██████╗ ████████╗███████╗ #
# ██╔════╝██╔════╝██╔══██╗██║██╔══██╗╚══██╔══╝██╔════╝ #
# ███████╗██║ ██████╔╝██║██████╔╝ ██║ ███████╗ #
# ╚════██║██║ ██╔══██╗██║██╔═══╝ ██║ ╚════██║ #
# ███████║╚██████╗██║ ██║██║██║ ██║ ███████║ #
# ╚══════╝ ╚═════╝╚═╝ ╚═╝╚═╝╚═╝ ╚═╝ ╚══════╝ #
#------------------------------------------------------#
#--------#
# SERVER #
#--------#
SERVER_DOMAIN=askar.tv
#-------------------------------------------------------------------#
# PLEX - WATCHLIST, PLEX AUTHENTICATION, PLEX REQUEST, PLEX REFRESH #
#-------------------------------------------------------------------#
# PLEX_HOST="https://askar.tv/"
# PLEX_METADATA_HOST="https://metadata.provider.plex.tv/"
# PLEX_SERVER_HOST=<plex_server_host>
# PLEX_SERVER_MACHINE_ID=<plex_server_machine_id>
# PLEX_SERVER_API_KEY=<plex_server_api_key>
# PLEX_SERVER_MOVIE_LIBRARY_ID=<plex_server_movie_library_id>
# PLEX_SERVER_TV_SHOW_LIBRARY_ID=<plex_server_tv_show_library_id>
# PLEX_SERVER_PATH=<plex_server_path>
#-------------------------------------------------------------------------#
# OVERSEERR - WATCHLIST, PLEX AUTHENTICATION, PLEX REQUEST, RECLAIM SPACE #
#-------------------------------------------------------------------------#
OVERSEERR_HOST=r.askar.tv
OVERSEERR_API_KEY=d61488fd81c24cea9b465013e105f783
#------------------------------------------------------------------------------------#
# SONARR - BLACKHOLE, REPAIR, IMPORT TORRENT FOLDER, RECLAIM SPACE, ADD NEXT EPISODE #
#------------------------------------------------------------------------------------#
SONARR_HOST=sr.askar.tv
SONARR_API_KEY=d973448580d041b7ba7e576a7aed9b11
SONARR_ROOT_FOLDER=/mnt/unionfs/Media/TV
SONARR_HOST_4K=sr.askar.tv
SONARR_API_KEY_4K=d973448580d041b7ba7e576a7aed9b11
SONARR_ROOT_FOLDER_4K=/mnt/unionfs/Media/TV
SONARR_HOST_ANIME=sr.askar.tv
SONARR_API_KEY_ANIME=d973448580d041b7ba7e576a7aed9b11
SONARR_ROOT_FOLDER_ANIME=/mnt/unionfs/Media/Anime
# SONARR_HOST_MUX=<sonarr_host_mux>
# SONARR_API_KEY_MUX=<sonarr_api_key_mux>
# SONARR_ROOT_FOLDER_MUX=<sonarr_root_folder_mux>
#------------------------------------------------------------------#
# RADARR - BLACKHOLE, REPAIR, IMPORT TORRENT FOLDER, RECLAIM SPACE #
#------------------------------------------------------------------#
RADARR_HOST=rr.askar.tv
RADARR_API_KEY=5bdad7cc4f33443bafefb5e185b3e0e7
RADARR_ROOT_FOLDER=/mnt/unionfs/Media/Movies
RADARR_HOST_4K=rr.askar.tv
RADARR_API_KEY_4K=5bdad7cc4f33443bafefb5e185b3e0e7
RADARR_ROOT_FOLDER_4K=/mnt/unionfs/Media/Movies
#
# RADARR_HOST_ANIME=<radarr_host_anime>
# RADARR_API_KEY_ANIME=<radarr_api_key_anime>
# RADARR_ROOT_FOLDER_ANIME=<radarr_root_folder_anime>
#
# RADARR_HOST_MUX=<radarr_host_mux>
# RADARR_API_KEY_MUX=<radarr_api_key_mux>
# RADARR_ROOT_FOLDER_MUX=<radarr_root_folder_mux>
#--------------------------#
# TAUTULLI - RECLAIM SPACE #
#--------------------------#
# TAUTULLI_HOST=<tautulli_host>
# TAUTULLI_API_KEY=<tautulli_api_key>
#-------------------------------#
# REALDEBRID - BLACKHOLE, REPAIR #
#-------------------------------#
# REALDEBRID_ENABLED=false
# REALDEBRID_HOST="https://api.real-debrid.com/rest/1.0/"
# REALDEBRID_API_KEY=<realdebrid_api_key>
# REALDEBRID_MOUNT_TORRENTS_PATH=
#---------------------------#
# TORBOX - BLACKHOLE, REPAIR #
#---------------------------#
TORBOX_ENABLED=true
TORBOX_HOST="https://api.torbox.app/v1/api/"
TORBOX_API_KEY=bd719193-a038-47ba-b2af-c5aeb1593196
TORBOX_MOUNT_TORRENTS_PATH=/mnt/remote/torbox
#-----------------------#
# TRAKT - RECLAIM SPACE #
#-----------------------#
# TRAKT_API_KEY=<trakt_api_key>
#-------------------------------------#
# WATCHLIST - WATCHLIST, PLEX REQUEST #
#-------------------------------------#
# WATCHLIST_PLEX_PRODUCT="Plex Request Authentication"
# WATCHLIST_PLEX_VERSION="1.0.0"
# WATCHLIST_PLEX_CLIENT_IDENTIFIER="576101fc-b425-4685-91cb-5d3c1671fd2b"
#-----------------------#
# BLACKHOLE - BLACKHOLE #
#-----------------------#
BLACKHOLE_BASE_WATCH_PATH="/mnt/symlinks"
BLACKHOLE_RADARR_PATH="radarr"
BLACKHOLE_SONARR_PATH="sonarr"
BLACKHOLE_FAIL_IF_NOT_CACHED=true
BLACKHOLE_RD_MOUNT_REFRESH_SECONDS=200
BLACKHOLE_WAIT_FOR_TORRENT_TIMEOUT=60
BLACKHOLE_HISTORY_PAGE_SIZE=500
#-----------------------------------------------------------------------------------------------#
# DISCORD - BLACKHOLE, WATCHLIST, PLEX AUTHENTICATION, PLEX REQUEST, MONITOR RAM, RECLAIM SPACE #
#-----------------------------------------------------------------------------------------------#
#
# DISCORD_ENABLED=false
# DISCORD_UPDATE_ENABLED=false
# DISCORD_WEBHOOK_URL=<discord_webhook_url>
#-----------------#
# REPAIR - REPAIR #
#-----------------#
REPAIR_REPAIR_INTERVAL="10m"
REPAIR_RUN_INTERVAL="1d"
#-----------------------#
# GENERAL CONFIGURATION #
#-----------------------#
PYTHONUNBUFFERED=TRUE
PUID=1000
PGID=1000
UMASK=002
DOCKER_NETWORK="scripts_default"
DOCKER_NETWORK_EXTERNAL=false

@@ -0,0 +1 @@
Subproject commit 8c92686ce6ed50a1683d61168be9b193d21e6e67


@@ -0,0 +1,5 @@
[zurg]
type = webdav
url = http://192.168.0.200:9999/dav
vendor = other
pacer_min_sleep = 0


@@ -0,0 +1,48 @@
zurg: v1
token: HLUV4VFMVEAO5FOYZFXMEVYJZFRP2KKIWZF3XHTNKLKJQCZKO3CA
host: "[::]"
port: 9999
#username: sager
#password: lol
# proxy:
# concurrent_workers: 20
check_for_changes_every_secs: 10
repair_every_mins: 60
ignore_renames: false
retain_rd_torrent_name: false
retain_folder_name_extension: false
enable_repair: false
auto_delete_rar_torrents: false
get_torrents_count: 5000
# api_timeout_secs: 15
# download_timeout_secs: 10
# enable_download_mount: false
# rate_limit_sleep_secs: 6
# retries_until_failed: 2
# network_buffer_size: 4194304 # 4MB
serve_from_rclone: true
# verify_download_link: false
# force_ipv6: false
on_library_update: sh jellyfin_update.sh "$@"
#for windows comment the line above and uncomment the line below:
#on_library_update: '& powershell -ExecutionPolicy Bypass -File .\plex_update.ps1 --% "$args"'
directories:
anime:
group_order: 10
group: media
filters:
- regex: /\b[a-fA-F0-9]{8}\b/
- any_file_inside_regex: /\b[a-fA-F0-9]{8}\b/
tvshows:
group_order: 20
group: media
filters:
- has_episodes: true
movies:
group_order: 30
group: media
only_show_the_biggest_file: true
filters:
- regex: /.*/


@@ -0,0 +1,35 @@
#!/bin/bash
# JELLYFIN PARTIAL SCAN script or JELLYFIN UPDATE script
# When Zurg detects changes, it can trigger this script IF your config.yml contains:
# on_library_update: sh jellyfin_update.sh "$@"
jellyfin_url="https://askar.tv" # Replace with your Jellyfin server URL (e.g., http://127.0.0.1:8096)
api_key="0571ee7fc7cb4a31afd30bad268caff6" # Generate a personal API key in Jellyfin via Dashboard > API Keys
zurg_mount="/mnt/zurg" # Replace with your Zurg mount path as seen by the host
jellyfin_mount="/data/rclone" # Replace with the equivalent path seen by Jellyfin
# Function to convert paths
convert_path() {
local host_path="$1"
echo "${host_path/$zurg_mount/$jellyfin_mount}"
}
# Process each argument passed to the script
for arg in "$@"
do
parsed_arg="${arg//\\}"
echo "Detected update on: $parsed_arg"
# Convert the path to match Jellyfin's view
modified_arg=$(convert_path "$zurg_mount/$parsed_arg")
echo "Absolute path for Jellyfin: $modified_arg"
# Send the update notification to Jellyfin
curl -X POST -H "X-Emby-Token: $api_key" -H "Content-Type: application/json" \
-d "{\"ItemPath\": \"$modified_arg\"}" \
"$jellyfin_url/Library/Media/Updated"
done
echo "All updated libraries refreshed"


@@ -0,0 +1,35 @@
#!/bin/bash
# PLEX PARTIAL SCAN script or PLEX UPDATE script
# When zurg detects changes, it can trigger this script IF your config.yml contains
# on_library_update: sh plex_update.sh "$@"
# docker compose exec zurg apk add libxml2-utils
# sudo apt install libxml2-utils
plex_url="http://<url>" # If you're using zurg inside a Docker container, by default it is 172.17.0.1:32400
token="<token>" # open Plex in a browser, open dev console and copy-paste this: window.localStorage.getItem("myPlexAccessToken")
zurg_mount="/mnt/zurg" # replace with your zurg mount path, ensure this is what Plex sees
# Get the list of section IDs
section_ids=$(curl -sLX GET "$plex_url/library/sections" -H "X-Plex-Token: $token" | xmllint --xpath "//Directory/@key" - | grep -o 'key="[^"]*"' | awk -F'"' '{print $2}')
for arg in "$@"
do
parsed_arg="${arg//\\}"
echo "$parsed_arg"
modified_arg="$zurg_mount/$parsed_arg"
echo "Detected update on: $arg"
echo "Absolute path: $modified_arg"
for section_id in $section_ids
do
echo "Section ID: $section_id"
curl -G -H "X-Plex-Token: $token" --data-urlencode "path=$modified_arg" "$plex_url/library/sections/$section_id/refresh"
done
done
echo "All updated sections refreshed"
# credits to godver3, wasabipls


@@ -0,0 +1,33 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: zurg
spec:
replicas: 1
selector:
matchLabels:
app: zurg
template:
metadata:
labels:
app: zurg
spec:
containers:
- name: zurg
image: ghcr.io/debridmediamanager/zurg-testing:latest
ports:
- containerPort: 9999
volumeMounts:
- name: ssd
mountPath: /app/jellyfin_update.sh
subPath: configs/zurg/app/jellyfin_update.sh
- name: ssd
mountPath: /app/config.yml
subPath: configs/zurg/app/config.yml
- name: ssd
mountPath: /app/data
subPath: configs/zurg/app/data
volumes:
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-pvc
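This Deployment serves zurg's WebDAV on port 9999, and the rclone.conf files in this commit point a webdav remote named [zurg] at it. With the csi-rclone driver installed above, the same remote could also be handed to pods as a PersistentVolume. A hedged sketch: the volumeAttributes keys (remote, remotePath, configData) follow the upstream wunderio/csi-rclone README and should be verified against the deployed driver version; the PV name and capacity are illustrative.

apiVersion: v1
kind: PersistentVolume
metadata:
  name: zurg-webdav                    # hypothetical PV name
spec:
  storageClassName: rclone
  accessModes:
    - ReadWriteMany
  capacity:
    storage: 10Gi                      # nominal; rclone does not enforce capacity
  csi:
    driver: csi-rclone
    volumeHandle: zurg-webdav          # any unique volume id
    volumeAttributes:
      remote: "zurg"
      remotePath: "/"
      configData: |                    # same remote definition as the rclone.conf in this commit
        [zurg]
        type = webdav
        url = http://192.168.0.200:9999/dav
        vendor = other
        pacer_min_sleep = 0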


@@ -0,0 +1,55 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: rdtclient
spec:
replicas: 1
selector:
matchLabels:
app: rdtclient
template:
metadata:
labels:
app: rdtclient
spec:
containers:
- name: rdtclient
image: rogerfar/rdtclient
env:
- name: PUID
value: "1000"
- name: PGID
value: "1000"
- name: TZ
value: Asia/Kuwait
ports:
- containerPort: 6500
volumeMounts:
- name: hdd
mountPath: /data/downloads
subPath: transmission/downloads/complete/
- name: ssd
mountPath: /data/db
subPath: configs/rdtDB
- name: rclone-data
mountPath: /data/rclone/__all__
livenessProbe:
httpGet:
path: /
port: 6500
initialDelaySeconds: 30
periodSeconds: 30
timeoutSeconds: 30
failureThreshold: 3
restartPolicy: Always
volumes:
- name: hdd
persistentVolumeClaim:
claimName: nfs-hdd-pvc
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-configs-pvc
- name: rclone-data
hostPath:
path: /mnt/zurg/__all__/
type: Directory
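The rdtclient container listens on 6500 and the livenessProbe targets that port, but no Service for it is added in this commit. A minimal ClusterIP sketch in case the UI needs to be reachable in-cluster (for example through the existing Traefik routes); the Service name is an assumption.

apiVersion: v1
kind: Service
metadata:
  name: rdtclient                      # hypothetical; not part of this commit
spec:
  type: ClusterIP
  selector:
    app: rdtclient                     # matches the Deployment's pod label above
  ports:
    - port: 6500
      targetPort: 6500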


@@ -0,0 +1,5 @@
[zurg]
type = webdav
url = http://192.168.0.200:9999/dav
vendor = other
pacer_min_sleep = 0


@@ -0,0 +1,5 @@
[zurg]
type = webdav
url = http://192.168.0.200:9999/dav
vendor = other
pacer_min_sleep = 0


@@ -0,0 +1,35 @@
#!/bin/bash
# JELLYFIN PARTIAL SCAN script or JELLYFIN UPDATE script
# When Zurg detects changes, it can trigger this script IF your config.yml contains:
# on_library_update: sh jellyfin_update.sh "$@"
jellyfin_url="https://askar.tv" # Replace with your Jellyfin server URL (e.g., http://127.0.0.1:8096)
api_key="0571ee7fc7cb4a31afd30bad268caff6" # Generate a personal API key in Jellyfin via Dashboard > API Keys
zurg_mount="/mnt/zurg" # Replace with your Zurg mount path as seen by the host
jellyfin_mount="/data/rclone" # Replace with the equivalent path seen by Jellyfin
# Function to convert paths
convert_path() {
local host_path="$1"
echo "${host_path/$zurg_mount/$jellyfin_mount}"
}
# Process each argument passed to the script
for arg in "$@"
do
parsed_arg="${arg//\\}"
echo "Detected update on: $parsed_arg"
# Convert the path to match Jellyfin's view
modified_arg=$(convert_path "$zurg_mount/$parsed_arg")
echo "Absolute path for Jellyfin: $modified_arg"
# Send the update notification to Jellyfin
curl -X POST -H "X-Emby-Token: $api_key" -H "Content-Type: application/json" \
-d "{\"ItemPath\": \"$modified_arg\"}" \
"$jellyfin_url/Library/Media/Updated"
done
echo "All updated libraries refreshed"


@@ -0,0 +1,35 @@
#!/bin/bash
# PLEX PARTIAL SCAN script or PLEX UPDATE script
# When zurg detects changes, it can trigger this script IF your config.yml contains
# on_library_update: sh plex_update.sh "$@"
# docker compose exec zurg apk add libxml2-utils
# sudo apt install libxml2-utils
plex_url="http://<url>" # If you're using zurg inside a Docker container, by default it is 172.17.0.1:32400
token="<token>" # open Plex in a browser, open dev console and copy-paste this: window.localStorage.getItem("myPlexAccessToken")
zurg_mount="/mnt/zurg" # replace with your zurg mount path, ensure this is what Plex sees
# Get the list of section IDs
section_ids=$(curl -sLX GET "$plex_url/library/sections" -H "X-Plex-Token: $token" | xmllint --xpath "//Directory/@key" - | grep -o 'key="[^"]*"' | awk -F'"' '{print $2}')
for arg in "$@"
do
parsed_arg="${arg//\\}"
echo "$parsed_arg"
modified_arg="$zurg_mount/$parsed_arg"
echo "Detected update on: $arg"
echo "Absolute path: $modified_arg"
for section_id in $section_ids
do
echo "Section ID: $section_id"
curl -G -H "X-Plex-Token: $token" --data-urlencode "path=$modified_arg" "$plex_url/library/sections/$section_id/refresh"
done
done
echo "All updated sections refreshed"
# credits to godver3, wasabipls


@@ -26,5 +26,14 @@ spec:
services:
- name: aichat
port: 3002
- match: Host(`offline.askar.tv`)
kind: Rule
middlewares:
- name: analytics
namespace: umami
services:
- name: offline
namespace: default
port: 7000
tls:
certResolver: le


@@ -59,3 +59,26 @@ subsets:
- ip: 192.168.0.200
ports:
- port: 3002
---
apiVersion: v1
kind: Service
metadata:
name: offline
spec:
type: ClusterIP
ports:
- port: 7000
targetPort: 7000
---
apiVersion: v1
kind: Endpoints
metadata:
name: offline
subsets:
- addresses:
- ip: 192.168.0.200
ports:
- port: 7000


@@ -0,0 +1,10 @@
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: offline
namespace: default
spec:
redirectRegex:
regex: "^.*"
replacement: "https://offline.askar.tv/"
permanent: false
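Nothing in this commit references this redirect Middleware yet. Attaching it follows the same pattern as the analytics middleware in the IngressRoute earlier in the diff; a hedged fragment with a hypothetical source host is shown below.

# hypothetical IngressRoute route; only the middleware reference reflects this commit
- match: Host(`maintenance.askar.tv`)  # hypothetical hostname to be redirected
  kind: Rule
  middlewares:
    - name: offline
      namespace: default
  services:
    - name: offline                    # a route still needs a service; the redirect fires before it is reached
      namespace: default
      port: 7000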