nomadics9 2025-04-17 16:04:17 +03:00
parent e1d4c89fc6
commit b21b925890
11 changed files with 178 additions and 16 deletions

View file

@@ -41,7 +41,7 @@ spec:
image: nomadics/alaskarfin:latest
resources:
requests:
cpu: "4"
cpu: "1"
limits:
cpu: "5.5"
name: jellyfin
@@ -72,6 +72,9 @@ spec:
- mountPath: /mnt/zurg/__all__
subPath: __all__
name: rclone-data
- mountPath: /mnt/torbox
name: rclone-data-torbox
- name: render-d128
mountPath: /dev/dri/renderD128
restartPolicy: Always
@@ -89,4 +92,7 @@ spec:
hostPath:
path: /dev/dri/renderD128
- name: rclone-data-torbox
persistentVolumeClaim:
claimName: rclone-data-torbox-pvc
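The same change repeats in every deployment touched by this commit: a /mnt/torbox volumeMount on the container, backed by a pod volume that points at the new rclone-data-torbox-pvc claim. Because the diff view flattens indentation, here is a minimal sketch of how the added lines sit inside a Deployment's pod spec (the container name is illustrative):

  spec:
    template:
      spec:
        containers:
          - name: jellyfin
            volumeMounts:
              - mountPath: /mnt/torbox
                name: rclone-data-torbox
        volumes:
          - name: rclone-data-torbox
            persistentVolumeClaim:
              claimName: rclone-data-torbox-pvc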

View file

@@ -76,6 +76,8 @@ spec:
- mountPath: /mnt/zurg/__all__
subPath: __all__
name: rclone-data
- mountPath: /mnt/torbox/
name: rclone-data-torbox
- name: render-d128
mountPath: /dev/dri/renderD128
restartPolicy: Always
@@ -90,6 +92,9 @@ spec:
- name: rclone-data
persistentVolumeClaim:
claimName: rclone-data-pvc
- name: rclone-data-torbox
persistentVolumeClaim:
claimName: rclone-data-torbox-pvc
- name: render-d128
hostPath:
path: /dev/dri/renderD128

View file

@@ -57,6 +57,8 @@ spec:
- mountPath: /mnt/zurg/__all__
name: rclone-data
subPath: __all__
- mountPath: /mnt/torbox
name: rclone-data-torbox
restartPolicy: Always
volumes:
- name: ssd
@@ -68,6 +70,9 @@ spec:
- name: rclone-data
persistentVolumeClaim:
claimName: rclone-data-pvc
- name: rclone-data-torbox
persistentVolumeClaim:
claimName: rclone-data-torbox-pvc
---
apiVersion: apps/v1

View file

@@ -9,7 +9,7 @@ metadata:
io.kompose.service: suggestarr
name: suggestarr
spec:
replicas: 1
replicas: 0
selector:
matchLabels:
io.kompose.service: suggestarr

View file

@@ -8,10 +8,13 @@ spec:
- web
- websecure
routes:
# - match: Host(`askar.tv`) && !PathPrefix(`/Videos/Transcode`) || Host(`www.askar.tv`) && !PathPrefix(`/Videos/Transcode`)
- match: Host(`askar.tv`) || Host(`www.askar.tv`)
kind: Rule
middlewares:
- name: https-redirect
# - name: extract-device-id
# - name: set-device-cookie
- name: analytics
namespace: umami
services:
@@ -19,15 +22,15 @@ spec:
port: 8096
# - name: jellyfin-lb
# kind: TraefikService
- match: Host(`gpu.askar.tv`)
kind: Rule
middlewares:
- name: https-redirect
- name: analytics
namespace: umami
services:
- name: jellyfin-master
port: 8096
# - match: Host(`askar.tv`) && PathPrefix(`/Videos/Transcode`) || Host(`www.askar.tv`) && PathPrefix(`/Videos/Transcode`)
# kind: Rule
# middlewares:
# - name: https-redirect
# - name: analytics
# namespace: umami
# services:
# - name: jellyfin-master
# port: 8096
- match: Host(`cpu.askar.tv`)
kind: Rule
middlewares:

View file

@@ -7,7 +7,10 @@ spec:
weighted:
sticky:
cookie:
name: jellyfin-session
name: "jellyfin-session"
httpOnly: true
secure: true
sameSite: none
services:
- name: jellyfin-worker
port: 8096
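The sticky-cookie attributes added here (httpOnly, secure, sameSite) are Traefik's weighted-service sticky cookie options; browsers only accept SameSite=None cookies when Secure is also set, which this change does. A minimal sketch of the resulting TraefikService, assuming the jellyfin-lb name and jellyfin-master backend referenced (commented out) in the IngressRoute above, and the traefik.io/v1alpha1 API group (older Traefik releases use traefik.containo.us/v1alpha1):

  apiVersion: traefik.io/v1alpha1
  kind: TraefikService
  metadata:
    name: jellyfin-lb
  spec:
    weighted:
      sticky:
        cookie:
          name: "jellyfin-session"
          httpOnly: true
          secure: true      # required by browsers when sameSite is "none"
          sameSite: none
      services:
        - name: jellyfin-worker
          port: 8096
        - name: jellyfin-master   # assumed second backend; only jellyfin-worker is visible in this hunk
          port: 8096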

View file

@@ -0,0 +1,45 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
keel.sh/policy: minor
keel.sh/trigger: poll
keel.sh/pollSchedule: "@every 6h"
labels:
io.kompose.service: meilisearch
name: meilisearch
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: meilisearch
strategy:
type: Recreate
template:
metadata:
labels:
io.kompose.service: meilisearch
spec:
nodeSelector:
role: worker
containers:
- env:
- name: MEILI_MASTER_KEY
value: "ObN9OEH6QYm6C1sS2gQl57X6XhTQsA4eK72flUXvm8"
image: getmeili/meilisearch:latest
name: meilisearch
volumeMounts:
- mountPath: /meili_data
name: ssd
subPath: configs/meillisearch
- mountPath: /config/data/
name: ssd
subPath: configs/jellyarr/jellyfin_config/data/
restartPolicy: Always
volumes:
- name: ssd
persistentVolumeClaim:
claimName: nfs-ssd-pvc
---
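One thing to flag in the new Meilisearch Deployment: MEILI_MASTER_KEY is set as a plain value, so the key lives in the manifest and in git history. The standard Kubernetes alternative is a Secret referenced through secretKeyRef; a minimal sketch, with the Secret name and key chosen here purely for illustration:

  apiVersion: v1
  kind: Secret
  metadata:
    name: meilisearch-secrets   # hypothetical name
  type: Opaque
  stringData:
    master-key: "<paste the master key here>"
  ---
  # then, in the meilisearch container spec:
  env:
    - name: MEILI_MASTER_KEY
      valueFrom:
        secretKeyRef:
          name: meilisearch-secrets
          key: master-key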

View file

@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: meilisearch
spec:
selector:
io.kompose.service: meilisearch
ports:
- protocol: TCP
port: 7700 # Default Meilisearch HTTP port
targetPort: 7700 # Port on the container
type: ClusterIP # could be NodePort or LoadBalancer instead, depending on the use case
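With this ClusterIP Service, other pods in the same namespace can reach Meilisearch at http://meilisearch:7700, or at http://meilisearch.<namespace>.svc.cluster.local:7700 from other namespaces. A small illustrative snippet of how a consuming container might be pointed at it (the variable name is an assumption, not something defined by this commit):

  env:
    - name: MEILISEARCH_URL
      value: "http://meilisearch:7700"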

View file

@@ -27,7 +27,12 @@ spec:
nfs:
server: 192.168.0.200
path: /HDD
# csi:
# driver: nfs.csi.k8s.io
# volumeHandle: nfs-hdd
# volumeAttributes:
# server: 192.168.0.200
# share: /HDD
---
apiVersion: v1
kind: PersistentVolume
@@ -71,6 +76,48 @@ spec:
vfs-read-chunk-size-limit: "32M"
vfs-refresh: "true"
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: rclone-data-torbox
spec:
capacity:
storage: 10Ti
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
storageClassName: "rclone-tor"
csi:
driver: csi-rclone
volumeHandle: rclone-data-torbox
volumeAttributes:
remote: "webdav"
remotePath: "/"
webdav-url: "https://webdav.torbox.app/"
webdav-user: "binaskar9@gmail.com"
webdav-pass: "u06OwwL0ujAhTlUrAGR8o9cFjbcC5LOwjCLZaQ"
allow-other: "true"
allow-non-empty: "true"
async-read: "true"
buffer-size: "48M"
dir-cache-time: "15s"
cache-dir: "/mnt/rclone-cache-tor"
dir-permissions: "0775"
file-permissions: "0664"
gid: "1000"
log-level: "INFO"
poll-interval: "15s"
timeout: "10m"
uid: "1000"
use-mmap: "true"
vfs-cache-max-age: "672h"
vfs-cache-max-size: "5G"
vfs-cache-mode: "full"
vfs-cache-poll-interval: "15s"
vfs-fast-fingerprint: "true"
vfs-read-ahead: "96M"
vfs-read-chunk-size: "32M"
vfs-read-chunk-size-limit: "32M"
---
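The TorBox WebDAV username and password are carried in volumeAttributes, so they are readable by anyone who can get PersistentVolumes. Kubernetes PVs also accept spec.csi.nodePublishSecretRef for handing credentials to a CSI driver; whether csi-rclone actually reads its webdav-user/webdav-pass parameters from such a Secret depends on the driver version, so treat this as a sketch to verify against the driver's docs (Secret name and namespace are illustrative):

  csi:
    driver: csi-rclone
    volumeHandle: rclone-data-torbox
    volumeAttributes:
      remote: "webdav"
      remotePath: "/"
    nodePublishSecretRef:          # standard CSI PV field; driver support must be confirmed
      name: rclone-torbox-webdav   # hypothetical Secret holding webdav-user / webdav-pass
      namespace: kube-system       # wherever the csi-rclone plugin expects it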
apiVersion: v1
@@ -103,7 +150,23 @@ spec:
storage: 6Ti
# volumeName: rclone-data
storageClassName: "rclone"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: rclone-data-torbox-pvc
annotations:
csi-rclone/storage-path: "/"
csi-rclone/remote: "webdav"
csi-rclone/umask: "022"
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 6Ti
# volumeName: rclone-data
storageClassName: "rclone-tor"
---
apiVersion: v1
kind: PersistentVolumeClaim

View file

@@ -53,6 +53,8 @@ spec:
- mountPath: /mnt/zurg/__all__
subPath: __all__
name: rclone-data
- mountPath: /mnt/torbox
name: rclone-data-torbox
restartPolicy: Always
volumes:
- name: ssd
@@ -64,6 +66,10 @@ spec:
- name: rclone-data
persistentVolumeClaim:
claimName: rclone-data-pvc
- name: rclone-data-torbox
persistentVolumeClaim:
claimName: rclone-data-torbox-pvc
---
apiVersion: apps/v1
@@ -121,6 +127,8 @@ spec:
- mountPath: /mnt/zurg/__all__
subPath: __all__
name: rclone-data
- mountPath: /mnt/torbox
name: rclone-data-torbox
restartPolicy: Always
volumes:
- name: ssd
@@ -132,3 +140,6 @@ spec:
- name: rclone-data
persistentVolumeClaim:
claimName: rclone-data-pvc
- name: rclone-data-torbox
persistentVolumeClaim:
claimName: rclone-data-torbox-pvc

View file

@@ -51,6 +51,8 @@ spec:
- mountPath: /mnt/zurg/__all__
subPath: __all__
name: rclone-data
- mountPath: /mnt/torbox
name: rclone-data-torbox
restartPolicy: Always
volumes:
- name: ssd
@@ -62,7 +64,9 @@ spec:
- name: rclone-data
persistentVolumeClaim:
claimName: rclone-data-pvc
- name: rclone-data-torbox
persistentVolumeClaim:
claimName: rclone-data-torbox-pvc
---
apiVersion: apps/v1
kind: Deployment
@@ -117,6 +121,8 @@ spec:
- mountPath: /mnt/zurg/__all__
subPath: __all__
name: rclone-data
- mountPath: /mnt/torbox
name: rclone-data-torbox
restartPolicy: Always
volumes:
- name: ssd
@@ -128,4 +134,6 @@ spec:
- name: rclone-data
persistentVolumeClaim:
claimName: rclone-data-pvc
- name: rclone-data-torbox
persistentVolumeClaim:
claimName: rclone-data-torbox-pvc