diff --git a/.github/workflows/chart.yaml b/.github/workflows/chart.yaml index a902442..d190237 100644 --- a/.github/workflows/chart.yaml +++ b/.github/workflows/chart.yaml @@ -2,10 +2,9 @@ name: Release Charts on: push: - branches: - - master tags: - 'v*.*.*' + - 'chart-v*.*.*' jobs: release: @@ -54,7 +53,7 @@ jobs: shell: bash working-directory: dest run: | - helm repo index . --url https://pabloromeo.github.io/clusterplex + helm repo index --url https://pabloromeo.github.io/clusterplex --merge index.yaml . - name: Commit changes uses: stefanzweifel/git-auto-commit-action@v4 diff --git a/.github/workflows/lscr.yml b/.github/workflows/lscr.yml index 3424ec3..c260147 100644 --- a/.github/workflows/lscr.yml +++ b/.github/workflows/lscr.yml @@ -12,15 +12,19 @@ on: - 'experimental' tags: - 'v*.*.*' + paths-ignore: + - 'charts/**' pull_request: branches: - 'master' - 'dev' + paths-ignore: + - 'charts/**' -permissions: +permissions: contents: read packages: write - + # A workflow run is made up of one or more jobs that can run sequentially or in parallel jobs: build-pms-docker-mod: @@ -29,7 +33,7 @@ jobs: # Get the repositery's code - name: Checkout uses: actions/checkout@v3 - + # https://github.com/docker/setup-buildx-action - name: Set up Docker Buildx id: buildx @@ -90,7 +94,7 @@ jobs: # Get the repositery's code - name: Checkout uses: actions/checkout@v3 - + # https://github.com/docker/setup-buildx-action - name: Set up Docker Buildx id: buildx diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 3d94df2..90de744 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -14,10 +14,14 @@ on: - "experimental" tags: - "v*.*.*" + paths-ignore: + - 'charts/**' pull_request: branches: - "master" - "dev" + paths-ignore: + - 'charts/**' permissions: contents: read diff --git a/charts/clusterplex/Chart.yaml b/charts/clusterplex/Chart.yaml index a0aee98..6d48d0e 100644 --- a/charts/clusterplex/Chart.yaml +++ b/charts/clusterplex/Chart.yaml @@ -12,5 +12,5 @@ sources: - https://github.com/pabloromeo/clusterplex - https://github.com/linuxserver/docker-plex - https://plex.tv -version: 1.0.1 +version: 1.1.0 appVersion: 1.4.5 \ No newline at end of file diff --git a/charts/clusterplex/README.md b/charts/clusterplex/README.md index 5e95014..c3b1ad9 100644 --- a/charts/clusterplex/README.md +++ b/charts/clusterplex/README.md @@ -1,6 +1,6 @@ # clusterplex -![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.4.4](https://img.shields.io/badge/AppVersion-1.4.4-informational?style=flat-square) +![Version: 1.1.0](https://img.shields.io/badge/Version-1.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.4.5](https://img.shields.io/badge/AppVersion-1.4.5-informational?style=flat-square) ClusterPlex is basically an extended version of Plex, which supports distributed Workers across a cluster to handle transcoding requests. @@ -67,11 +67,13 @@ $ helm install clusterplex clusterplex/clusterplex | pms.env | string | `nil` | Additional environment variables. Template enabled. Syntax options: A) TZ: UTC B) PASSWD: '{{ .Release.Name }}' C) PASSWD: configMapKeyRef: name: config-map-name key: key-name D) PASSWD: valueFrom: secretKeyRef: name: secret-name key: key-name ... 
E) - name: TZ value: UTC F) - name: TZ value: '{{ .Release.Name }}' | | pms.config | object | See below | Supply the configuration items used to configure the PMS component | | pms.config.transcoderVerbose | int | `1` | Set this to 1 if you want only info logging from the transcoder or 0 if you want debugging logs | -| pms.config.transcodeOperatingMode | string | `"both"` | Set the transcode operating mode. Valid options are local (No workers), remote (only remote workers), both (default, remote first then local if remote fails). You MUST set this to local if you disable the worker installation. | +| pms.config.transcodeOperatingMode | string | `"both"` | Set the transcode operating mode. Valid options are local (No workers), remote (only remote workers), both (default, remote first then local if remote fails). If you disable the worker, this will be set to local automatically, as that is the only valid option for that configuration. | | pms.config.plexClaimToken | string | `nil` | Set the Plex claim token obtained from https://plex.tv/claim | | pms.config.version | string | `"docker"` | Set the version of Plex to use. Valid options are docker, latest, public, or a specific version. [[ref](https://github.com/linuxserver/docker-plex#application-setup)] | | pms.config.port | int | `32400` | The port that Plex will listen on | +| pms.config.localRelayEnabled | bool | `true` | Enable or disable the local relay function. In most cases this should be left to the default (true). If you disable this, you must add the pod IP address of each worker or the pod network CIDR to Plex under the `List of IP addresses and networks that are allowed without auth` option in Plex's network configuration. | | pms.config.relayPort | int | `32499` | The port that the relay service will listen on | +| pms.config.pmsIP | string | `""` | The IP address that Plex is using. This is only utilized if you disable the localRelayEnabled option above. | | pms.serviceConfig | object | See below | Configure the kubernetes service associated with the the PMS component | | pms.serviceConfig.externalTrafficPolicy | string | `nil` | Specify the externalTrafficPolicy for the service. Options: Cluster, Local [[ref](https://kubernetes.io/docs/tutorials/services/source-ip/)] | | pms.serviceConfig.annotations | object | `{}` | Provide additional annotations which may be required. |
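The two relay options documented above pair up: with the relay enabled nothing extra is needed, while disabling it requires a routable Plex address. A minimal values-override sketch, with the IP shown purely a placeholder rather than a value from the chart:

```yaml
# Hypothetical override file (e.g. my-values.yaml) passed to helm install/upgrade with -f.
pms:
  config:
    # Default: workers reach Plex through the in-cluster relay service.
    localRelayEnabled: true
    relayPort: 32499

    # Alternative: disable the relay and point workers at Plex directly.
    # pmsIP must then be a valid IP address, and that address (or the pod
    # network CIDR) must be listed in Plex under "List of IP addresses and
    # networks that are allowed without auth".
    # localRelayEnabled: false
    # pmsIP: "192.168.1.50"   # placeholder address
```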
diff --git a/charts/clusterplex/custom-values.yaml b/charts/clusterplex/custom-values.yaml new file mode 100644 index 0000000..8fe7474 --- /dev/null +++ b/charts/clusterplex/custom-values.yaml @@ -0,0 +1,508 @@ +global: + # -- Configure the Plex image that will be used for the PMS and Worker components + # @default -- See below + plexImage: + # -- The image that will be used + repository: linuxserver/plex + + # -- The image tag to use + tag: latest + + # -- Defines when the image should be pulled. Options are Always (default), IfNotPresent, and Never + imagePullPolicy: Always + + # -- The ClusterPlex version of docker mod images to pull + # @default -- The appVersion for this chart + clusterplexVersion: + + # -- The timezone configured for each pod + timezone: America/Chicago + + # -- The process group ID that the LinuxServer Plex container will run Plex/Worker as. + PGID: 1000 + + # -- The process user ID that the LinuxServer Plex container will run Plex/Worker as. + PUID: 1000 + + sharedStorage: + # -- Configure the volume that will be mounted to the PMS and worker pods for a shared location for transcoding files. + # @default -- See below + transcode: + # -- Enable or disable the transcode PVC. This should only be disabled if you are not using the workers. + enabled: true + + # -- Storage class for the transcode volume. + # If set to `-`, dynamic provisioning is disabled. + # If set to something else, the given storageClass is used. + # If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. + # NOTE: This class must support ReadWriteMany otherwise you will encounter errors. + storageClass: ceph-filesystem + + # -- If you want to reuse an existing claim, the name of the existing PVC can be passed here. + existingClaim: # your-claim + + # -- Used in conjunction with `existingClaim`. Specifies a sub-path inside the referenced volume instead of its root + subPath: # some-subpath + + # -- The size of the transcode volume. + size: 10Gi + + # -- Set to true to retain the PVC upon `helm uninstall` + retain: true + + # -- Configure the media volume that will contain all of your media. If you need more volumes you need to add them under + # the pms and worker sections manually. Those volumes must already be present in the cluster. + # @default -- See below + media: + # -- Enables or disables the volume + enabled: true + + # -- Storage Class for the media volume. + # If set to `-`, dynamic provisioning is disabled. + # If set to something else, the given storageClass is used. + # If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. + # NOTE: This class must support ReadWriteMany otherwise you will encounter errors. + storageClass: ceph-filesystem + + # -- If you want to reuse an existing claim, the name of the existing PVC can be passed here. + existingClaim: # your-claim + + # -- Used in conjunction with `existingClaim`. Specifies a sub-path inside the referenced volume instead of its root + subPath: # some-subpath + + # -- The amount of storage that is requested for the persistent volume. + size: 100Gi + + # -- Set to true to retain the PVC upon `helm uninstall` + retain: true + + # -- Use this section to add additional media mounts if necessary. You can copy the contents of the above media section. + additionalMediaVolumes: {} + + + + +# -- Configure the Plex Media Server component +# @default -- See below +pms: + # -- Enable or disable the Plex Media Server component + enabled: true + + + # -- Additional environment variables. Template enabled. + # Syntax options: + # A) TZ: UTC + # B) PASSWD: '{{ .Release.Name }}' + # C) PASSWD: + # configMapKeyRef: + # name: config-map-name + # key: key-name + # D) PASSWD: + # valueFrom: + # secretKeyRef: + # name: secret-name + # key: key-name + # ... + # E) - name: TZ + # value: UTC + # F) - name: TZ + # value: '{{ .Release.Name }}' + env: + + # -- Supply the configuration items used to configure the PMS component + # @default -- See below + config: + # -- Set this to 1 if you want only info logging from the transcoder or 0 if you want debugging logs + transcoderVerbose: 1 + + # -- Set the transcode operating mode. Valid options are local (No workers), remote (only remote workers), both (default, remote first then local if remote fails). + # If you disable the worker, this will be set to local automatically, as that is the only valid option for that configuration. + transcodeOperatingMode: both + + # -- Set the Plex claim token obtained from https://plex.tv/claim + plexClaimToken: + + # -- Set the version of Plex to use.
Valid options are docker, latest, public, or a specific version. + # [[ref](https://github.com/linuxserver/docker-plex#application-setup)] + version: docker + + # -- The port that Plex will listen on + port: 32400 + + # -- Enable or disable the local relay function. In most cases this should be left to the default (true). + # If you disable this, you must add the pod IP address of each worker or the pod network CIDR to Plex under the + # `List of IP addresses and networks that are allowed without auth` option in Plex's network configuration. + localRelayEnabled: false + + # -- The port that the relay service will listen on + relayPort: 32499 + + # -- The IP address that Plex is using. This is only utilized if you disable the localRelayEnabled option above. + pmsIP: "" + + # -- Configure the kubernetes service associated with the PMS component + # @default -- See below + serviceConfig: + # -- Configure the type of service + type: ClusterIP + + # -- Specify the externalTrafficPolicy for the service. Options: Cluster, Local + # [[ref](https://kubernetes.io/docs/tutorials/services/source-ip/)] + externalTrafficPolicy: + + # -- Provide additional annotations which may be required. + annotations: {} + + # -- Provide additional labels which may be required. + labels: {} + + # -- Configure the ingress for Plex here. + # @default -- See below + ingressConfig: + # -- Enables or disables the ingress + enabled: false + + # -- Provide additional annotations which may be required. + annotations: + {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + + # -- Provide additional labels which may be required. + labels: {} + + # -- Set the ingressClass that is used for this ingress. + ingressClassName: # "nginx" + + ## Configure the hosts for the ingress + hosts: + - # -- Host address. Helm template can be passed. + host: chart-example.local + ## Configure the paths for the host + paths: + - # -- Path. Helm template can be passed. + path: / + pathType: Prefix + service: + # -- Overrides the service name reference for this path + name: + # -- Overrides the service port reference for this path + port: + + # -- Configure TLS for the ingress. Both secretName and hosts can process a Helm template. + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + + # -- Configure the volume that stores all the Plex configuration and metadata + # @default -- See below + configVolume: + # -- Enables or disables the volume + enabled: true + + # -- Storage Class for the config volume. + # If set to `-`, dynamic provisioning is disabled. + # If set to something else, the given storageClass is used. + # If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. + storageClass: # "-" + + # -- If you want to reuse an existing claim, the name of the existing PVC can be passed here. + existingClaim: # your-claim + + # -- Used in conjunction with `existingClaim`. Specifies a sub-path inside the referenced volume instead of its root + subPath: # some-subpath + + # -- AccessMode for the persistent volume. + # Make sure to select an access mode that is supported by your storage provider! + # [[ref]](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) + accessMode: ReadWriteOnce + + # -- The amount of storage that is requested for the persistent volume.
size: 25Gi + + # -- Set to true to retain the PVC upon `helm uninstall` + retain: true + + # -- Enable or disable the various health check probes for this component + # @default -- See below + healthProbes: + # -- Enable or disable the startup probe + startup: true + + # -- Enable or disable the readiness probe + readiness: true + + # -- Enable or disable the liveness probe + liveness: true + + # -- Configure the resource requests and limits for the PMS component + # @default -- See below + resources: + requests: + # -- CPU Request amount + cpu: 2000m + + # -- Memory Request amount + memory: 2Gi + + limits: + # -- CPU Limit amount + cpu: 4000m + + # -- Memory Limit amount + memory: 4Gi + + + + +# -- Configure the orchestrator component +# @default -- See below +orchestrator: + # -- Enable or disable the Orchestrator component + enabled: true + + image: + # -- image repository + repository: ghcr.io/pabloromeo/clusterplex_orchestrator + + # -- image pull policy + pullPolicy: IfNotPresent + + # -- Additional environment variables. Template enabled. + # Syntax options: + # A) TZ: UTC + # B) PASSWD: '{{ .Release.Name }}' + # C) PASSWD: + # configMapKeyRef: + # name: config-map-name + # key: key-name + # D) PASSWD: + # valueFrom: + # secretKeyRef: + # name: secret-name + # key: key-name + # ... + # E) - name: TZ + # value: UTC + # F) - name: TZ + # value: '{{ .Release.Name }}' + env: + + # -- Supply the configuration items used to configure the Orchestrator component + # @default -- See below + config: + # -- The port that the Orchestrator will listen on + port: 3500 + + # -- Configures how the worker is chosen when a transcoding job is initiated. + # Options are LOAD_CPU, LOAD_TASKS, RR, and LOAD_RANK (default). + # [[ref]](https://github.com/pabloromeo/clusterplex/tree/master/docs#orchestrator) + workerSelectionStrategy: LOAD_RANK + + # -- Configure the kubernetes service associated with the orchestrator component + # @default -- See below + serviceConfig: + # -- Configure the type of service + type: ClusterIP + + # -- Specify the externalTrafficPolicy for the service. Options: Cluster, Local + # [[ref](https://kubernetes.io/docs/tutorials/services/source-ip/)] + externalTrafficPolicy: + + # -- Provide additional annotations which may be required. + annotations: {} + + # -- Provide additional labels which may be required. + labels: {} + + # -- Configure a ServiceMonitor for use with Prometheus monitoring + # @default -- See below + prometheusServiceMonitor: + # -- Enable the ServiceMonitor creation + enabled: false + + # -- Provide additional annotations which may be required. + annotations: {} + + # -- Provide additional labels which may be required. + labels: {} + + # -- Provide a custom selector if desired. Note that this will take precedence over the default + # method of using the orchestrator's namespace. This usually should not be required. + customSelector: {} + + # -- Configure how often Prometheus should scrape this metrics endpoint in seconds + scrapeInterval: 30s + + # -- Configure how long Prometheus should wait for the endpoint to reply before + # considering the request to have timed out. + scrapeTimeout: 10s + + # -- Configures if the Grafana dashboard for the orchestrator component is deployed to the cluster or not. + # If enabled, this creates a ConfigMap containing the dashboard JSON so that your Grafana instance can detect it.
# This requires your Grafana instance to have grafana.sidecar.dashboards.enabled set to true and the searchNamespace + # set to ALL, otherwise the dashboard will not be discovered (see the Grafana values sketch after this file). + enableGrafanaDashboard: false + + # -- Enable or disable the various health check probes for this component + # @default -- See below + healthProbes: + # -- Enable or disable the startup probe + startup: true + + # -- Enable or disable the readiness probe + readiness: true + + # -- Enable or disable the liveness probe + liveness: true + + # -- Configure the resource requests and limits for the orchestrator component + # @default -- See below + resources: + requests: + # -- CPU Request amount + cpu: 200m + + # -- Memory Request amount + memory: 64Mi + + limits: + # -- CPU Limit amount + cpu: 500m + + # -- Memory Limit amount + memory: 128Mi + + + + +# -- Configure the worker component +# @default -- See below +worker: + # -- Enable or disable the Worker component + enabled: true + + # -- Additional environment variables. Template enabled. + # Syntax options: + # A) TZ: UTC + # B) PASSWD: '{{ .Release.Name }}' + # C) PASSWD: + # configMapKeyRef: + # name: config-map-name + # key: key-name + # D) PASSWD: + # valueFrom: + # secretKeyRef: + # name: secret-name + # key: key-name + # ... + # E) - name: TZ + # value: UTC + # F) - name: TZ + # value: '{{ .Release.Name }}' + env: + + # -- Supply the configuration items used to configure the worker component + # @default -- See below + config: + # -- The number of instances of the worker to run + replicas: 0 + + # -- The port the worker will expose its metrics on for the orchestrator to find + port: 3501 + + # -- The frequency at which workers send stats to the orchestrator in ms + cpuStatInterval: 10000 + + # -- Controls usage of the EasyAudioDecoder: 1 = ON (default) and 0 = OFF + eaeSupport: 1 + + # -- Configure the kubernetes service associated with the worker component + # @default -- See below + serviceConfig: + # -- Configure the type of service + type: ClusterIP + + # -- Specify the externalTrafficPolicy for the service. Options: Cluster, Local + # [[ref](https://kubernetes.io/docs/tutorials/services/source-ip/)] + externalTrafficPolicy: + + # -- Provide additional annotations which may be required. + annotations: {} + + # -- Provide additional labels which may be required. + labels: {} + + # -- Enable or disable the per-pod volumes that cache the codecs. This saves a great deal of time when starting the workers. + # @default -- See below + codecVolumes: + # -- Enable or disable the creation of the codec volumes + enabled: true + + # -- Add any extra labels needed + labels: {} + + # -- Add any extra annotations needed + annotations: {} + + # -- AccessMode for the persistent volume. + # Make sure to select an access mode that is supported by your storage provider! + # [[ref]](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) + accessMode: ReadWriteOnce + + # -- The size of the volume + size: 1Gi + + # -- Storage Class for the codec volumes. + # If set to `-`, dynamic provisioning is disabled. + # If set to something else, the given storageClass is used. + # If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. + storageClass: + + # -- Enable or disable the various health check probes for this component + # @default -- See below + healthProbes: + # -- Enable or disable the startup probe + startup: true + + # -- Enable or disable the readiness probe + readiness: true + + # -- Enable or disable the liveness probe + liveness: true + + # -- Configure the resource requests and limits for the worker component + # @default -- See below + resources: + requests: + # -- CPU Request amount + cpu: 2000m + + # -- Memory Request amount + memory: 3Gi + + limits: + # -- CPU Limit amount + cpu: 4000m + + # -- Memory Limit amount + memory: 6Gi + + # -- Configure the affinity rules for the worker pods. This helps prevent multiple worker pods from + # being scheduled on the same node as another worker pod or as the main Plex Media Server. + affinity: {} + # podAntiAffinity: + # preferredDuringSchedulingIgnoredDuringExecution: + # - podAffinityTerm: + # labelSelector: + # matchLabels: + # name: clusterplex-worker + # topologyKey: kubernetes.io/hostname + # weight: 100 + # - podAffinityTerm: + # labelSelector: + # matchLabels: + # name: clusterplex-pms + # topologyKey: kubernetes.io/hostname + # weight: 50 \ No newline at end of file
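The `enableGrafanaDashboard` option above only creates a ConfigMap; discovery depends on the Grafana sidecar settings the comment refers to. A hedged sketch of the matching values, assuming Grafana is deployed through kube-prometheus-stack (the standalone grafana chart uses the same keys without the top-level `grafana:` block):

```yaml
# Hypothetical kube-prometheus-stack values so the dashboard ConfigMap
# created by enableGrafanaDashboard is picked up by Grafana.
grafana:
  sidecar:
    dashboards:
      enabled: true          # dashboard sidecar must be running
      searchNamespace: ALL   # look beyond Grafana's own namespace
```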
diff --git a/charts/clusterplex/templates/pms.yaml b/charts/clusterplex/templates/pms.yaml index f64af96..eab2522 100644 --- a/charts/clusterplex/templates/pms.yaml +++ b/charts/clusterplex/templates/pms.yaml @@ -17,12 +17,27 @@ configMaps: PUID: '{{ .Values.global.PUID | default "1000" }}' DOCKER_MODS: 'ghcr.io/pabloromeo/clusterplex_dockermod:{{ .Values.global.clusterplexVersion | default .Chart.AppVersion }}' ORCHESTRATOR_URL: 'http://{{ .Release.Name }}-orchestrator:{{ .Values.orchestrator.config.port | default "3500" }}' + {{ if .Values.pms.config.localRelayEnabled }} PMS_SERVICE: '{{ .Release.Name }}-pms' + {{ else }} + {{- if not (regexMatch "^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$" .Values.pms.config.pmsIP) }} + {{- fail "Invalid or empty pmsIP provided. This must be a valid IP address if localRelayEnabled is false." }} + {{- end }} + PMS_IP: '{{ .Values.pms.config.pmsIP }}' + {{- end }} PMS_PORT: '{{ .Values.pms.config.port | default "32400" }}' TRANSCODER_VERBOSE: '{{ .Values.pms.config.transcoderVerbose | default "1" }}' + {{ if .Values.worker.enabled }} TRANSCODE_OPERATING_MODE: '{{ .Values.pms.config.transcodeOperatingMode | default "both" }}' + {{ else }} + TRANSCODE_OPERATING_MODE: 'local' + {{- end }} + {{ if .Values.pms.config.localRelayEnabled }} LOCAL_RELAY_ENABLED: '1' LOCAL_RELAY_PORT: '{{ .Values.pms.config.relayPort | default "32499" }}' + {{ else }} + LOCAL_RELAY_ENABLED: '0' + {{- end }} {{ if .Values.pms.config.plexClaimToken }} secrets:
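With the template changes above, the PMS ConfigMap renders differently depending on `localRelayEnabled` and `worker.enabled`. A rough sketch of the affected data keys, assuming a release named `clusterplex` and a placeholder IP:

```yaml
# localRelayEnabled: true (default), worker enabled:
PMS_SERVICE: 'clusterplex-pms'
LOCAL_RELAY_ENABLED: '1'
LOCAL_RELAY_PORT: '32499'
TRANSCODE_OPERATING_MODE: 'both'
---
# localRelayEnabled: false, worker disabled
# (pmsIP must match the dotted-quad regex or rendering fails):
PMS_IP: '192.168.1.50'   # placeholder address
LOCAL_RELAY_ENABLED: '0'
TRANSCODE_OPERATING_MODE: 'local'
```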
diff --git a/charts/clusterplex/templates/worker.yaml b/charts/clusterplex/templates/worker.yaml index e0b922b..25d5ad6 100644 --- a/charts/clusterplex/templates/worker.yaml +++ b/charts/clusterplex/templates/worker.yaml @@ -9,7 +9,10 @@ image: controller: type: statefulset - replicas: '{{ .Values.worker.config.replicas | default 1 }}' + {{- if and .Values.worker.enabled (lt (.Values.worker.config.replicas | int) 1) }} + {{- fail "Invalid value for worker.config.replicas. Must be greater than or equal to 1 when worker.enabled is true." }} + {{- end }} + replicas: '{{ .Values.worker.config.replicas }}' configMaps: config: @@ -123,7 +126,7 @@ probes: scheme: HTTP path: /health port: http - initialDelaySeconds: 60 + initialDelaySeconds: 120 timeoutSeconds: 5 periodSeconds: 10 successThreshold: 1 @@ -141,7 +144,7 @@ probes: scheme: HTTP path: /health port: http - initialDelaySeconds: 60 + initialDelaySeconds: 120 timeoutSeconds: 5 periodSeconds: 10 successThreshold: 1 diff --git a/charts/clusterplex/values.yaml b/charts/clusterplex/values.yaml index b7eb61c..77c46b5 100644 --- a/charts/clusterplex/values.yaml +++ b/charts/clusterplex/values.yaml @@ -116,7 +116,7 @@ pms: transcoderVerbose: 1 # -- Set the transcode operating mode. Valid options are local (No workers), remote (only remote workers), both (default, remote first then local if remote fails). - # You MUST set this to local if you disable the worker installation. + # If you disable the worker, this will be set to local automatically, as that is the only valid option for that configuration. transcodeOperatingMode: both # -- Set the Plex claim token obtained from https://plex.tv/claim @@ -129,9 +129,17 @@ pms: # -- The port that Plex will listen on port: 32400 + # -- Enable or disable the local relay function. In most cases this should be left to the default (true). + # If you disable this, you must add the pod IP address of each worker or the pod network CIDR to Plex under the + # `List of IP addresses and networks that are allowed without auth` option in Plex's network configuration. + localRelayEnabled: true + # -- The port that the relay service will listen on relayPort: 32499 + # -- The IP address that Plex is using. This is only utilized if you disable the localRelayEnabled option above. + pmsIP: "" + # -- Configure the kubernetes service associated with the the PMS component # @default -- See below serviceConfig:
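With the replica guard added to worker.yaml above, a values override now has to either keep at least one worker or disable the component entirely, in which case the chart falls back to local transcoding. A hedged sketch of both shapes, with the replica count purely illustrative:

```yaml
# Option 1: keep remote transcoding, with an explicit replica count (must be >= 1).
worker:
  enabled: true
  config:
    replicas: 2

# Option 2: no workers at all; the chart then forces TRANSCODE_OPERATING_MODE to 'local'.
# worker:
#   enabled: false
```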