From 400c43d35db268bec7fdc305c2d24d38f2a9f4a5 Mon Sep 17 00:00:00 2001 From: Brandan Schmitz Date: Wed, 7 Jun 2023 21:54:26 -0500 Subject: [PATCH 1/5] Add initial helm chart --- .github/workflows/chart.yaml | 74 +++ .gitignore | 3 + README.md | 3 +- charts/clusterplex/.helmignore | 24 + charts/clusterplex/Chart.lock | 6 + charts/clusterplex/Chart.yaml | 16 + charts/clusterplex/README.md | 145 ++++++ charts/clusterplex/README.md.gotmpl | 30 ++ .../clusterplex/templates/orchestrator.yaml | 79 ++++ charts/clusterplex/templates/pms.yaml | 150 ++++++ .../clusterplex/templates/shared-storage.yaml | 29 ++ charts/clusterplex/templates/worker.yaml | 145 ++++++ charts/clusterplex/values.yaml | 435 ++++++++++++++++++ 13 files changed, 1138 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/chart.yaml create mode 100644 charts/clusterplex/.helmignore create mode 100644 charts/clusterplex/Chart.lock create mode 100644 charts/clusterplex/Chart.yaml create mode 100644 charts/clusterplex/README.md create mode 100644 charts/clusterplex/README.md.gotmpl create mode 100644 charts/clusterplex/templates/orchestrator.yaml create mode 100644 charts/clusterplex/templates/pms.yaml create mode 100644 charts/clusterplex/templates/shared-storage.yaml create mode 100644 charts/clusterplex/templates/worker.yaml create mode 100644 charts/clusterplex/values.yaml diff --git a/.github/workflows/chart.yaml b/.github/workflows/chart.yaml new file mode 100644 index 0000000..a902442 --- /dev/null +++ b/.github/workflows/chart.yaml @@ -0,0 +1,74 @@ +name: Release Charts + +on: + push: + branches: + - master + tags: + - 'v*.*.*' + +jobs: + release: + permissions: + contents: write + runs-on: ubuntu-22.04 + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + path: "src" + fetch-depth: 0 + + - name: Checkout gh-pages branch + uses: actions/checkout@v3 + with: + path: "dest" + ref: "gh-pages" + fetch-depth: 0 + + - name: Install Helm + uses: azure/setup-helm@v3 + with: + version: 3.12.0 + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Install Helm Docs + run: | + cd /tmp + wget https://github.com/norwoodj/helm-docs/releases/download/v1.11.0/helm-docs_1.11.0_Linux_x86_64.tar.gz + tar -xvf helm-docs_1.11.0_Linux_x86_64.tar.gz + sudo mv helm-docs /usr/local/sbin + + - name: Generate Helm Docs + shell: bash + run: | + helm-docs --chart-search-root=src/charts/clusterplex --sort-values-order=file + cp -f src/charts/clusterplex/README.md dest/README.md + + - name: Package Helm Chart + shell: bash + run: | + helm package src/charts/clusterplex --dependency-update --destination dest/ + + - name: Update Chart Index + shell: bash + working-directory: dest + run: | + helm repo index . 
--url https://pabloromeo.github.io/clusterplex + + - name: Commit changes + uses: stefanzweifel/git-auto-commit-action@v4 + id: auto-commit + with: + repository: dest + branch: gh-pages + file_pattern: "index.yaml *.tgz *.md" + + - name: Wait for deploy + uses: fountainhead/action-wait-for-check@v1.1.0 + if: ${{ steps.auto-commit.outputs.changes_detected }} + id: wait-for-deploy + with: + token: ${{ secrets.GITHUB_TOKEN }} + ref: ${{ steps.auto-commit.outputs.commit_hash }} + checkName: deploy \ No newline at end of file diff --git a/.gitignore b/.gitignore index f21a55c..3e99214 100644 --- a/.gitignore +++ b/.gitignore @@ -61,3 +61,6 @@ typings/ .next docker-compose.yml + +# macOS +**/.DS_STORE diff --git a/README.md b/README.md index 322625f..56969fa 100644 --- a/README.md +++ b/README.md @@ -74,7 +74,7 @@ Workers require a path to store downloaded codecs for the particular architectur Codecs are downloaded when the worker container starts up. The path within the container is **/codecs**, which you can mount to a volume in order to have them persisted across container recreations. Subdirectories for each plex version and architecture are created within it. - + ## Network settings in PMS ## Latest versions of ClusterPlex don't require any special network configuration, due to the new **Local Relay** functionality which forwards calls from Workers to Plex, which is enabled by default. @@ -91,3 +91,4 @@ See the [docs](docs/) section for details on each component's configuration para * [On Kubernetes](docs/kubernetes/) * [On Docker Swarm](docs/docker-swarm/) * [Grafana Dashboard and Metrics](docs/grafana-dashboard/) +* [On Kubernetes via Helm](https://pabloromeo.github.io/clusterplex) diff --git a/charts/clusterplex/.helmignore b/charts/clusterplex/.helmignore new file mode 100644 index 0000000..04ecd88 --- /dev/null +++ b/charts/clusterplex/.helmignore @@ -0,0 +1,24 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +README.md.gotmpl +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/clusterplex/Chart.lock b/charts/clusterplex/Chart.lock new file mode 100644 index 0000000..9dda564 --- /dev/null +++ b/charts/clusterplex/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://bjw-s.github.io/helm-charts + version: 1.5.1 +digest: sha256:3588c89621170f198d4938664d3ea4c469bd91fd78183c83cfcf63f474d348c4 +generated: "2023-06-06T20:59:49.839068-05:00" diff --git a/charts/clusterplex/Chart.yaml b/charts/clusterplex/Chart.yaml new file mode 100644 index 0000000..a320af1 --- /dev/null +++ b/charts/clusterplex/Chart.yaml @@ -0,0 +1,16 @@ +apiVersion: v2 +name: clusterplex +description: ClusterPlex is basically an extended version of Plex, which supports distributed Workers across a cluster to handle transcoding requests. 
+type: application +kubeVersion: ">=1.24.0-0" +dependencies: + - name: common + repository: https://bjw-s.github.io/helm-charts + version: 1.5.1 +home: https://github.com/pabloromeo/clusterplex/charts/clusterplex +sources: + - https://github.com/pabloromeo/clusterplex + - https://github.com/linuxserver/docker-plex + - https://plex.tv +version: 1.0.0 +appVersion: 1.4.4 \ No newline at end of file diff --git a/charts/clusterplex/README.md b/charts/clusterplex/README.md new file mode 100644 index 0000000..9ac4c87 --- /dev/null +++ b/charts/clusterplex/README.md @@ -0,0 +1,145 @@ +# clusterplex + +![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.4.4](https://img.shields.io/badge/AppVersion-1.4.4-informational?style=flat-square) + +ClusterPlex is basically an extended version of Plex, which supports distributed Workers across a cluster to handle transcoding requests. + +
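In broad strokes, the chart renders three workloads through the bjw-s common library plus the shared storage claims. The sketch below lists the objects you can roughly expect for a release named `clusterplex`; the controller kinds for PMS and the orchestrator follow the common library's defaults rather than anything set explicitly in this chart, so treat the inventory as illustrative:

```yaml
# Rough inventory for `helm install clusterplex ...` (kinds/names assume the
# bjw-s common-library defaults used by these templates):
pms:           # Deployment + Service `clusterplex-pms`: linuxserver/plex with the
               # clusterplex_dockermod, ports 32400 (plex) and 32499 (local relay)
orchestrator:  # Deployment + Service `clusterplex-orchestrator`: receives transcode
               # jobs and selects a worker, port 3500
worker:        # StatefulSet `clusterplex-worker`: linuxserver/plex with the
               # clusterplex_worker_dockermod, port 3501, optional per-pod codec PVCs
sharedStorage: # PVCs `clusterplex-transcode` and `clusterplex-media`, mounted
               # ReadWriteMany by both PMS and the workers
```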
+ +## Source Code + +* +* +* + +
+ +## Requirements + +Kubernetes: `>=1.24.0-0` + +| Repository | Name | Version | +|------------|------|---------| +| https://bjw-s.github.io/helm-charts | common | 1.5.1 | + +
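Beyond the versions above, note that the shared `transcode` and `media` volumes are mounted by both PMS and the worker pods, so the backing storage must support `ReadWriteMany`. A minimal sketch of pointing those volumes at pre-provisioned claims instead of letting the chart create PVCs (the claim names are placeholders):

```yaml
# values-storage.yaml: illustrative only; replace the claim names with your own
# ReadWriteMany-capable PVCs
global:
  sharedStorage:
    transcode:
      existingClaim: nfs-transcode
    media:
      existingClaim: nfs-media
      subPath: library   # optional: mount a sub-path of the claim instead of its root
```

If you let the chart provision these volumes instead, set `storageClass` to a class that supports ReadWriteMany, as noted in the Values table below.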
+ +## Installing the Chart + +To install the chart with the release name `clusterplex`: + +```console +$ helm repo add clusterplex http://pabloromeo.github.io/clusterplex +$ helm install clusterplex clusterplex/clusterplex +``` + +
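The defaults deploy PMS, the orchestrator, and two workers. As a starting point, a small overrides file might look like the sketch below; every key is documented in the Values table that follows, and the claim token and hostname are placeholders:

```yaml
# my-values.yaml: example overrides only
global:
  timezone: Europe/Madrid
pms:
  config:
    plexClaimToken: claim-xxxxxxxxxx   # placeholder, obtain from https://plex.tv/claim
  ingressConfig:
    enabled: true
    ingressClassName: nginx
    hosts:
      - host: plex.example.com         # placeholder hostname
        paths:
          - path: /
            pathType: Prefix
worker:
  config:
    replicas: 3
```

Apply it with `helm install clusterplex clusterplex/clusterplex -f my-values.yaml`, or adjust individual keys with `--set`.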
+ +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| global.plexImage | object | See below | Configure the plex image that will be used for the PMS and Worker components | +| global.plexImage.repository | string | `"linuxserver/plex"` | The image that will be used | +| global.plexImage.tag | string | `"latest"` | The image tag to use | +| global.plexImage.imagePullPolicy | string | `"Always"` | Defines when the image should be pulled. Options are Always (default), IfNotPresent, and Never | +| global.clusterplexVersion | string | The appVersion for this chart | The CluterPlex version of docker mod images to pull | +| global.timezone | string | `"America/Chicago"` | The timezone configured for each pod | +| global.PGID | int | `1000` | The process group ID that the LinuxServer Plex container will run Plex/Worker as. | +| global.PUID | int | `1000` | The process user ID that the LinuxServer Plex container will run Plex/Worker as. | +| global.sharedStorage.transcode | object | See below | Configure the volume that will be mounted to the PMS and worker pods for a shared location for transcoding files. | +| global.sharedStorage.transcode.enabled | bool | `true` | Enable or disable the transcode PVC. This should only be disabled if you are not using the workers. | +| global.sharedStorage.transcode.storageClass | string | `nil` | Storage class for the transcode volume. If set to `-`, dynamic provisioning is disabled. If set to something else, the given storageClass is used. If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. NOTE: This class must support ReadWriteMany otherwise you will encounter errors. | +| global.sharedStorage.transcode.existingClaim | string | `nil` | If you want to reuse an existing claim, the name of the existing PVC can be passed here. | +| global.sharedStorage.transcode.subPath | string | `nil` | Used in conjunction with `existingClaim`. Specifies a sub-path inside the referenced volume instead of its root | +| global.sharedStorage.transcode.size | string | `"10Gi"` | The size of the transcode volume. | +| global.sharedStorage.transcode.retain | bool | `true` | Set to true to retain the PVC upon `helm uninstall` | +| global.sharedStorage.media | object | See below | Configure the media volume that will contain all of your media. If you need more volumes you need to add them under the pms and worker sections manually. Those volumes must already be present in the cluster. | +| global.sharedStorage.media.enabled | bool | `true` | Enables or disables the volume | +| global.sharedStorage.media.storageClass | string | `nil` | Storage Class for the config volume. If set to `-`, dynamic provisioning is disabled. If set to something else, the given storageClass is used. If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. NOTE: This class must support ReadWriteMany otherwise you will encounter errors. | +| global.sharedStorage.media.existingClaim | string | `nil` | If you want to reuse an existing claim, the name of the existing PVC can be passed here. | +| global.sharedStorage.media.subPath | string | `nil` | Used in conjunction with `existingClaim`. Specifies a sub-path inside the referenced volume instead of its root | +| global.sharedStorage.media.size | string | `"100Gi"` | The amount of storage that is requested for the persistent volume. 
| +| global.sharedStorage.media.retain | bool | `true` | Set to true to retain the PVC upon `helm uninstall` | +| global.sharedStorage.additionalMediaVolumes | object | `{}` | Use this section to add additional media mounts if necessary. You can copy the contents of the above media | +| pms | object | See below | Configure the Plex Media Server component | +| pms.enabled | bool | `true` | Enable or disable the Plex Media Server component | +| pms.env | string | `nil` | Additional environment variables. Template enabled. Syntax options: A) TZ: UTC B) PASSWD: '{{ .Release.Name }}' C) PASSWD: configMapKeyRef: name: config-map-name key: key-name D) PASSWD: valueFrom: secretKeyRef: name: secret-name key: key-name ... E) - name: TZ value: UTC F) - name: TZ value: '{{ .Release.Name }}' | +| pms.config | object | See below | Supply the configuration items used to configure the PMS component | +| pms.config.transcoderVerbose | int | `1` | Set this to 1 if you want only info logging from the transcoder or 0 if you want debugging logs | +| pms.config.transcodeOperatingMode | string | `"both"` | Set the transcode operating mode. Valid options are local (No workers), remote (only remote workers), both (default, remote first then local if remote fails). You MUST set this to local if you disable the worker installation. | +| pms.config.plexClaimToken | string | `nil` | Set the Plex claim token obtained from https://plex.tv/claim | +| pms.config.version | string | `"docker"` | Set the version of Plex to use. Valid options are docker, latest, public, or a specific version. [[ref](https://github.com/linuxserver/docker-plex#application-setup)] | +| pms.config.port | int | `32400` | The port that Plex will listen on | +| pms.config.relayPort | int | `32499` | The port that the relay service will listen on | +| pms.serviceConfig | object | See below | Configure the kubernetes service associated with the the PMS component | +| pms.serviceConfig.externalTrafficPolicy | string | `nil` | Specify the externalTrafficPolicy for the service. Options: Cluster, Local [[ref](https://kubernetes.io/docs/tutorials/services/source-ip/)] | +| pms.serviceConfig.annotations | object | `{}` | Provide additional annotations which may be required. | +| pms.serviceConfig.labels | object | `{}` | Provide additional labels which may be required. | +| pms.ingressConfig | object | See below | Configure the ingress for plex here. | +| pms.ingressConfig.enabled | bool | `false` | Enables or disables the ingress | +| pms.ingressConfig.annotations | object | `{}` | Provide additional annotations which may be required. | +| pms.ingressConfig.labels | object | `{}` | Provide additional labels which may be required. | +| pms.ingressConfig.ingressClassName | string | `nil` | Set the ingressClass that is used for this ingress. | +| pms.ingressConfig.hosts[0].host | string | `"chart-example.local"` | Host address. Helm template can be passed. | +| pms.ingressConfig.hosts[0].paths[0].path | string | `"/"` | Path. Helm template can be passed. | +| pms.ingressConfig.hosts[0].paths[0].service.name | string | `nil` | Overrides the service name reference for this path | +| pms.ingressConfig.hosts[0].paths[0].service.port | string | `nil` | Overrides the service port reference for this path | +| pms.ingressConfig.tls | list | `[]` | Configure TLS for the ingress. Both secretName and hosts can process a Helm template. 
| +| pms.configVolume | object | See below | Configure the volume that stores all the Plex configuration and metadata | +| pms.configVolume.enabled | bool | `true` | Enables or disables the volume | +| pms.configVolume.storageClass | string | `nil` | Storage Class for the config volume. If set to `-`, dynamic provisioning is disabled. If set to something else, the given storageClass is used. If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. | +| pms.configVolume.existingClaim | string | `nil` | If you want to reuse an existing claim, the name of the existing PVC can be passed here. | +| pms.configVolume.subPath | string | `nil` | Used in conjunction with `existingClaim`. Specifies a sub-path inside the referenced volume instead of its root | +| pms.configVolume.accessMode | string | `"ReadWriteOnce"` | AccessMode for the persistent volume. Make sure to select an access mode that is supported by your storage provider! [[ref]](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) | +| pms.configVolume.size | string | `"25Gi"` | The amount of storage that is requested for the persistent volume. | +| pms.configVolume.retain | bool | `true` | Set to true to retain the PVC upon `helm uninstall` | +| pms.resources | object | See below | Configure the resource requests and limits for the PMS component | +| pms.resources.requests.cpu | int | `2` | CPU Request amount | +| pms.resources.limits.cpu | int | `4` | CPU Limit amount | +| pms.resources.limits.memory | string | `"4Gi"` | Memory Limit amount | +| orchestrator | object | See below | Configure the orchestrator component | +| orchestrator.enabled | bool | `true` | Enable or disable the Orchestrator component | +| orchestrator.image.repository | string | `"ghcr.io/pabloromeo/clusterplex_orchestrator"` | image repository | +| orchestrator.image.pullPolicy | string | `"IfNotPresent"` | image pull policy | +| orchestrator.env | string | `nil` | Additional environment variables. Template enabled. Syntax options: A) TZ: UTC B) PASSWD: '{{ .Release.Name }}' C) PASSWD: configMapKeyRef: name: config-map-name key: key-name D) PASSWD: valueFrom: secretKeyRef: name: secret-name key: key-name ... E) - name: TZ value: UTC F) - name: TZ value: '{{ .Release.Name }}' | +| orchestrator.config | object | See below | Supply the configuration items used to configure the Orchestrator component | +| orchestrator.config.port | int | `3500` | The port that the Orchestrator will listen on | +| orchestrator.config.workerSelectionStrategy | string | `"LOAD_RANK"` | Configures how the worker is chosen when a transcoding job is initiated. Options are LOAD_CPU, LOAD_TASKS, RR, and LOAD_RANK (default). [[ref]](https://github.com/pabloromeo/clusterplex/tree/master/docs#orchestrator) | +| orchestrator.serviceConfig | object | See below | Configure the kubernetes service associated with the the PMS component | +| orchestrator.serviceConfig.type | string | `"ClusterIP"` | Configure the type of service | +| orchestrator.serviceConfig.externalTrafficPolicy | string | `nil` | Specify the externalTrafficPolicy for the service. Options: Cluster, Local [[ref](https://kubernetes.io/docs/tutorials/services/source-ip/)] | +| orchestrator.serviceConfig.annotations | object | `{}` | Provide additional annotations which may be required. | +| orchestrator.serviceConfig.labels | object | `{}` | Provide additional labels which may be required. 
| +| orchestrator.resources | object | See below | Configure the resource requests and limits for the orchestrator component | +| orchestrator.resources.requests.cpu | string | `"200m"` | CPU Request amount | +| orchestrator.resources.limits.cpu | string | `"500m"` | CPU Limit amount | +| orchestrator.resources.limits.memory | string | `"128Mi"` | Memory Limit amount | +| worker | object | See below | Configure the worker component | +| worker.enabled | bool | `true` | Enable or disable the Worker component | +| worker.env | string | `nil` | Additional environment variables. Template enabled. Syntax options: A) TZ: UTC B) PASSWD: '{{ .Release.Name }}' C) PASSWD: configMapKeyRef: name: config-map-name key: key-name D) PASSWD: valueFrom: secretKeyRef: name: secret-name key: key-name ... E) - name: TZ value: UTC F) - name: TZ value: '{{ .Release.Name }}' | +| worker.config | object | See below | Supply the configuration items used to configure the worker component | +| worker.config.replicas | int | `2` | The number of instances of the worker to run | +| worker.config.port | int | `3501` | The port the worker will expose its metrics on for the orchestrator to find | +| worker.config.cpuStatInterval | int | `10000` | The frequency at which workers send stats to the orchestrator in ms | +| worker.config.eaeSupport | int | `1` | Controls usage of the EasyAudioDecoder 1 = ON (default) and 0 = OFF | +| worker.serviceConfig | object | See below | Configure the kubernetes service associated with the the PMS component | +| worker.serviceConfig.externalTrafficPolicy | string | `nil` | Specify the externalTrafficPolicy for the service. Options: Cluster, Local [[ref](https://kubernetes.io/docs/tutorials/services/source-ip/)] | +| worker.serviceConfig.annotations | object | `{}` | Provide additional annotations which may be required. | +| worker.serviceConfig.labels | object | `{}` | Provide additional labels which may be required. | +| worker.codecVolumes | object | See below | Enable or disable the per-pod volumes that cache the codecs. This saves a great deal of time when starting the workers. | +| worker.codecVolumes.enabled | bool | `true` | Enable or disable the creation of the codec volumes | +| worker.codecVolumes.labels | object | `{}` | Add any extra labels needed | +| worker.codecVolumes.annotations | object | `{}` | Add any extra annotations needed | +| worker.codecVolumes.accessMode | string | `"ReadWriteOnce"` | AccessMode for the persistent volume. Make sure to select an access mode that is supported by your storage provider! [[ref]](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) | +| worker.codecVolumes.size | string | `"1Gi"` | The size of the volume | +| worker.codecVolumes.storageClass | string | `nil` | Storage Class for the codec volumes If set to `-`, dynamic provisioning is disabled. If set to something else, the given storageClass is used. If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. | +| worker.resources | object | See below | Configure the resource requests and limits for the worker component | +| worker.resources.requests.cpu | string | `"2000m"` | CPU Request amount | +| worker.resources.requests.memory | string | `"3Gi"` | Memory Request Amount | +| worker.resources.limits.cpu | string | `"4000m"` | CPU Limit amount | +| worker.resources.limits.memory | string | `"6Gi"` | Memory Limit amount | +| worker.affinity | object | `{}` | Configure the affinity rules for the worker pods. 
This helps prevent multiple worker pods from being scheduled on the same node as another worker pod or as the main plex media server. | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) \ No newline at end of file diff --git a/charts/clusterplex/README.md.gotmpl b/charts/clusterplex/README.md.gotmpl new file mode 100644 index 0000000..be3d13d --- /dev/null +++ b/charts/clusterplex/README.md.gotmpl @@ -0,0 +1,30 @@ +{{ template "chart.header" . }} + +{{ template "chart.versionBadge" . }}{{ template "chart.typeBadge" . }}{{ template "chart.appVersionBadge" . }} + +{{ template "chart.description" . }} + +
+ +{{ template "chart.sourcesSection" . }} + +
+ +{{ template "chart.requirementsSection" . }} + +
+ +## Installing the Chart + +To install the chart with the release name `clusterplex`: + +```console +$ helm repo add clusterplex http://pabloromeo.github.io/clusterplex +$ helm install clusterplex clusterplex/clusterplex +``` + +
+ +{{ template "chart.valuesSection" . }} + +{{ template "helm-docs.versionFooter" . }} \ No newline at end of file diff --git a/charts/clusterplex/templates/orchestrator.yaml b/charts/clusterplex/templates/orchestrator.yaml new file mode 100644 index 0000000..b7fa96b --- /dev/null +++ b/charts/clusterplex/templates/orchestrator.yaml @@ -0,0 +1,79 @@ +{{- define "clusterplex.orchestrator.hardcodedValues" -}} +global: + nameOverride: "orchestrator" + +image: + tag: {{ .Values.global.clusterplexVersion | default .Chart.AppVersion | quote }} + +configMaps: + config: + enabled: true + data: + TZ: '{{ .Values.global.timezone | default "America/Chicago" }}' + LISTENING_PORT: '{{ .Values.orchestrator.config.port | default "3500" }}' + WORKER_SELECTION_STRATEGY: '{{ .Values.orchestrator.config.workerSelectionStrategy | default "LOAD_RANK" }}' + +envFrom: + - configMapRef: + name: "{{ .Release.Name }}-orchestrator-config" + +service: + main: + type: '{{ .Values.orchestrator.serviceConfig.type | default "ClusterIP" }}' + externalTrafficPolicy: '{{ .Values.orchestrator.serviceConfig.externalTrafficPolicy }}' + annotations: + {{- toYaml .Values.orchestrator.serviceConfig.annotations | nindent 6 }} + labels: + {{- toYaml .Values.orchestrator.serviceConfig.labels | nindent 6 }} + ports: + http: + enabled: true + primary: true + port: '{{ .Values.orchestrator.config.port | default "3500" }}' + protocol: TCP + +probes: + startup: + enabled: false + custom: true + spec: + httpGet: + scheme: HTTP + path: /health + port: http + periodSeconds: 10 + failureThreshold: 15 + readiness: + enabled: true + custom: true + spec: + httpGet: + scheme: HTTP + path: /health + port: http + initialDelaySeconds: 10 + timeoutSeconds: 5 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 15 + liveness: + enabled: true + custom: true + spec: + httpGet: + scheme: HTTP + path: /health + port: http + initialDelaySeconds: 30 + timeoutSeconds: 5 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 +{{- end }} + +{{ if .Values.orchestrator.enabled }} +{{- $ctx := deepCopy . -}} +{{- $_ := get .Values "orchestrator" | mergeOverwrite $ctx.Values -}} +{{- $_ = include "clusterplex.orchestrator.hardcodedValues" . 
| fromYaml | merge $ctx.Values -}} +{{- include "bjw-s.common.loader.all" $ctx }} +{{ end }} \ No newline at end of file diff --git a/charts/clusterplex/templates/pms.yaml b/charts/clusterplex/templates/pms.yaml new file mode 100644 index 0000000..12f586e --- /dev/null +++ b/charts/clusterplex/templates/pms.yaml @@ -0,0 +1,150 @@ +{{- define "clusterplex.pms.hardcodedValues" -}} +global: + nameOverride: "pms" + +image: + repository: '{{ .Values.global.plexImage.repository }}' + tag: '{{ .Values.global.plexImage.tag | default "latest" }}' + imagePullPolicy: '{{ .Values.global.plexImage.imagePullPolicy }}' + +configMaps: + config: + enabled: true + data: + VERSION: '{{ .Values.pms.config.version | default "docker" }}' + TZ: '{{ .Values.global.timezone | default "America/Chicago" }}' + PGID: '{{ .Values.global.PGID | default "1000" }}' + PUID: '{{ .Values.global.PUID | default "1000" }}' + DOCKER_MODS: 'ghcr.io/pabloromeo/clusterplex_dockermod:{{ .Values.global.clusterplexVersion | default .Chart.AppVersion }}' + ORCHESTRATOR_URL: 'http://{{ .Release.Name }}-orchestrator:{{ .Values.orchestrator.config.port | default "3500" }}' + PMS_SERVICE: '{{ .Release.Name }}-pms' + PMS_PORT: '{{ .Values.pms.config.port | default "32400" }}' + TRANSCODER_VERBOSE: '{{ .Values.pms.config.transcoderVerbose | default "1" }}' + TRANSCODE_OPERATING_MODE: '{{ .Values.pms.config.transcodeOperatingMode | default "both" }}' + LOCAL_RELAY_ENABLED: '1' + LOCAL_RELAY_PORT: '{{ .Values.pms.config.relayPort | default "32499" }}' + +{{ if .Values.pms.config.plexClaimToken }} +secrets: + config: + enabled: true + stringData: + PLEX_CLAIM: {{ .Values.pms.config.plexClaimToken }} +{{- end }} + +envFrom: + - configMapRef: + name: "{{ .Release.Name }}-pms-config" + {{ if .Values.pms.config.plexClaimToken }} + - secretRef: + name: "{{ .Release.Name }}-pms-config" + {{- end }} + +initContainers: + {{ if .Values.global.sharedStorage.transcode.enabled }} + set-transcode-permissions: + image: busybox:1.36.1 + command: ['sh', '-c', 'chown -R {{ .Values.global.PUID | default "1000" }}:{{ .Values.global.PGID | default "1000" }} /transcode && chmod 0755 -R /transcode && echo "Configured /transcode permissions"' ] + volumeMounts: + - name: transcode + mountPath: /transcode + {{- end }} + + +service: + main: + type: '{{ .Values.pms.serviceConfig.type | default "ClusterIP" }}' + externalTrafficPolicy: '{{ .Values.pms.serviceConfig.externalTrafficPolicy }}' + annotations: + {{- toYaml .Values.pms.serviceConfig.annotations | nindent 6 }} + labels: + {{- toYaml .Values.pms.serviceConfig.labels | nindent 6 }} + ports: + http: + enabled: false + primary: false + plex: + enabled: true + primary: true + port: '{{ .Values.pms.config.port | default "32400" }}' + protocol: TCP + relay: + enabled: true + primary: false + port: '{{ .Values.pms.config.relayPort | default "32499" }}' + protocol: TCP + +{{ if .Values.pms.ingressConfig.enabled }} +ingress: + main: + primary: true + {{- toYaml .Values.pms.ingressConfig | nindent 4 }} +{{- end }} + +persistence: + config: + {{- toYaml .Values.pms.configVolume | nindent 4 }} + transcode: + {{- toYaml .Values.global.sharedStorage.transcode | nindent 4 }} + accessMode: ReadWriteMany + {{ if not .Values.global.sharedStorage.transcode.existingClaim }} + existingClaim: {{ .Release.Name }}-transcode + {{- end }} + media: + {{- toYaml .Values.global.sharedStorage.media | nindent 4 }} + accessMode: ReadWriteMany + {{ if not .Values.global.sharedStorage.media.existingClaim }} + existingClaim: {{ .Release.Name 
}}-media + {{- end }} +{{- range $key, $value := .Values.global.sharedStorage.additionalMediaVolumes -}} + {{ $key | nindent 2}}: + {{- toYaml $value | nindent 4 }} + accessMode: "ReadWriteMany" + existingClaim: {{ if not $value.existingClaim }}{{ $.Release.Name }}-{{ $key }}{{ else }}{{ $value.existingClaim }}{{ end }} +{{- end }} + +probes: + startup: + enabled: false + custom: true + spec: + httpGet: + scheme: HTTP + path: /identity + port: plex + periodSeconds: 10 + failureThreshold: 30 + readiness: + enabled: true + custom: true + spec: + httpGet: + scheme: HTTP + path: /identity + port: plex + initialDelaySeconds: 30 + timeoutSeconds: 5 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 30 + liveness: + enabled: true + custom: true + spec: + httpGet: + scheme: HTTP + path: /identity + port: plex + initialDelaySeconds: 120 + timeoutSeconds: 5 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 +{{- end }} + +{{ if .Values.pms.enabled }} +{{- $ctx := deepCopy . -}} +{{- $_ := get .Values "pms" | mergeOverwrite $ctx.Values -}} +{{- $_ = include "clusterplex.pms.hardcodedValues" . | fromYaml | merge $ctx.Values -}} +{{- include "bjw-s.common.loader.all" $ctx }} +{{ end }} \ No newline at end of file diff --git a/charts/clusterplex/templates/shared-storage.yaml b/charts/clusterplex/templates/shared-storage.yaml new file mode 100644 index 0000000..8ac2304 --- /dev/null +++ b/charts/clusterplex/templates/shared-storage.yaml @@ -0,0 +1,29 @@ +{{- define "clusterplex.sharedStorage.hardcodedValues" -}} +global: + nameOverride: "{{ .Release.Name }}" + +controller: + enabled: false +service: + main: + enabled: false + +persistence: + transcode: + {{- toYaml .Values.global.sharedStorage.transcode | nindent 4 }} + accessMode: ReadWriteMany + media: + {{- toYaml .Values.global.sharedStorage.media | nindent 4 }} + accessMode: ReadWriteMany + {{- range $key, $value := .Values.global.sharedStorage.additionalMediaVolumes -}} + {{ $key | nindent 2}}: + {{- toYaml $value | nindent 4 }} + accessMode: "ReadWriteMany" + {{- end }} +{{- end }} + + +{{- $ctx := deepCopy . -}} +{{- $_ := get .Values "global" | mergeOverwrite $ctx.Values -}} +{{- $_ = include "clusterplex.sharedStorage.hardcodedValues" . 
| fromYaml | merge $ctx.Values -}} +{{- include "bjw-s.common.loader.all" $ctx }} \ No newline at end of file diff --git a/charts/clusterplex/templates/worker.yaml b/charts/clusterplex/templates/worker.yaml new file mode 100644 index 0000000..634a0e0 --- /dev/null +++ b/charts/clusterplex/templates/worker.yaml @@ -0,0 +1,145 @@ +{{- define "clusterplex.worker.hardcodedValues" -}} +global: + nameOverride: "worker" + +image: + repository: '{{ .Values.global.plexImage.repository }}' + tag: '{{ .Values.global.plexImage.tag | default "latest" }}' + imagePullPolicy: '{{ .Values.global.plexImage.imagePullPolicy }}' + +controller: + type: statefulset + replicas: '{{ .Values.worker.config.replicas | default 1 }}' + +configMaps: + config: + enabled: true + data: + TZ: '{{ .Values.global.timezone | default "America/Chicago" }}' + PGID: '{{ .Values.global.PGID | default "1000" }}' + PUID: '{{ .Values.global.PUID | default "1000" }}' + VERSION: docker + DOCKER_MODS: 'ghcr.io/pabloromeo/clusterplex_worker_dockermod:{{ .Values.global.clusterplexVersion | default .Chart.AppVersion }}' + ORCHESTRATOR_URL: 'http://{{ .Release.Name }}-orchestrator:{{ .Values.orchestrator.config.port | default "3500" }}' + LISTENING_PORT: '{{ .Values.worker.config.port | default "3501" }}' + STAT_CPU_INTERVAL: '{{ .Values.worker.config.cpuStatInterval | default "10000" }}' + EAE_SUPPORT: '{{ .Values.worker.config.eaeSupport | default "1" }}' + +envFrom: + - configMapRef: + name: "{{ .Release.Name }}-worker-config" + + +initContainers: + {{ if .Values.global.sharedStorage.transcode.enabled }} + set-transcode-permissions: + image: busybox:1.36.1 + command: ['sh', '-c', 'chown -R {{ .Values.global.PUID | default "1000" }}:{{ .Values.global.PGID | default "1000" }} /transcode && chmod 0755 -R /transcode && echo "Configured /transcode permissions"' ] + volumeMounts: + - name: transcode + mountPath: /transcode + {{- end }} + {{ if .Values.worker.codecVolumes.enabled }} + set-codec-permissions: + image: busybox:1.36.1 + command: ['sh', '-c', 'chown -R {{ .Values.global.PUID | default "1000" }}:{{ .Values.global.PGID | default "1000" }} /codecs && chmod 0755 -R /codecs && echo "Configured /codecs permissions"' ] + volumeMounts: + - name: codecs + mountPath: /codecs + {{- end }} + +service: + main: + type: '{{ .Values.worker.serviceConfig.type | default "ClusterIP" }}' + externalTrafficPolicy: '{{ .Values.worker.serviceConfig.externalTrafficPolicy }}' + annotations: + {{- toYaml .Values.worker.serviceConfig.annotations | nindent 6 }} + labels: + {{- toYaml .Values.worker.serviceConfig.labels | nindent 6 }} + ports: + http: + enabled: true + primary: true + port: '{{ .Values.worker.config.port | default "3501" }}' + protocol: TCP + +persistence: + transcode: + {{- toYaml .Values.global.sharedStorage.transcode | nindent 4 }} + accessMode: ReadWriteMany + {{ if not .Values.global.sharedStorage.transcode.existingClaim }} + existingClaim: {{ .Release.Name }}-transcode + {{- end }} + media: + {{- toYaml .Values.global.sharedStorage.media | nindent 4 }} + accessMode: ReadWriteMany + {{ if not .Values.global.sharedStorage.media.existingClaim }} + existingClaim: {{ .Release.Name }}-media + {{- end }} +{{- range $key, $value := .Values.global.sharedStorage.additionalMediaVolumes -}} + {{ $key | nindent 2}}: + {{- toYaml $value | nindent 4 }} + accessMode: "ReadWriteMany" + existingClaim: {{ if not $value.existingClaim }}{{ $.Release.Name }}-{{ $key }}{{ else }}{{ $value.existingClaim }}{{ end }} +{{- end }} + +{{ if 
.Values.worker.codecVolumes.enabled }} +volumeClaimTemplates: + - name: codecs + annotations: + {{- toYaml .Values.worker.codecVolumes.annotations | nindent 6 }} + labels: + {{- toYaml .Values.worker.codecVolumes.labels | nindent 6 }} + mountPath: /codecs + accessMode: '{{ .Values.worker.codecVolumes.accessMode | default "ReadWriteOnce" }}' + size: '{{ .Values.worker.codecVolumes.size | default "1Gi" }}' + {{ if .Values.worker.codecVolumes.storageClass }} + storageClass: {{ if (eq "-" .Values.worker.codecVolumes.storageClass) }}""{{- else }}{{ .Values.worker.codecVolumes.storageClass | quote }}{{- end }} + {{- end }} +{{- end }} + +probes: + startup: + enabled: false + custom: true + spec: + httpGet: + scheme: HTTP + path: /health + port: http + periodSeconds: 10 + failureThreshold: 40 + readiness: + enabled: true + custom: true + spec: + httpGet: + scheme: HTTP + path: /health + port: http + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 15 + liveness: + enabled: true + custom: true + spec: + httpGet: + scheme: HTTP + path: /health + port: http + initialDelaySeconds: 60 + timeoutSeconds: 5 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 +{{- end }} + +{{ if .Values.worker.enabled }} +{{- $ctx := deepCopy . -}} +{{- $_ := get .Values "worker" | mergeOverwrite $ctx.Values -}} +{{- $_ = include "clusterplex.worker.hardcodedValues" . | fromYaml | merge $ctx.Values -}} +{{- include "bjw-s.common.loader.all" $ctx }} +{{ end }} \ No newline at end of file diff --git a/charts/clusterplex/values.yaml b/charts/clusterplex/values.yaml new file mode 100644 index 0000000..d2788b1 --- /dev/null +++ b/charts/clusterplex/values.yaml @@ -0,0 +1,435 @@ +global: + # -- Configure the plex image that will be used for the PMS and Worker components + # @default -- See below + plexImage: + # -- The image that will be used + repository: linuxserver/plex + + # -- The image tag to use + tag: latest + + # -- Defines when the image should be pulled. Options are Always (default), IfNotPresent, and Never + imagePullPolicy: Always + + # -- The CluterPlex version of docker mod images to pull + # @default -- The appVersion for this chart + clusterplexVersion: + + # -- The timezone configured for each pod + timezone: America/Chicago + + # -- The process group ID that the LinuxServer Plex container will run Plex/Worker as. + PGID: 1000 + + # -- The process user ID that the LinuxServer Plex container will run Plex/Worker as. + PUID: 1000 + + sharedStorage: + # -- Configure the volume that will be mounted to the PMS and worker pods for a shared location for transcoding files. + # @default -- See below + transcode: + # -- Enable or disable the transcode PVC. This should only be disabled if you are not using the workers. + enabled: true + + # -- Storage class for the transcode volume. + # If set to `-`, dynamic provisioning is disabled. + # If set to something else, the given storageClass is used. + # If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. + # NOTE: This class must support ReadWriteMany otherwise you will encounter errors. + storageClass: # "-" + + # -- If you want to reuse an existing claim, the name of the existing PVC can be passed here. + existingClaim: # your-claim + + # -- Used in conjunction with `existingClaim`. Specifies a sub-path inside the referenced volume instead of its root + subPath: # some-subpath + + # -- The size of the transcode volume. 
+ size: 10Gi + + # -- Set to true to retain the PVC upon `helm uninstall` + retain: true + + # -- Configure the media volume that will contain all of your media. If you need more volumes you need to add them under + # the pms and worker sections manually. Those volumes must already be present in the cluster. + # @default -- See below + media: + # -- Enables or disables the volume + enabled: true + + # -- Storage Class for the config volume. + # If set to `-`, dynamic provisioning is disabled. + # If set to something else, the given storageClass is used. + # If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. + # NOTE: This class must support ReadWriteMany otherwise you will encounter errors. + storageClass: # "-" + + # -- If you want to reuse an existing claim, the name of the existing PVC can be passed here. + existingClaim: # your-claim + + # -- Used in conjunction with `existingClaim`. Specifies a sub-path inside the referenced volume instead of its root + subPath: # some-subpath + + # -- The amount of storage that is requested for the persistent volume. + size: 100Gi + + # -- Set to true to retain the PVC upon `helm uninstall` + retain: true + + # -- Use this section to add additional media mounts if necessary. You can copy the contents of the above media + additionalMediaVolumes: {} + + + + +# -- Configure the Plex Media Server component +# @default -- See below +pms: + # -- Enable or disable the Plex Media Server component + enabled: true + + + # -- Additional environment variables. Template enabled. + # Syntax options: + # A) TZ: UTC + # B) PASSWD: '{{ .Release.Name }}' + # C) PASSWD: + # configMapKeyRef: + # name: config-map-name + # key: key-name + # D) PASSWD: + # valueFrom: + # secretKeyRef: + # name: secret-name + # key: key-name + # ... + # E) - name: TZ + # value: UTC + # F) - name: TZ + # value: '{{ .Release.Name }}' + env: + + # -- Supply the configuration items used to configure the PMS component + # @default -- See below + config: + # -- Set this to 1 if you want only info logging from the transcoder or 0 if you want debugging logs + transcoderVerbose: 1 + + # -- Set the transcode operating mode. Valid options are local (No workers), remote (only remote workers), both (default, remote first then local if remote fails). + # You MUST set this to local if you disable the worker installation. + transcodeOperatingMode: both + + # -- Set the Plex claim token obtained from https://plex.tv/claim + plexClaimToken: + + # -- Set the version of Plex to use. Valid options are docker, latest, public, or a specific version. + # [[ref](https://github.com/linuxserver/docker-plex#application-setup)] + version: docker + + # -- The port that Plex will listen on + port: 32400 + + # -- The port that the relay service will listen on + relayPort: 32499 + + # -- Configure the kubernetes service associated with the the PMS component + # @default -- See below + serviceConfig: + # Configure the type of service + type: ClusterIP + + # -- Specify the externalTrafficPolicy for the service. Options: Cluster, Local + # [[ref](https://kubernetes.io/docs/tutorials/services/source-ip/)] + externalTrafficPolicy: + + # -- Provide additional annotations which may be required. + annotations: {} + + # -- Provide additional labels which may be required. + labels: {} + + # -- Configure the ingress for plex here. 
+ # @default -- See below + ingressConfig: + # -- Enables or disables the ingress + enabled: false + + # -- Provide additional annotations which may be required. + annotations: + {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + + # -- Provide additional labels which may be required. + labels: {} + + # -- Set the ingressClass that is used for this ingress. + ingressClassName: # "nginx" + + ## Configure the hosts for the ingress + hosts: + - # -- Host address. Helm template can be passed. + host: chart-example.local + ## Configure the paths for the host + paths: + - # -- Path. Helm template can be passed. + path: / + pathType: Prefix + service: + # -- Overrides the service name reference for this path + name: + # -- Overrides the service port reference for this path + port: + + # -- Configure TLS for the ingress. Both secretName and hosts can process a Helm template. + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + + # -- Configure the volume that stores all the Plex configuration and metadata + # @default -- See below + configVolume: + # -- Enables or disables the volume + enabled: true + + # -- Storage Class for the config volume. + # If set to `-`, dynamic provisioning is disabled. + # If set to something else, the given storageClass is used. + # If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. + storageClass: # "-" + + # -- If you want to reuse an existing claim, the name of the existing PVC can be passed here. + existingClaim: # your-claim + + # -- Used in conjunction with `existingClaim`. Specifies a sub-path inside the referenced volume instead of its root + subPath: # some-subpath + + # -- AccessMode for the persistent volume. + # Make sure to select an access mode that is supported by your storage provider! + # [[ref]](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) + accessMode: ReadWriteOnce + + # -- The amount of storage that is requested for the persistent volume. + size: 25Gi + + # -- Set to true to retain the PVC upon `helm uninstall` + retain: true + + # -- Configure the resource requests and limits for the PMS component + # @default -- See below + resources: + requests: + # -- CPU Request amount + cpu: 2 + + # Memory Request Amount + memory: 2Gi + + limits: + # -- CPU Limit amount + cpu: 4 + + # -- Memory Limit amount + memory: 4Gi + + + + +# -- Configure the orchestrator component +# @default -- See below +orchestrator: + # -- Enable or disable the Orchestrator component + enabled: true + + image: + # -- image repository + repository: ghcr.io/pabloromeo/clusterplex_orchestrator + + # -- image pull policy + pullPolicy: IfNotPresent + + # -- Additional environment variables. Template enabled. + # Syntax options: + # A) TZ: UTC + # B) PASSWD: '{{ .Release.Name }}' + # C) PASSWD: + # configMapKeyRef: + # name: config-map-name + # key: key-name + # D) PASSWD: + # valueFrom: + # secretKeyRef: + # name: secret-name + # key: key-name + # ... + # E) - name: TZ + # value: UTC + # F) - name: TZ + # value: '{{ .Release.Name }}' + env: + + # -- Supply the configuration items used to configure the Orchestrator component + # @default -- See below + config: + # -- The port that the Orchestrator will listen on + port: 3500 + + # -- Configures how the worker is chosen when a transcoding job is initiated. + # Options are LOAD_CPU, LOAD_TASKS, RR, and LOAD_RANK (default). 
+ # [[ref]](https://github.com/pabloromeo/clusterplex/tree/master/docs#orchestrator) + workerSelectionStrategy: LOAD_RANK + + # -- Configure the kubernetes service associated with the the PMS component + # @default -- See below + serviceConfig: + # -- Configure the type of service + type: ClusterIP + + # -- Specify the externalTrafficPolicy for the service. Options: Cluster, Local + # [[ref](https://kubernetes.io/docs/tutorials/services/source-ip/)] + externalTrafficPolicy: + + # -- Provide additional annotations which may be required. + annotations: {} + + # -- Provide additional labels which may be required. + labels: {} + + # -- Configure the resource requests and limits for the orchestrator component + # @default -- See below + resources: + requests: + # -- CPU Request amount + cpu: 200m + + # Memory Request Amount + memory: 64Mi + + limits: + # -- CPU Limit amount + cpu: 500m + + # -- Memory Limit amount + memory: 128Mi + + + + +# -- Configure the worker component +# @default -- See below +worker: + # -- Enable or disable the Worker component + enabled: true + + # -- Additional environment variables. Template enabled. + # Syntax options: + # A) TZ: UTC + # B) PASSWD: '{{ .Release.Name }}' + # C) PASSWD: + # configMapKeyRef: + # name: config-map-name + # key: key-name + # D) PASSWD: + # valueFrom: + # secretKeyRef: + # name: secret-name + # key: key-name + # ... + # E) - name: TZ + # value: UTC + # F) - name: TZ + # value: '{{ .Release.Name }}' + env: + + # -- Supply the configuration items used to configure the worker component + # @default -- See below + config: + # -- The number of instances of the worker to run + replicas: 2 + + # -- The port the worker will expose its metrics on for the orchestrator to find + port: 3501 + + # -- The frequency at which workers send stats to the orchestrator in ms + cpuStatInterval: 10000 + + # -- Controls usage of the EasyAudioDecoder 1 = ON (default) and 0 = OFF + eaeSupport: 1 + + # -- Configure the kubernetes service associated with the the PMS component + # @default -- See below + serviceConfig: + # Configure the type of service + type: ClusterIP + + # -- Specify the externalTrafficPolicy for the service. Options: Cluster, Local + # [[ref](https://kubernetes.io/docs/tutorials/services/source-ip/)] + externalTrafficPolicy: + + # -- Provide additional annotations which may be required. + annotations: {} + + # -- Provide additional labels which may be required. + labels: {} + + # -- Enable or disable the per-pod volumes that cache the codecs. This saves a great deal of time when starting the workers. + # @default -- See below + codecVolumes: + # -- Enable or disable the creation of the codec volumes + enabled: true + + # -- Add any extra labels needed + labels: {} + + # -- Add any extra annotations needed + annotations: {} + + # -- AccessMode for the persistent volume. + # Make sure to select an access mode that is supported by your storage provider! + # [[ref]](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) + accessMode: ReadWriteOnce + + # -- The size of the volume + size: 1Gi + + # -- Storage Class for the codec volumes + # If set to `-`, dynamic provisioning is disabled. + # If set to something else, the given storageClass is used. + # If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. 
+ storageClass: + + # -- Configure the resource requests and limits for the worker component + # @default -- See below + resources: + requests: + # -- CPU Request amount + cpu: 2000m + + # -- Memory Request Amount + memory: 3Gi + + limits: + # -- CPU Limit amount + cpu: 4000m + + # -- Memory Limit amount + memory: 6Gi + + # -- Configure the affinity rules for the worker pods. This helps prevent multiple worker pods from + # being scheduled on the same node as another worker pod or as the main plex media server. + affinity: {} + # podAntiAffinity: + # preferredDuringSchedulingIgnoredDuringExecution: + # - podAffinityTerm: + # labelSelector: + # matchLabels: + # name: clusterplex-worker + # topologyKey: kubernetes.io/hostname + # weight: 100 + # - podAffinityTerm: + # labelSelector: + # matchLabels: + # name: clusterplex-pms + # topologyKey: kubernetes.io/hostname + # weight: 50 \ No newline at end of file From efb4c99adf5502ec6efa4be32540fd15a24f62f5 Mon Sep 17 00:00:00 2001 From: Brandan Schmitz Date: Thu, 8 Jun 2023 03:19:04 -0500 Subject: [PATCH 2/5] Added support for Prometheus ServiceMonitor and Grafana Dashboard --- charts/clusterplex/README.md | 12 +- .../files/orchestrator-grafana-dashboard.json | 852 ++++++++++++++++++ .../clusterplex/templates/orchestrator.yaml | 25 + charts/clusterplex/values.yaml | 33 +- 4 files changed, 918 insertions(+), 4 deletions(-) create mode 100644 charts/clusterplex/files/orchestrator-grafana-dashboard.json diff --git a/charts/clusterplex/README.md b/charts/clusterplex/README.md index 9ac4c87..9a916fb 100644 --- a/charts/clusterplex/README.md +++ b/charts/clusterplex/README.md @@ -95,8 +95,8 @@ $ helm install clusterplex clusterplex/clusterplex | pms.configVolume.size | string | `"25Gi"` | The amount of storage that is requested for the persistent volume. | | pms.configVolume.retain | bool | `true` | Set to true to retain the PVC upon `helm uninstall` | | pms.resources | object | See below | Configure the resource requests and limits for the PMS component | -| pms.resources.requests.cpu | int | `2` | CPU Request amount | -| pms.resources.limits.cpu | int | `4` | CPU Limit amount | +| pms.resources.requests.cpu | string | `"2000m"` | CPU Request amount | +| pms.resources.limits.cpu | string | `"4000m"` | CPU Limit amount | | pms.resources.limits.memory | string | `"4Gi"` | Memory Limit amount | | orchestrator | object | See below | Configure the orchestrator component | | orchestrator.enabled | bool | `true` | Enable or disable the Orchestrator component | @@ -111,6 +111,14 @@ $ helm install clusterplex clusterplex/clusterplex | orchestrator.serviceConfig.externalTrafficPolicy | string | `nil` | Specify the externalTrafficPolicy for the service. Options: Cluster, Local [[ref](https://kubernetes.io/docs/tutorials/services/source-ip/)] | | orchestrator.serviceConfig.annotations | object | `{}` | Provide additional annotations which may be required. | | orchestrator.serviceConfig.labels | object | `{}` | Provide additional labels which may be required. | +| orchestrator.prometheusServiceMonitor | object | See below | Configure a ServiceMonitor for use with Prometheus monitoring | +| orchestrator.prometheusServiceMonitor.enabled | bool | `false` | Enable the ServiceMonitor creation | +| orchestrator.prometheusServiceMonitor.annotations | object | `{}` | Provide additional additions which may be required. | +| orchestrator.prometheusServiceMonitor.labels | object | `{}` | Provide additional labels which may be required. 
| +| orchestrator.prometheusServiceMonitor.customSelector | object | `{}` | Provide a custom selector if desired. Note that this will take precedent over the default method of using the orchestrators namespace. This usually should not be required. | +| orchestrator.prometheusServiceMonitor.scrapeInterval | string | `"30s"` | Configure how often Prometheus should scrape this metrics endpoint in seconds | +| orchestrator.prometheusServiceMonitor.scrapeTimeout | string | `"10s"` | Configure how long Prometheus should wait for the endpoint to reply before considering the request to have timed out. | +| orchestrator.enableGrafanaDashboard | bool | `false` | Configures if the Grafana dashboard for the orchestrator component is deployed to the cluster or not. If enabled, this creates a ConfigMap containing the dashboard JSON so that your Gradana instance can detect it. This requires your grafana instance to have the grafana.sidecar.dashboards.enabled to be true and the searchNamespace to be set to ALL otherwise this will not be discovered. | | orchestrator.resources | object | See below | Configure the resource requests and limits for the orchestrator component | | orchestrator.resources.requests.cpu | string | `"200m"` | CPU Request amount | | orchestrator.resources.limits.cpu | string | `"500m"` | CPU Limit amount | diff --git a/charts/clusterplex/files/orchestrator-grafana-dashboard.json b/charts/clusterplex/files/orchestrator-grafana-dashboard.json new file mode 100644 index 0000000..f7095d4 --- /dev/null +++ b/charts/clusterplex/files/orchestrator-grafana-dashboard.json @@ -0,0 +1,852 @@ +{ + "annotations": { + "list": [ + { + "$$hashKey": "object:7", + "builtIn": 1, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "description": "Monitoring dashboard for ClusterPlex deployments", + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 355, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "max": 3, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#d44a3a", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 1 + }, + { + "color": "#299c46", + "value": 2 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 4, + "x": 0, + "y": 0 + }, + "id": 2, + "links": [], + "maxDataPoints": 100, + "options": { + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": true, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "9.0.5", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "expr": "sum(workers_active) by (job)", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "title": "Active Workers", + "type": "gauge" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": 
"prometheus" + }, + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 10, + "x": 4, + "y": 0 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.0.5", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "expr": "avg(worker_load_cpu) by (worker_name)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ "{{worker_name}}" }}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "CPU Load", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percent", + "logBase": 1, + "max": "100", + "min": "0", + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": false + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 10, + "x": 14, + "y": 0 + }, + "hiddenSeries": false, + "id": 11, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.0.5", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "expr": "avg(worker_load_rank) by (worker_name)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ "{{worker_name}}" }}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Rank", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:325", + "decimals": 0, + "format": "short", + "label": "", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:326", + "format": "short", + "logBase": 1, + "show": false + } + ], + "yaxis": { + "align": false + } + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#d44a3a", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 2 + }, + { + 
"color": "#299c46", + "value": 3 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 0, + "y": 7 + }, + "id": 6, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "9.0.5", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "expr": "sum(job_posters_active) by (job)", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "title": "Job Posters", + "type": "stat" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "decimals": 0, + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 9, + "x": 4, + "y": 7 + }, + "hiddenSeries": false, + "id": 5, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.0.5", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "expr": "sum(worker_load_tasks) by (worker_name)", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ "{{worker_name}}" }}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Tasks Load", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "none", + "label": "Count", + "logBase": 1, + "min": "0", + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": false + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": { + "Failed": "#bf1b00", + "Killed": "#f4d598" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "decimals": 0, + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 11, + "x": 13, + "y": 7 + }, + "hiddenSeries": false, + "id": 9, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.0.5", + "pointradius": 1, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "expr": "sum(increase(jobs_succeeded[30d])) by (job)", + "format": "time_series", + 
"intervalFactor": 1, + "legendFormat": "Succeeded", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "expr": "sum(increase(jobs_failed[30d])) by (job)", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "Failed", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "expr": "sum(increase(jobs_killed[30d])) by (job)", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "Cancelled", + "refId": "C" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Jobs", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:180", + "decimals": 0, + "format": "short", + "label": "", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:181", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 0, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#d44a3a", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 2 + }, + { + "color": "#299c46", + "value": 3 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 2, + "x": 0, + "y": 10 + }, + "id": 7, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "9.0.5", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "expr": "sum(increase(jobs_posted[1d])) by (job)", + "format": "time_series", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "title": "Posted", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 0, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#d44a3a", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 2 + }, + { + "color": "#299c46", + "value": 3 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 2, + "x": 2, + "y": 10 + }, + "id": 10, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "9.0.5", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "expr": "sum(increase(jobs_completed[1d])) by (job)", + "format": "time_series", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": 
"", + "refId": "A" + } + ], + "title": "Completed", + "type": "stat" + } + ], + "refresh": "1m", + "schemaVersion": 36, + "style": "dark", + "tags": [ + "clusterplex", + "prometheus" + ], + "templating": { + "list": [] + }, + "time": { + "from": "now-24h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "ClusterPlex", + "uid": "JvxZbcigz", + "version": 5, + "weekStart": "" + } \ No newline at end of file diff --git a/charts/clusterplex/templates/orchestrator.yaml b/charts/clusterplex/templates/orchestrator.yaml index b7fa96b..a778182 100644 --- a/charts/clusterplex/templates/orchestrator.yaml +++ b/charts/clusterplex/templates/orchestrator.yaml @@ -12,6 +12,14 @@ configMaps: TZ: '{{ .Values.global.timezone | default "America/Chicago" }}' LISTENING_PORT: '{{ .Values.orchestrator.config.port | default "3500" }}' WORKER_SELECTION_STRATEGY: '{{ .Values.orchestrator.config.workerSelectionStrategy | default "LOAD_RANK" }}' + {{ if .Values.orchestrator.enableGrafanaDashboard }} + grafana-dashboard: + enabled: true + labels: + grafana_dashboard: "1" + data: + {{ (.Files.Glob "files/orchestrator-grafana-dashboard.json").AsConfig | nindent 6 }} + {{- end }} envFrom: - configMapRef: @@ -32,6 +40,23 @@ service: port: '{{ .Values.orchestrator.config.port | default "3500" }}' protocol: TCP +serviceMonitor: + main: + enabled: '{{ .Values.orchestrator.prometheusServiceMonitor.enabled | default "false" }}' + annotations: + {{- toYaml .Values.orchestrator.prometheusServiceMonitor.annotations | nindent 6 }} + labels: + {{- toYaml .Values.orchestrator.prometheusServiceMonitor.labels | nindent 6 }} + selector: + {{- toYaml .Values.orchestrator.prometheusServiceMonitor.customSelector | nindent 6 }} + serviceName: {{ .Release.Name }}-orchestrator + endpoints: + - port: http + scheme: http + path: /metrics + interval: '{{ .Values.orchestrator.prometheusServiceMonitor.scrapeInterval | default "30s" }}' + scrapeTimeout: '{{ .Values.orchestrator.prometheusServiceMonitor.scrapeTimeout | default "10s" }}' + probes: startup: enabled: false diff --git a/charts/clusterplex/values.yaml b/charts/clusterplex/values.yaml index d2788b1..5b09dc4 100644 --- a/charts/clusterplex/values.yaml +++ b/charts/clusterplex/values.yaml @@ -221,14 +221,14 @@ pms: resources: requests: # -- CPU Request amount - cpu: 2 + cpu: 2000m # Memory Request Amount memory: 2Gi limits: # -- CPU Limit amount - cpu: 4 + cpu: 4000m # -- Memory Limit amount memory: 4Gi @@ -296,6 +296,35 @@ orchestrator: # -- Provide additional labels which may be required. labels: {} + # -- Configure a ServiceMonitor for use with Prometheus monitoring + # @default -- See below + prometheusServiceMonitor: + # -- Enable the ServiceMonitor creation + enabled: false + + # -- Provide additional additions which may be required. + annotations: {} + + # -- Provide additional labels which may be required. + labels: {} + + # -- Provide a custom selector if desired. Note that this will take precedent over the default + # method of using the orchestrators namespace. This usually should not be required. 
+ customSelector: {} + + # -- Configure how often Prometheus should scrape this metrics endpoint in seconds + scrapeInterval: 30s + + # -- Configure how long Prometheus should wait for the endpoint to reply before + # considering the request to have timed out. + scrapeTimeout: 10s + + # -- Configures if the Grafana dashboard for the orchestrator component is deployed to the cluster or not. + # If enabled, this creates a ConfigMap containing the dashboard JSON so that your Gradana instance can detect it. + # This requires your grafana instance to have the grafana.sidecar.dashboards.enabled to be true and the searchNamespace + # to be set to ALL otherwise this will not be discovered. + enableGrafanaDashboard: false + # -- Configure the resource requests and limits for the orchestrator component # @default -- See below resources: From fdcc4ee09122106d5e86ab89bfce4a64d5492d09 Mon Sep 17 00:00:00 2001 From: Brandan Schmitz Date: Thu, 8 Jun 2023 03:47:48 -0500 Subject: [PATCH 3/5] Allow disabling probes --- charts/clusterplex/README.md | 12 +++++++ .../clusterplex/templates/orchestrator.yaml | 15 ++++++++ charts/clusterplex/templates/pms.yaml | 19 ++++++++-- charts/clusterplex/templates/worker.yaml | 15 ++++++++ charts/clusterplex/values.yaml | 36 +++++++++++++++++++ 5 files changed, 95 insertions(+), 2 deletions(-) diff --git a/charts/clusterplex/README.md b/charts/clusterplex/README.md index 9a916fb..5e95014 100644 --- a/charts/clusterplex/README.md +++ b/charts/clusterplex/README.md @@ -94,6 +94,10 @@ $ helm install clusterplex clusterplex/clusterplex | pms.configVolume.accessMode | string | `"ReadWriteOnce"` | AccessMode for the persistent volume. Make sure to select an access mode that is supported by your storage provider! [[ref]](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) | | pms.configVolume.size | string | `"25Gi"` | The amount of storage that is requested for the persistent volume. | | pms.configVolume.retain | bool | `true` | Set to true to retain the PVC upon `helm uninstall` | +| pms.healthProbes | object | See below | Enable or disable the various health check probes for this component | +| pms.healthProbes.startup | bool | `true` | Enable or disable the startup probe | +| pms.healthProbes.readiness | bool | `true` | Enable or disable the readiness probe | +| pms.healthProbes.liveness | bool | `true` | Enable or disable the liveness probe | | pms.resources | object | See below | Configure the resource requests and limits for the PMS component | | pms.resources.requests.cpu | string | `"2000m"` | CPU Request amount | | pms.resources.limits.cpu | string | `"4000m"` | CPU Limit amount | @@ -119,6 +123,10 @@ $ helm install clusterplex clusterplex/clusterplex | orchestrator.prometheusServiceMonitor.scrapeInterval | string | `"30s"` | Configure how often Prometheus should scrape this metrics endpoint in seconds | | orchestrator.prometheusServiceMonitor.scrapeTimeout | string | `"10s"` | Configure how long Prometheus should wait for the endpoint to reply before considering the request to have timed out. | | orchestrator.enableGrafanaDashboard | bool | `false` | Configures if the Grafana dashboard for the orchestrator component is deployed to the cluster or not. If enabled, this creates a ConfigMap containing the dashboard JSON so that your Gradana instance can detect it. This requires your grafana instance to have the grafana.sidecar.dashboards.enabled to be true and the searchNamespace to be set to ALL otherwise this will not be discovered. 
| +| orchestrator.healthProbes | object | See below | Enable or disable the various health check probes for this component | +| orchestrator.healthProbes.startup | bool | `true` | Enable or disable the startup probe | +| orchestrator.healthProbes.readiness | bool | `true` | Enable or disable the readiness probe | +| orchestrator.healthProbes.liveness | bool | `true` | Enable or disable the liveness probe | | orchestrator.resources | object | See below | Configure the resource requests and limits for the orchestrator component | | orchestrator.resources.requests.cpu | string | `"200m"` | CPU Request amount | | orchestrator.resources.limits.cpu | string | `"500m"` | CPU Limit amount | @@ -142,6 +150,10 @@ $ helm install clusterplex clusterplex/clusterplex | worker.codecVolumes.accessMode | string | `"ReadWriteOnce"` | AccessMode for the persistent volume. Make sure to select an access mode that is supported by your storage provider! [[ref]](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) | | worker.codecVolumes.size | string | `"1Gi"` | The size of the volume | | worker.codecVolumes.storageClass | string | `nil` | Storage Class for the codec volumes If set to `-`, dynamic provisioning is disabled. If set to something else, the given storageClass is used. If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. | +| worker.healthProbes | object | See below | Enable or disable the various health check probes for this component | +| worker.healthProbes.startup | bool | `true` | Enable or disable the startup probe | +| worker.healthProbes.readiness | bool | `true` | Enable or disable the readiness probe | +| worker.healthProbes.liveness | bool | `true` | Enable or disable the liveness probe | | worker.resources | object | See below | Configure the resource requests and limits for the worker component | | worker.resources.requests.cpu | string | `"2000m"` | CPU Request amount | | worker.resources.requests.memory | string | `"3Gi"` | Memory Request Amount | diff --git a/charts/clusterplex/templates/orchestrator.yaml b/charts/clusterplex/templates/orchestrator.yaml index a778182..e692198 100644 --- a/charts/clusterplex/templates/orchestrator.yaml +++ b/charts/clusterplex/templates/orchestrator.yaml @@ -58,6 +58,7 @@ serviceMonitor: scrapeTimeout: '{{ .Values.orchestrator.prometheusServiceMonitor.scrapeTimeout | default "10s" }}' probes: + {{ if .Values.orchestrator.healthProbes.startup }} startup: enabled: false custom: true @@ -68,6 +69,11 @@ probes: port: http periodSeconds: 10 failureThreshold: 15 + {{ else }} + startup: + enabled: false + {{- end }} + {{ if .Values.orchestrator.healthProbes.readiness }} readiness: enabled: true custom: true @@ -81,6 +87,11 @@ probes: periodSeconds: 10 successThreshold: 1 failureThreshold: 15 + {{ else }} + readiness: + enabled: false + {{- end }} + {{ if .Values.orchestrator.healthProbes.liveness }} liveness: enabled: true custom: true @@ -94,6 +105,10 @@ probes: periodSeconds: 10 successThreshold: 1 failureThreshold: 3 + {{ else }} + liveness: + enabled: false + {{- end }} {{- end }} {{ if .Values.orchestrator.enabled }} diff --git a/charts/clusterplex/templates/pms.yaml b/charts/clusterplex/templates/pms.yaml index 12f586e..f64af96 100644 --- a/charts/clusterplex/templates/pms.yaml +++ b/charts/clusterplex/templates/pms.yaml @@ -104,8 +104,9 @@ persistence: {{- end }} probes: + {{ if .Values.pms.healthProbes.startup }} startup: - enabled: false + enabled: true custom: true 
spec: httpGet: @@ -114,6 +115,11 @@ probes: port: plex periodSeconds: 10 failureThreshold: 30 + {{ else }} + startup: + enabled: false + {{- end }} + {{ if .Values.pms.healthProbes.readiness }} readiness: enabled: true custom: true @@ -127,6 +133,11 @@ probes: periodSeconds: 10 successThreshold: 1 failureThreshold: 30 + {{ else }} + readiness: + enabled: false + {{- end }} + {{ if .Values.pms.healthProbes.liveness }} liveness: enabled: true custom: true @@ -139,7 +150,11 @@ probes: timeoutSeconds: 5 periodSeconds: 10 successThreshold: 1 - failureThreshold: 3 + failureThreshold: 6 + {{ else }} + liveness: + enabled: false + {{- end }} {{- end }} {{ if .Values.pms.enabled }} diff --git a/charts/clusterplex/templates/worker.yaml b/charts/clusterplex/templates/worker.yaml index 634a0e0..e0b922b 100644 --- a/charts/clusterplex/templates/worker.yaml +++ b/charts/clusterplex/templates/worker.yaml @@ -99,6 +99,7 @@ volumeClaimTemplates: {{- end }} probes: + {{ if .Values.worker.healthProbes.startup }} startup: enabled: false custom: true @@ -109,6 +110,11 @@ probes: port: http periodSeconds: 10 failureThreshold: 40 + {{ else }} + startup: + enabled: false + {{- end }} + {{ if .Values.worker.healthProbes.readiness }} readiness: enabled: true custom: true @@ -122,6 +128,11 @@ probes: periodSeconds: 10 successThreshold: 1 failureThreshold: 15 + {{ else }} + readiness: + enabled: false + {{- end }} + {{ if .Values.worker.healthProbes.liveness }} liveness: enabled: true custom: true @@ -135,6 +146,10 @@ probes: periodSeconds: 10 successThreshold: 1 failureThreshold: 3 + {{ else }} + liveness: + enabled: false + {{- end }} {{- end }} {{ if .Values.worker.enabled }} diff --git a/charts/clusterplex/values.yaml b/charts/clusterplex/values.yaml index 5b09dc4..b7eb61c 100644 --- a/charts/clusterplex/values.yaml +++ b/charts/clusterplex/values.yaml @@ -216,6 +216,18 @@ pms: # -- Set to true to retain the PVC upon `helm uninstall` retain: true + # -- Enable or disable the various health check probes for this component + # @default -- See below + healthProbes: + # -- Enable or disable the startup probe + startup: true + + # -- Enable or disable the readiness probe + readiness: true + + # -- Enable or disable the liveness probe + liveness: true + # -- Configure the resource requests and limits for the PMS component # @default -- See below resources: @@ -325,6 +337,18 @@ orchestrator: # to be set to ALL otherwise this will not be discovered. enableGrafanaDashboard: false + # -- Enable or disable the various health check probes for this component + # @default -- See below + healthProbes: + # -- Enable or disable the startup probe + startup: true + + # -- Enable or disable the readiness probe + readiness: true + + # -- Enable or disable the liveness probe + liveness: true + # -- Configure the resource requests and limits for the orchestrator component # @default -- See below resources: @@ -428,6 +452,18 @@ worker: # If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. 
storageClass: + # -- Enable or disable the various health check probes for this component + # @default -- See below + healthProbes: + # -- Enable or disable the startup probe + startup: true + + # -- Enable or disable the readiness probe + readiness: true + + # -- Enable or disable the liveness probe + liveness: true + # -- Configure the resource requests and limits for the worker component # @default -- See below resources: From 7987486aa539f33568c82c8ecdc7509aea1dfd6d Mon Sep 17 00:00:00 2001 From: Brandan Schmitz Date: Thu, 6 Jul 2023 17:55:08 -0500 Subject: [PATCH 4/5] Updates for the helm chart --- .github/workflows/chart.yaml | 4 +- charts/clusterplex/Chart.yaml | 2 +- charts/clusterplex/README.md | 6 +- charts/clusterplex/custom-values.yaml | 508 +++++++++++++++++++++++ charts/clusterplex/templates/pms.yaml | 15 + charts/clusterplex/templates/worker.yaml | 9 +- charts/clusterplex/values.yaml | 10 +- 7 files changed, 544 insertions(+), 10 deletions(-) create mode 100644 charts/clusterplex/custom-values.yaml diff --git a/.github/workflows/chart.yaml b/.github/workflows/chart.yaml index a902442..3f356d4 100644 --- a/.github/workflows/chart.yaml +++ b/.github/workflows/chart.yaml @@ -2,8 +2,6 @@ name: Release Charts on: push: - branches: - - master tags: - 'v*.*.*' @@ -54,7 +52,7 @@ jobs: shell: bash working-directory: dest run: | - helm repo index . --url https://pabloromeo.github.io/clusterplex + helm repo index --url https://pabloromeo.github.io/clusterplex --merge index.yaml . - name: Commit changes uses: stefanzweifel/git-auto-commit-action@v4 diff --git a/charts/clusterplex/Chart.yaml b/charts/clusterplex/Chart.yaml index a0aee98..6d48d0e 100644 --- a/charts/clusterplex/Chart.yaml +++ b/charts/clusterplex/Chart.yaml @@ -12,5 +12,5 @@ sources: - https://github.com/pabloromeo/clusterplex - https://github.com/linuxserver/docker-plex - https://plex.tv -version: 1.0.1 +version: 1.1.0 appVersion: 1.4.5 \ No newline at end of file diff --git a/charts/clusterplex/README.md b/charts/clusterplex/README.md index 5e95014..c3b1ad9 100644 --- a/charts/clusterplex/README.md +++ b/charts/clusterplex/README.md @@ -1,6 +1,6 @@ # clusterplex -![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.4.4](https://img.shields.io/badge/AppVersion-1.4.4-informational?style=flat-square) +![Version: 1.1.0](https://img.shields.io/badge/Version-1.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.4.5](https://img.shields.io/badge/AppVersion-1.4.5-informational?style=flat-square) ClusterPlex is basically an extended version of Plex, which supports distributed Workers across a cluster to handle transcoding requests. @@ -67,11 +67,13 @@ $ helm install clusterplex clusterplex/clusterplex | pms.env | string | `nil` | Additional environment variables. Template enabled. Syntax options: A) TZ: UTC B) PASSWD: '{{ .Release.Name }}' C) PASSWD: configMapKeyRef: name: config-map-name key: key-name D) PASSWD: valueFrom: secretKeyRef: name: secret-name key: key-name ... 
E) - name: TZ value: UTC F) - name: TZ value: '{{ .Release.Name }}' |
 | pms.config | object | See below | Supply the configuration items used to configure the PMS component |
 | pms.config.transcoderVerbose | int | `1` | Set this to 1 if you want only info logging from the transcoder or 0 if you want debugging logs |
-| pms.config.transcodeOperatingMode | string | `"both"` | Set the transcode operating mode. Valid options are local (No workers), remote (only remote workers), both (default, remote first then local if remote fails). You MUST set this to local if you disable the worker installation. |
+| pms.config.transcodeOperatingMode | string | `"both"` | Set the transcode operating mode. Valid options are local (No workers), remote (only remote workers), both (default, remote first then local if remote fails). If you disable the worker then this will be set to local automatically as that is the only valid option for that configuration. |
 | pms.config.plexClaimToken | string | `nil` | Set the Plex claim token obtained from https://plex.tv/claim |
 | pms.config.version | string | `"docker"` | Set the version of Plex to use. Valid options are docker, latest, public, or a specific version. [[ref](https://github.com/linuxserver/docker-plex#application-setup)] |
 | pms.config.port | int | `32400` | The port that Plex will listen on |
+| pms.config.localRelayEnabled | bool | `true` | Enable or disable the local relay function. In most cases this should be left to the default (true). If you disable this, you must add the pod IP address of each worker or the pod network CIDR to Plex under the `List of IP addresses and networks that are allowed without auth` option in Plex's network configuration. |
 | pms.config.relayPort | int | `32499` | The port that the relay service will listen on |
+| pms.config.pmsIP | string | `""` | The IP address that plex is using. This is only utilized if you disable the localRelayEnabled option above. |
 | pms.serviceConfig | object | See below | Configure the kubernetes service associated with the the PMS component |
 | pms.serviceConfig.externalTrafficPolicy | string | `nil` | Specify the externalTrafficPolicy for the service. Options: Cluster, Local [[ref](https://kubernetes.io/docs/tutorials/services/source-ip/)] |
 | pms.serviceConfig.annotations | object | `{}` | Provide additional annotations which may be required. |
diff --git a/charts/clusterplex/custom-values.yaml b/charts/clusterplex/custom-values.yaml
new file mode 100644
index 0000000..8fe7474
--- /dev/null
+++ b/charts/clusterplex/custom-values.yaml
@@ -0,0 +1,508 @@
+global:
+  # -- Configure the plex image that will be used for the PMS and Worker components
+  # @default -- See below
+  plexImage:
+    # -- The image that will be used
+    repository: linuxserver/plex
+
+    # -- The image tag to use
+    tag: latest
+
+    # -- Defines when the image should be pulled. Options are Always (default), IfNotPresent, and Never
+    imagePullPolicy: Always
+
+  # -- The ClusterPlex version of docker mod images to pull
+  # @default -- The appVersion for this chart
+  clusterplexVersion:
+
+  # -- The timezone configured for each pod
+  timezone: America/Chicago
+
+  # -- The process group ID that the LinuxServer Plex container will run Plex/Worker as.
+  PGID: 1000
+
+  # -- The process user ID that the LinuxServer Plex container will run Plex/Worker as.
+  PUID: 1000
+
+  sharedStorage:
+    # -- Configure the volume that will be mounted to the PMS and worker pods for a shared location for transcoding files.
+ # @default -- See below + transcode: + # -- Enable or disable the transcode PVC. This should only be disabled if you are not using the workers. + enabled: true + + # -- Storage class for the transcode volume. + # If set to `-`, dynamic provisioning is disabled. + # If set to something else, the given storageClass is used. + # If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. + # NOTE: This class must support ReadWriteMany otherwise you will encounter errors. + storageClass: ceph-filesystem + + # -- If you want to reuse an existing claim, the name of the existing PVC can be passed here. + existingClaim: # your-claim + + # -- Used in conjunction with `existingClaim`. Specifies a sub-path inside the referenced volume instead of its root + subPath: # some-subpath + + # -- The size of the transcode volume. + size: 10Gi + + # -- Set to true to retain the PVC upon `helm uninstall` + retain: true + + # -- Configure the media volume that will contain all of your media. If you need more volumes you need to add them under + # the pms and worker sections manually. Those volumes must already be present in the cluster. + # @default -- See below + media: + # -- Enables or disables the volume + enabled: true + + # -- Storage Class for the config volume. + # If set to `-`, dynamic provisioning is disabled. + # If set to something else, the given storageClass is used. + # If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. + # NOTE: This class must support ReadWriteMany otherwise you will encounter errors. + storageClass: ceph-filesystem + + # -- If you want to reuse an existing claim, the name of the existing PVC can be passed here. + existingClaim: # your-claim + + # -- Used in conjunction with `existingClaim`. Specifies a sub-path inside the referenced volume instead of its root + subPath: # some-subpath + + # -- The amount of storage that is requested for the persistent volume. + size: 100Gi + + # -- Set to true to retain the PVC upon `helm uninstall` + retain: true + + # -- Use this section to add additional media mounts if necessary. You can copy the contents of the above media + additionalMediaVolumes: {} + + + + +# -- Configure the Plex Media Server component +# @default -- See below +pms: + # -- Enable or disable the Plex Media Server component + enabled: true + + + # -- Additional environment variables. Template enabled. + # Syntax options: + # A) TZ: UTC + # B) PASSWD: '{{ .Release.Name }}' + # C) PASSWD: + # configMapKeyRef: + # name: config-map-name + # key: key-name + # D) PASSWD: + # valueFrom: + # secretKeyRef: + # name: secret-name + # key: key-name + # ... + # E) - name: TZ + # value: UTC + # F) - name: TZ + # value: '{{ .Release.Name }}' + env: + + # -- Supply the configuration items used to configure the PMS component + # @default -- See below + config: + # -- Set this to 1 if you want only info logging from the transcoder or 0 if you want debugging logs + transcoderVerbose: 1 + + # -- Set the transcode operating mode. Valid options are local (No workers), remote (only remote workers), both (default, remote first then local if remote fails). + # If you disable the worker then this will be set to local automatically as that is the only valid option for that confguration. + transcodeOperatingMode: both + + # -- Set the Plex claim token obtained from https://plex.tv/claim + plexClaimToken: + + # -- Set the version of Plex to use. 
Valid options are docker, latest, public, or a specific version. + # [[ref](https://github.com/linuxserver/docker-plex#application-setup)] + version: docker + + # -- The port that Plex will listen on + port: 32400 + + # -- Enable or disable the local relay function. In most cases this should be left to the default (true). + # If you disable this, you must add the pod IP address of each worker or the pod network CIDR to Plex under the + # `List of IP addresses and networks that are allowed without auth` option in Plex's network configuration. + localRelayEnabled: false + + # -- The port that the relay service will listen on + relayPort: 32499 + + # -- The IP address that plex is using. This is only utilized if you disable the localRelayEnabled option above. + pmsIP: "" + + # -- Configure the kubernetes service associated with the the PMS component + # @default -- See below + serviceConfig: + # Configure the type of service + type: ClusterIP + + # -- Specify the externalTrafficPolicy for the service. Options: Cluster, Local + # [[ref](https://kubernetes.io/docs/tutorials/services/source-ip/)] + externalTrafficPolicy: + + # -- Provide additional annotations which may be required. + annotations: {} + + # -- Provide additional labels which may be required. + labels: {} + + # -- Configure the ingress for plex here. + # @default -- See below + ingressConfig: + # -- Enables or disables the ingress + enabled: false + + # -- Provide additional annotations which may be required. + annotations: + {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + + # -- Provide additional labels which may be required. + labels: {} + + # -- Set the ingressClass that is used for this ingress. + ingressClassName: # "nginx" + + ## Configure the hosts for the ingress + hosts: + - # -- Host address. Helm template can be passed. + host: chart-example.local + ## Configure the paths for the host + paths: + - # -- Path. Helm template can be passed. + path: / + pathType: Prefix + service: + # -- Overrides the service name reference for this path + name: + # -- Overrides the service port reference for this path + port: + + # -- Configure TLS for the ingress. Both secretName and hosts can process a Helm template. + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + + # -- Configure the volume that stores all the Plex configuration and metadata + # @default -- See below + configVolume: + # -- Enables or disables the volume + enabled: true + + # -- Storage Class for the config volume. + # If set to `-`, dynamic provisioning is disabled. + # If set to something else, the given storageClass is used. + # If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. + storageClass: # "-" + + # -- If you want to reuse an existing claim, the name of the existing PVC can be passed here. + existingClaim: # your-claim + + # -- Used in conjunction with `existingClaim`. Specifies a sub-path inside the referenced volume instead of its root + subPath: # some-subpath + + # -- AccessMode for the persistent volume. + # Make sure to select an access mode that is supported by your storage provider! + # [[ref]](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) + accessMode: ReadWriteOnce + + # -- The amount of storage that is requested for the persistent volume. 
+      size: 25Gi
+
+    # -- Set to true to retain the PVC upon `helm uninstall`
+    retain: true
+
+  # -- Enable or disable the various health check probes for this component
+  # @default -- See below
+  healthProbes:
+    # -- Enable or disable the startup probe
+    startup: true
+
+    # -- Enable or disable the readiness probe
+    readiness: true
+
+    # -- Enable or disable the liveness probe
+    liveness: true
+
+  # -- Configure the resource requests and limits for the PMS component
+  # @default -- See below
+  resources:
+    requests:
+      # -- CPU Request amount
+      cpu: 2000m
+
+      # Memory Request Amount
+      memory: 2Gi
+
+    limits:
+      # -- CPU Limit amount
+      cpu: 4000m
+
+      # -- Memory Limit amount
+      memory: 4Gi
+
+
+
+
+# -- Configure the orchestrator component
+# @default -- See below
+orchestrator:
+  # -- Enable or disable the Orchestrator component
+  enabled: true
+
+  image:
+    # -- image repository
+    repository: ghcr.io/pabloromeo/clusterplex_orchestrator
+
+    # -- image pull policy
+    pullPolicy: IfNotPresent
+
+  # -- Additional environment variables. Template enabled.
+  # Syntax options:
+  # A) TZ: UTC
+  # B) PASSWD: '{{ .Release.Name }}'
+  # C) PASSWD:
+  #      configMapKeyRef:
+  #        name: config-map-name
+  #        key: key-name
+  # D) PASSWD:
+  #      valueFrom:
+  #        secretKeyRef:
+  #          name: secret-name
+  #          key: key-name
+  # ...
+  # E) - name: TZ
+  #      value: UTC
+  # F) - name: TZ
+  #      value: '{{ .Release.Name }}'
+  env:
+
+  # -- Supply the configuration items used to configure the Orchestrator component
+  # @default -- See below
+  config:
+    # -- The port that the Orchestrator will listen on
+    port: 3500
+
+    # -- Configures how the worker is chosen when a transcoding job is initiated.
+    # Options are LOAD_CPU, LOAD_TASKS, RR, and LOAD_RANK (default).
+    # [[ref]](https://github.com/pabloromeo/clusterplex/tree/master/docs#orchestrator)
+    workerSelectionStrategy: LOAD_RANK
+
+  # -- Configure the kubernetes service associated with the orchestrator component
+  # @default -- See below
+  serviceConfig:
+    # -- Configure the type of service
+    type: ClusterIP
+
+    # -- Specify the externalTrafficPolicy for the service. Options: Cluster, Local
+    # [[ref](https://kubernetes.io/docs/tutorials/services/source-ip/)]
+    externalTrafficPolicy:
+
+    # -- Provide additional annotations which may be required.
+    annotations: {}
+
+    # -- Provide additional labels which may be required.
+    labels: {}
+
+  # -- Configure a ServiceMonitor for use with Prometheus monitoring
+  # @default -- See below
+  prometheusServiceMonitor:
+    # -- Enable the ServiceMonitor creation
+    enabled: false
+
+    # -- Provide additional annotations which may be required.
+    annotations: {}
+
+    # -- Provide additional labels which may be required.
+    labels: {}
+
+    # -- Provide a custom selector if desired. Note that this will take precedence over the default
+    # method of using the orchestrator's namespace. This usually should not be required.
+    customSelector: {}
+
+    # -- Configure how often Prometheus should scrape this metrics endpoint in seconds
+    scrapeInterval: 30s
+
+    # -- Configure how long Prometheus should wait for the endpoint to reply before
+    # considering the request to have timed out.
+    scrapeTimeout: 10s
+
+  # -- Configures if the Grafana dashboard for the orchestrator component is deployed to the cluster or not.
+  # If enabled, this creates a ConfigMap containing the dashboard JSON so that your Grafana instance can detect it.
+ # This requires your grafana instance to have the grafana.sidecar.dashboards.enabled to be true and the searchNamespace + # to be set to ALL otherwise this will not be discovered. + enableGrafanaDashboard: false + + # -- Enable or disable the various health check probes for this component + # @default -- See below + healthProbes: + # -- Enable or disable the startup probe + startup: true + + # -- Enable or disable the readiness probe + readiness: true + + # -- Enable or disable the liveness probe + liveness: true + + # -- Configure the resource requests and limits for the orchestrator component + # @default -- See below + resources: + requests: + # -- CPU Request amount + cpu: 200m + + # Memory Request Amount + memory: 64Mi + + limits: + # -- CPU Limit amount + cpu: 500m + + # -- Memory Limit amount + memory: 128Mi + + + + +# -- Configure the worker component +# @default -- See below +worker: + # -- Enable or disable the Worker component + enabled: true + + # -- Additional environment variables. Template enabled. + # Syntax options: + # A) TZ: UTC + # B) PASSWD: '{{ .Release.Name }}' + # C) PASSWD: + # configMapKeyRef: + # name: config-map-name + # key: key-name + # D) PASSWD: + # valueFrom: + # secretKeyRef: + # name: secret-name + # key: key-name + # ... + # E) - name: TZ + # value: UTC + # F) - name: TZ + # value: '{{ .Release.Name }}' + env: + + # -- Supply the configuration items used to configure the worker component + # @default -- See below + config: + # -- The number of instances of the worker to run + replicas: 0 + + # -- The port the worker will expose its metrics on for the orchestrator to find + port: 3501 + + # -- The frequency at which workers send stats to the orchestrator in ms + cpuStatInterval: 10000 + + # -- Controls usage of the EasyAudioDecoder 1 = ON (default) and 0 = OFF + eaeSupport: 1 + + # -- Configure the kubernetes service associated with the the PMS component + # @default -- See below + serviceConfig: + # Configure the type of service + type: ClusterIP + + # -- Specify the externalTrafficPolicy for the service. Options: Cluster, Local + # [[ref](https://kubernetes.io/docs/tutorials/services/source-ip/)] + externalTrafficPolicy: + + # -- Provide additional annotations which may be required. + annotations: {} + + # -- Provide additional labels which may be required. + labels: {} + + # -- Enable or disable the per-pod volumes that cache the codecs. This saves a great deal of time when starting the workers. + # @default -- See below + codecVolumes: + # -- Enable or disable the creation of the codec volumes + enabled: true + + # -- Add any extra labels needed + labels: {} + + # -- Add any extra annotations needed + annotations: {} + + # -- AccessMode for the persistent volume. + # Make sure to select an access mode that is supported by your storage provider! + # [[ref]](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) + accessMode: ReadWriteOnce + + # -- The size of the volume + size: 1Gi + + # -- Storage Class for the codec volumes + # If set to `-`, dynamic provisioning is disabled. + # If set to something else, the given storageClass is used. + # If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. 
+ storageClass: + + # -- Enable or disable the various health check probes for this component + # @default -- See below + healthProbes: + # -- Enable or disable the startup probe + startup: true + + # -- Enable or disable the readiness probe + readiness: true + + # -- Enable or disable the liveness probe + liveness: true + + # -- Configure the resource requests and limits for the worker component + # @default -- See below + resources: + requests: + # -- CPU Request amount + cpu: 2000m + + # -- Memory Request Amount + memory: 3Gi + + limits: + # -- CPU Limit amount + cpu: 4000m + + # -- Memory Limit amount + memory: 6Gi + + # -- Configure the affinity rules for the worker pods. This helps prevent multiple worker pods from + # being scheduled on the same node as another worker pod or as the main plex media server. + affinity: {} + # podAntiAffinity: + # preferredDuringSchedulingIgnoredDuringExecution: + # - podAffinityTerm: + # labelSelector: + # matchLabels: + # name: clusterplex-worker + # topologyKey: kubernetes.io/hostname + # weight: 100 + # - podAffinityTerm: + # labelSelector: + # matchLabels: + # name: clusterplex-pms + # topologyKey: kubernetes.io/hostname + # weight: 50 \ No newline at end of file diff --git a/charts/clusterplex/templates/pms.yaml b/charts/clusterplex/templates/pms.yaml index f64af96..eab2522 100644 --- a/charts/clusterplex/templates/pms.yaml +++ b/charts/clusterplex/templates/pms.yaml @@ -17,12 +17,27 @@ configMaps: PUID: '{{ .Values.global.PUID | default "1000" }}' DOCKER_MODS: 'ghcr.io/pabloromeo/clusterplex_dockermod:{{ .Values.global.clusterplexVersion | default .Chart.AppVersion }}' ORCHESTRATOR_URL: 'http://{{ .Release.Name }}-orchestrator:{{ .Values.orchestrator.config.port | default "3500" }}' + {{ if .Values.pms.config.localRelayEnabled }} PMS_SERVICE: '{{ .Release.Name }}-pms' + {{ else }} + {{- if not (regexMatch "^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$" .Values.pms.config.pmsIP) }} + {{- fail "Invalid or empty pmsIP provided. This must be a valid IP address if localRelayEnabled is false." }} + {{- end }} + PMS_IP: '{{ .Values.pms.config.pmsIP }}' + {{- end }} PMS_PORT: '{{ .Values.pms.config.port | default "32400" }}' TRANSCODER_VERBOSE: '{{ .Values.pms.config.transcoderVerbose | default "1" }}' + {{ if .Values.worker.enabled }} TRANSCODE_OPERATING_MODE: '{{ .Values.pms.config.transcodeOperatingMode | default "both" }}' + {{ else }} + TRANSCODE_OPERATING_MODE: 'local' + {{- end }} + {{ if .Values.pms.config.localRelayEnabled }} LOCAL_RELAY_ENABLED: '1' LOCAL_RELAY_PORT: '{{ .Values.pms.config.relayPort | default "32499" }}' + {{ else }} + LOCAL_RELAY_ENABLED: '0' + {{- end }} {{ if .Values.pms.config.plexClaimToken }} secrets: diff --git a/charts/clusterplex/templates/worker.yaml b/charts/clusterplex/templates/worker.yaml index e0b922b..25d5ad6 100644 --- a/charts/clusterplex/templates/worker.yaml +++ b/charts/clusterplex/templates/worker.yaml @@ -9,7 +9,10 @@ image: controller: type: statefulset - replicas: '{{ .Values.worker.config.replicas | default 1 }}' + {{- if and .Values.worker.enabled (lt (.Values.worker.config.replicas | int) 1) }} + {{- fail "Invalid value for worker.config.replicas. Must be greater than or equal to 1 when worker.enabled is true." 
}} + {{- end }} + replicas: '{{ .Values.worker.config.replicas }}' configMaps: config: @@ -123,7 +126,7 @@ probes: scheme: HTTP path: /health port: http - initialDelaySeconds: 60 + initialDelaySeconds: 120 timeoutSeconds: 5 periodSeconds: 10 successThreshold: 1 @@ -141,7 +144,7 @@ probes: scheme: HTTP path: /health port: http - initialDelaySeconds: 60 + initialDelaySeconds: 120 timeoutSeconds: 5 periodSeconds: 10 successThreshold: 1 diff --git a/charts/clusterplex/values.yaml b/charts/clusterplex/values.yaml index b7eb61c..77c46b5 100644 --- a/charts/clusterplex/values.yaml +++ b/charts/clusterplex/values.yaml @@ -116,7 +116,7 @@ pms: transcoderVerbose: 1 # -- Set the transcode operating mode. Valid options are local (No workers), remote (only remote workers), both (default, remote first then local if remote fails). - # You MUST set this to local if you disable the worker installation. + # If you disable the worker then this will be set to local automatically as that is the only valid option for that confguration. transcodeOperatingMode: both # -- Set the Plex claim token obtained from https://plex.tv/claim @@ -129,9 +129,17 @@ pms: # -- The port that Plex will listen on port: 32400 + # -- Enable or disable the local relay function. In most cases this should be left to the default (true). + # If you disable this, you must add the pod IP address of each worker or the pod network CIDR to Plex under the + # `List of IP addresses and networks that are allowed without auth` option in Plex's network configuration. + localRelayEnabled: true + # -- The port that the relay service will listen on relayPort: 32499 + # -- The IP address that plex is using. This is only utilized if you disable the localRelayEnabled option above. + pmsIP: "" + # -- Configure the kubernetes service associated with the the PMS component # @default -- See below serviceConfig: From 1ce46830a65f68cd1257506775265cbd691c2465 Mon Sep 17 00:00:00 2001 From: Brandan Schmitz Date: Fri, 7 Jul 2023 15:46:32 -0500 Subject: [PATCH 5/5] Add charts tag and do not build apps if only chart is updated --- .github/workflows/chart.yaml | 1 + .github/workflows/lscr.yml | 12 ++++++++---- .github/workflows/main.yml | 4 ++++ 3 files changed, 13 insertions(+), 4 deletions(-) diff --git a/.github/workflows/chart.yaml b/.github/workflows/chart.yaml index 3f356d4..d190237 100644 --- a/.github/workflows/chart.yaml +++ b/.github/workflows/chart.yaml @@ -4,6 +4,7 @@ on: push: tags: - 'v*.*.*' + - 'chart-v*.*.*' jobs: release: diff --git a/.github/workflows/lscr.yml b/.github/workflows/lscr.yml index 3424ec3..c260147 100644 --- a/.github/workflows/lscr.yml +++ b/.github/workflows/lscr.yml @@ -12,15 +12,19 @@ on: - 'experimental' tags: - 'v*.*.*' + paths-ignore: + - 'charts/**' pull_request: branches: - 'master' - 'dev' + paths-ignore: + - 'charts/**' -permissions: +permissions: contents: read packages: write - + # A workflow run is made up of one or more jobs that can run sequentially or in parallel jobs: build-pms-docker-mod: @@ -29,7 +33,7 @@ jobs: # Get the repositery's code - name: Checkout uses: actions/checkout@v3 - + # https://github.com/docker/setup-buildx-action - name: Set up Docker Buildx id: buildx @@ -90,7 +94,7 @@ jobs: # Get the repositery's code - name: Checkout uses: actions/checkout@v3 - + # https://github.com/docker/setup-buildx-action - name: Set up Docker Buildx id: buildx diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 3d94df2..90de744 100644 --- a/.github/workflows/main.yml +++ 
b/.github/workflows/main.yml @@ -14,10 +14,14 @@ on: - "experimental" tags: - "v*.*.*" + paths-ignore: + - 'charts/**' pull_request: branches: - "master" - "dev" + paths-ignore: + - 'charts/**' permissions: contents: read
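
Once the release workflow above has published the chart to the gh-pages index, installation follows the pattern already shown in the chart README. A minimal sketch, assuming the repository URL used by the workflow and a locally edited copy of the custom-values.yaml example introduced in this series (the release name and namespace here are illustrative):

$ helm repo add clusterplex https://pabloromeo.github.io/clusterplex
$ helm repo update
$ helm install clusterplex clusterplex/clusterplex \
    --namespace clusterplex --create-namespace \
    --values custom-values.yaml

Any value not overridden in custom-values.yaml falls back to the defaults in charts/clusterplex/values.yaml.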