---
# Default values for the Dask chart (coffea-casa variant).
# nameOverride: dask
# fullnameOverride: dask

scheduler:
  name: scheduler  # Dask scheduler name.
  enabled: true  # Enable/disable scheduler.
  image:
    # repository: "awightma/coffea-custom-docker_no-patch"  # Container image repository.
    # tag: "2022.01.14"
    repository: "awightma/coffea-custom-docker"  # Container image repository.
    tag: "2021.12.17"  # Container image tag (quoted so YAML tooling never re-types it).
    pullPolicy: Always  # Container image pull policy.
    # Container image [pull secrets](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/).
    pullSecrets: []  # explicit empty list instead of a bare (null) key; Helm `with` guards treat both as unset
    # pullSecrets:
    #   - name: regcred
  replicas: 1  # Number of schedulers (should always be 1).
  serviceType: "ClusterIP"  # Scheduler service type. Set to `LoadBalancer` to expose outside of your cluster.
  # serviceType: "NodePort"
  # serviceType: "LoadBalancer"
  loadBalancerIP: null  # Some cloud providers allow you to specify the loadBalancerIP when using the `LoadBalancer` service type. If your cloud does not support it this option will be ignored.
  servicePort: 8786  # Scheduler service internal port.
  serviceAnnotations: {}  # Scheduler service annotations.
  extraArgs: []  # Extra CLI arguments to be passed to the scheduler. See examples below.
  # - --help
  # - --nanny-contact-address:8001
  # - --dashboard-address:8787
  # - scheduler-setup.py
  resources: {}  # Scheduler pod resources. See `values.yaml` for example values.
  # limits:
  #   cpu: 1.8
  #   memory: 6G
  # requests:
  #   cpu: 1.8
  #   memory: 6G
  tolerations: []  # Tolerations.
  affinity: {}  # Container affinity.
  nodeSelector: {}  # Node Selector.
  securityContext: {}  # Security Context.
  # serviceAccountName: ""
  metrics:
    enabled: false  # Enable scheduler metrics. Pip package [prometheus-client](https://pypi.org/project/prometheus-client/) should be present on scheduler.
    serviceMonitor:
      enabled: false  # Enable scheduler servicemonitor.
      namespace: ""  # Deploy servicemonitor in different namespace, e.g. monitoring.
      namespaceSelector: {}  # Selector to select which namespaces the Endpoints objects are discovered from.
      # Default: scrape .Release.Namespace only
      # To scrape all, use the following:
      # namespaceSelector:
      #   any: true
      additionalLabels: {}  # Additional labels to add to the ServiceMonitor metadata.
      interval: 30s  # Interval at which metrics should be scraped.
      jobLabel: ""  # The label to use to retrieve the job name from.
      targetLabels: []  # TargetLabels transfers labels on the Kubernetes Service onto the target.
      metricRelabelings: []  # MetricRelabelConfigs to apply to samples before ingestion.
webUI:
  name: webui  # Dask webui name.
  servicePort: 80  # webui service internal port.
  ingress:
    enabled: false  # Enable ingress.
    tls: false  # Ingress should use TLS.
    # secretName: dask-scheduler-tls
    hostname: dask-ui.example.com  # Ingress hostname.
    annotations: {}  # Ingress annotations (explicit empty map, not a bare null key). See examples below.
    # kubernetes.io/ingress.class: "nginx"
    # secretName: my-tls-cert
    # kubernetes.io/tls-acme: "true"
worker:
  name: worker  # Dask worker name.
  servicePort: 8001  # Worker nanny service internal port (matches --nanny-port below).
  serviceType: "ClusterIP"
  image:
    # repository: "awightma/coffea-custom-docker_no-patch"  # Container image repository.
    # tag: "2022.01.14"
    repository: "awightma/coffea-custom-docker"  # Container image repository.
    tag: "2021.12.17"  # Container image tag (quoted so YAML tooling never re-types it).
    pullPolicy: Always  # Container image pull policy.
    dask_worker: "dask-worker"  # Dask worker command. E.g `dask-cuda-worker` for GPU worker.
    # Container image [pull secrets](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/).
    pullSecrets: []  # explicit empty list instead of a bare (null) key
    # pullSecrets:
    #   - name: regcred
  replicas: 1  # Number of workers.
  strategy:
    type: RollingUpdate  # Strategy used to replace old Pods with new ones.
  custom_scheduler_url: null  # connect to already existing scheduler, deployed not by this chart.
  default_resources:  # overwritten by resource limits if they exist
    cpu: 1  # Default CPU (DEPRECATED use `resources`).
    memory: "4GiB"  # Default memory (DEPRECATED use `resources`).
  env:  # Environment variables. See `values.yaml` for example values.
    - name: COFFEA_CASA_SIDECAR
      value: 'True'  # quoted string on purpose — the consumer expects the text "True", not a YAML boolean
    # - name: EXTRA_CONDA_PACKAGES
    #   value: numba xarray -c conda-forge
    # - name: EXTRA_PIP_PACKAGES
    #   value: s3fs dask-ml prometheus-client --upgrade
  extraArgs:  # Extra CLI arguments to be passed to the worker
    # - --host
    # - tcp://172.17.0.10
    - --nanny-contact-address
    - tcp://dask-test-worker:8001
    # - tcp://172.17.0.10
    - --nanny-port
    - "8001"  # quoted: bare digits would parse as an integer, the CLI expects a string
    # - --help
    # - --preload
    # - worker-setup.py
  # resources: {}  # Worker pod resources. See `values.yaml` for example values.
  resources:
    limits:
      memory: 3G
    #   cpu: 1
    #   nvidia.com/gpu: 1
    # requests:
    #   cpu: 1
    #   memory: 3G
    #   nvidia.com/gpu: 1
  mounts: {}  # Worker Pod volumes and volume mounts; mounts.volumes follows Kubernetes API v1 Volumes spec, mounts.volumeMounts follows Kubernetes API v1 VolumeMount spec.
  # volumes:
  #   - name: data
  #     emptyDir: {}
  # volumeMounts:
  #   - name: data
  #     mountPath: /data
  annotations: {}  # Annotations
  tolerations: []  # Tolerations.
  affinity: {}  # Container affinity.
  nodeSelector: {}  # Node Selector.
  securityContext: {}  # Security Context.
  # serviceAccountName: ""
  # port: ""
  portDashboard: 8790  # Worker dashboard and metrics port.
  # this option overrides "--nthreads" on workers, which defaults to resources.limits.cpu / default_resources.limits.cpu
  # use it if you need to limit the amount of threads used by multicore workers, or to make workers with non-whole-number cpu limits
  # threads_per_worker: 1
  metrics:
    enabled: false  # Enable workers metrics. Pip package [prometheus-client](https://pypi.org/project/prometheus-client/) should be present on workers.
    podMonitor:
      enabled: false  # Enable workers podmonitor
      namespace: ""  # Deploy podmonitor in different namespace, e.g. monitoring.
      namespaceSelector: {}  # Selector to select which namespaces the Endpoints objects are discovered from.
      # Default: scrape .Release.Namespace only
      # To scrape all, use the following:
      # namespaceSelector:
      #   any: true
      additionalLabels: {}  # Additional labels to add to the PodMonitor metadata.
      interval: 30s  # Interval at which metrics should be scraped.
      jobLabel: ""  # The label to use to retrieve the job name from.
      podTargetLabels: []  # PodTargetLabels transfers labels on the Kubernetes Pod onto the target.
      metricRelabelings: []  # MetricRelabelConfigs to apply to samples before ingestion.
jupyter:
  name: jupyter  # Jupyter name.
  enabled: true  # Enable/disable the bundled Jupyter notebook.
  rbac: true  # Create RBAC service account and role to allow Jupyter pod to scale worker pods and access logs.
  image:
    repository: "daskdev/dask-notebook"  # Container image repository.
    tag: "2021.12.0"  # Container image tag (quoted so YAML tooling never re-types it).
    pullPolicy: IfNotPresent  # Container image pull policy.
    # Container image [pull secrets](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/).
    pullSecrets: []  # explicit empty list instead of a bare (null) key
    # pullSecrets:
    #   - name: regcred
  replicas: 1  # Number of notebook servers.
  serviceType: "ClusterIP"  # Scheduler service type. Set to `LoadBalancer` to expose outside of your cluster.
  # serviceType: "NodePort"
  # serviceType: "LoadBalancer"
  servicePort: 80  # Jupyter service internal port.
  # This hash corresponds to the password 'dask'
  password: 'sha1:aae8550c0a44:9507d45e087d5ee481a5ce9f4f16f37a0867318c'  # Password hash. Default hash corresponds to the password `dask`.
  env: []  # Environment variables (explicit empty list, not a bare null key). See examples below.
  # env:
  #   - name: EXTRA_CONDA_PACKAGES
  #     value: "numba xarray -c conda-forge"
  #   - name: EXTRA_PIP_PACKAGES
  #     value: "s3fs dask-ml --upgrade"
  command: null  # Container command.
  args: []  # Container arguments.
  # - "start.sh"
  # - "jupyter"
  # - "lab"
  # Block scalar: the hash-lines below are the *content* of extraConfig, not YAML comments.
  extraConfig: |-
    # Extra Jupyter config goes here
    # E.g
    # c.NotebookApp.port = 8888
  resources: {}  # Jupyter pod resources. See `values.yaml` for example values.
  # limits:
  #   cpu: 2
  #   memory: 6G
  # requests:
  #   cpu: 2
  #   memory: 6G
  mounts: {}  # Worker Pod volumes and volume mounts; mounts.volumes follows Kubernetes API v1 Volumes spec, mounts.volumeMounts follows Kubernetes API v1 VolumeMount spec.
  # volumes:
  #   - name: data
  #     emptyDir: {}
  # volumeMounts:
  #   - name: data
  #     mountPath: /data
  tolerations: []  # Tolerations.
  affinity: {}  # Container affinity.
  nodeSelector: {}  # Node Selector.
  securityContext: {}  # Security Context.
  serviceAccountName: "dask-jupyter"  # Service account for use with RBAC
  ingress:
    enabled: false  # Enable ingress.
    tls: false  # Ingress should use TLS.
    # secretName: dask-jupyter-tls
    hostname: dask-jupyter.example.com  # Ingress hostname.
    annotations: {}  # Ingress annotations (explicit empty map, not a bare null key). See examples below.
    # kubernetes.io/ingress.class: "nginx"
    # secretName: my-tls-cert
    # kubernetes.io/tls-acme: "true"