From 89073e4d24bd66ebf36f74c4f54d3f498be5064b Mon Sep 17 00:00:00 2001 From: Alcides Mignoso e Silva Date: Mon, 19 Dec 2022 15:15:07 -0300 Subject: [PATCH] feat: initial chart --- Chart.yaml | 16 ++ README.md | 313 +++++++++++++++++++++ templates/_autoscaler_getkubeconfig.tpl | 5 + templates/_autoscaler_secret.tpl | 7 + templates/_cinder_csi_plugin.tpl | 81 ++++++ templates/_cni_calico.tpl | 29 ++ templates/_kubeconfig_ingress.tpl | 31 +++ templates/_nodeinitializer.tpl | 69 +++++ templates/_nodescript.tpl | 57 ++++ templates/_openstack_controller_mng.tpl | 61 +++++ templates/_rke_ingress_raw.tpl | 30 ++ templates/autoscaler.yaml | 232 ++++++++++++++++ templates/cluster.yaml | 203 ++++++++++++++ templates/clusterroletemplatebinding.yaml | 10 + templates/managedcharts.yaml | 50 ++++ templates/nodeconfig.yaml | 142 ++++++++++ templates/secret_autoscaler.yaml | 17 ++ values.yaml | 160 +++++++++++ values_example.yaml | 316 ++++++++++++++++++++++ 19 files changed, 1829 insertions(+) create mode 100644 Chart.yaml create mode 100644 README.md create mode 100644 templates/_autoscaler_getkubeconfig.tpl create mode 100644 templates/_autoscaler_secret.tpl create mode 100644 templates/_cinder_csi_plugin.tpl create mode 100644 templates/_cni_calico.tpl create mode 100644 templates/_kubeconfig_ingress.tpl create mode 100644 templates/_nodeinitializer.tpl create mode 100644 templates/_nodescript.tpl create mode 100644 templates/_openstack_controller_mng.tpl create mode 100644 templates/_rke_ingress_raw.tpl create mode 100644 templates/autoscaler.yaml create mode 100644 templates/cluster.yaml create mode 100644 templates/clusterroletemplatebinding.yaml create mode 100644 templates/managedcharts.yaml create mode 100644 templates/nodeconfig.yaml create mode 100644 templates/secret_autoscaler.yaml create mode 100644 values.yaml create mode 100644 values_example.yaml diff --git a/Chart.yaml b/Chart.yaml new file mode 100644 index 0000000..9080db5 --- /dev/null +++ b/Chart.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +name: cluster-rke2-openstack +description: Helm Chart for provisioning RKE2 Cluster on top of OpenStack +version: '0.1.0' +maintainers: + - name: eduardo.scheidet + email: eduardo.scheidet@luizalabs.com + - name: leonardo.martinsda + email: leonardo.martinsda@luizalabs.com + - name: alcidesmig + email: alcidesmig@gmail.com + - name: renato.guilhermini + email: renato.guilhermini@luizalabs.com +annotations: + catalog.cattle.io/type: cluster-rke2-openstack + catalog.cattle.io/namespace: fleet-default \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..dbfee23 --- /dev/null +++ b/README.md @@ -0,0 +1,313 @@ +# Kubernetes RKE2 Cluster on OpenStack Helm Chart + +This repository contains a Helm Chart for deploying RKE2 clusters on top of OpenStack. 
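+
+At a glance: fill in `values.yaml` (every parameter is documented below) and install the chart, typically into Rancher's `fleet-default` namespace (the namespace annotated in `Chart.yaml`); see the Installing section. A minimal sketch, where the cluster name and credentials are placeholders:
+
+```yaml
+# values.yaml (excerpt) -- names and credentials below are placeholders
+cloudprovider: openstack
+cluster:
+  name: my-cluster
+  kubernetesVersion: v1.21.14+rke2r1
+openstack:
+  authUrl: https://openstack.example.com:5000
+  applicationCredentialId: placeholder-credential-id
+  applicationCredentialSecret: placeholder-credential-secret
+```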
+
+## Prerequisites
+
+This Chart was tested on:
+- OpenStack Ussuri & Yoga
+- Rancher v2.6.6 and Rancher v2.6.9
+
+The Docker images used as dependencies for this chart are:
+- [fork of kubernetes/autoscaler](https://github.com/alcidesmig/autoscaler): https://hub.docker.com/r/luizalabscicdmgc/cluster-autoscaler-amd64
+- [k8scloudprovider/cinder-csi-plugin](https://hub.docker.com/r/k8scloudprovider/cinder-csi-plugin)
+- [k8scloudprovider/openstack-cloud-controller-manager](https://hub.docker.com/r/k8scloudprovider/openstack-cloud-controller-manager)
+- [openstacktools/openstack-client](https://hub.docker.com/r/openstacktools/openstack-client)
+
+## Parameters
+
+| Name | Description | Default value |
+|-------------- | -------------- | -------------- |
+| `cloudprovider` | Cloud Provider | `"openstack"` |
+| `imageRegistryURL` | Registry for pulling images (must end with "/" when non-empty) | `""` |
+
+### Cloud Init
+
+The Cloud Init section allows executing arbitrary code on the nodes through [OpenStack cloud-init](https://cloudinit.readthedocs.io/en/latest/topics/examples.html).
+
+| Name | Description | Default value |
+|-------------- | -------------- | -------------- |
+| `cloudinit.enable` | Enable cloud-init | `false` |
+| `cloudinit.sshPubKeys` | SSH public keys to be injected into cluster hosts | `[]` |
+| `cloudinit.bootcmd` | Commands to be executed only during the first boot | Cilium rp_filter sysctl commands (see `values.yaml`) |
+| `cloudinit.runcmd` | Commands to be executed on every cloud-init run | Cilium rp_filter sysctl commands (see `values.yaml`) |
+
+### OpenStack parameters
+
+OpenStack general configuration section.
+
+| Name | Description | Default value |
+|-------------- | -------------- | -------------- |
+| `openstack.authUrl` | OpenStack authentication URL | `https://openstack.example.com:5000` |
+| `openstack.applicationCredentialId` | Application Credential ID for accessing OpenStack | `` |
+| `openstack.applicationCredentialSecret` | Application Credential Secret for accessing OpenStack | `` |
+| `openstack.availabilityZone` | Availability Zone name for disks | `nova` |
+| `openstack.subnetID` | LB/Amphora subnet ID (must be on the same project network) | `` |
+| `openstack.projectId` | Cluster project ID | `` |
+| `openstack.tenantDomainName` | OpenStack Tenant Domain Name | `Default` |
+| `openstack.tenantName` | OpenStack Project name | `` |
+| `openstack.username` | Application credential's username | `` |
+| `openstack.domainName` | Tenant Domain Name ID | `default` |
+| `openstack.region` | OpenStack Region | `RegionOne` |
+| `openstack.floatingNetID` | Network ID for FIPs | `` |
+| `openstack.floatingSubnetID` | Subnet ID for FIPs | `` |
+| `openstack.openstackClientImage` | Any Docker image with the OpenStack CLI | `openstacktools/openstack-client` |
+
+### Cluster parameters
+
+The Cluster section holds general cluster configuration; a minimal example follows, and all parameters are listed in the table below.
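+
+A minimal sketch of this block, using the chart's own defaults (the cluster name is a placeholder):
+
+```yaml
+cluster:
+  name: my-cluster                               # placeholder
+  kubernetesVersion: v1.21.14+rke2r1             # chart default
+  apiAddr: kubernetes.default.svc.cluster.local  # chart default
+  apiPort: 6443                                  # chart default
+  secretsEncryption: false
+  cni:
+    name: cilium                                 # chart default
+```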
+
+| Name | Description | Default value |
+|-------------- | -------------- | -------------- |
+| `cluster.apiAddr` | Kubernetes service host address | `kubernetes.default.svc.cluster.local` |
+| `cluster.apiPort` | Kubernetes service host port | `6443` |
+| `cluster.additionalManifests` | Additional manifests to be created in the cluster | `[]` |
+| `cluster.secretsEncryption` | Enable RKE secrets encryption | `false` |
+| `cluster.name` | Cluster name | `placeholder-cluster-name` |
+| `cluster.kubernetesVersion` | Kubernetes version | `v1.21.14+rke2r1` |
+
+### Cluster nodes upgrade strategy
+
+These map to the [kubectl drain options](https://manpages.debian.org/unstable/kubernetes-client/kubectl-drain.1.en.html); see that page for more info.
+
+| Name | Description | Default value |
+|-------------- | -------------- | -------------- |
+| `cluster.upgradeStrategy.controlPlaneDrainOptions.enabled` | Enable custom drain options for Control Plane nodes | `false` |
+| `cluster.upgradeStrategy.controlPlaneDrainOptions.deleteEmptyDirData` | Delete emptyDir data when draining Control Plane nodes | `false` |
+| `cluster.upgradeStrategy.controlPlaneDrainOptions.disableEviction` | Force drain to use delete, even if eviction is supported | `false` |
+| `cluster.upgradeStrategy.controlPlaneDrainOptions.gracePeriod` | Period of time in seconds given to each pod to terminate gracefully | `0` |
+| `cluster.upgradeStrategy.controlPlaneDrainOptions.ignoreErrors` | Ignore errors while draining | `false` |
+| `cluster.upgradeStrategy.controlPlaneDrainOptions.skipWaitForDeleteTimeoutSeconds` | If a pod's DeletionTimestamp is older than N seconds, skip waiting for the pod | `0` |
+| `cluster.upgradeStrategy.controlPlaneDrainOptions.timeout` | Timeout for draining Control Plane nodes | `0` |
+| `cluster.upgradeStrategy.workerDrainOptions.enabled` | Enable custom drain options for Worker nodes | `false` |
+| `cluster.upgradeStrategy.workerDrainOptions.deleteEmptyDirData` | Delete emptyDir data when draining Worker nodes | `false` |
+| `cluster.upgradeStrategy.workerDrainOptions.disableEviction` | Force drain to use delete, even if eviction is supported | `false` |
+| `cluster.upgradeStrategy.workerDrainOptions.gracePeriod` | Period of time in seconds given to each pod to terminate gracefully | `0` |
+| `cluster.upgradeStrategy.workerDrainOptions.ignoreErrors` | Ignore errors while draining | `false` |
+| `cluster.upgradeStrategy.workerDrainOptions.skipWaitForDeleteTimeoutSeconds` | If a pod's DeletionTimestamp is older than N seconds, skip waiting for the pod | `0` |
+| `cluster.upgradeStrategy.workerDrainOptions.timeout` | Timeout for draining Worker nodes | `0` |
+
+### Cluster Autoscaler parameters
+
+This section configures the cluster autoscaler.
+
+| Name | Description | Default value |
+|-------------- | -------------- | -------------- |
+| `cluster.autoscaler.enabled` | Enable cluster autoscaler | `false` |
+| `cluster.autoscaler.rancherUrl` | Rancher URL | `https://rancher.placeholder.com` |
+| `cluster.autoscaler.rancherToken` | Rancher Token for the autoscaler | `rancher-token` |
+| `cluster.autoscaler.image` | Cluster autoscaler image | `luizalabscicdmgc/cluster-autoscaler-amd64:dev` |
+
+### Cluster monitoring parameters
+
+| Name | Description | Default value |
+|-------------- | -------------- | -------------- |
+| `monitoring.enabled` | Install Rancher cluster monitoring | `false` |
+
+### RKE configurations parameters
+
+This section covers Rancher RKE2 configuration; an example of the ingress settings follows, and the full list of parameters is in the table below.
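+
+For instance, the bundled ingress chart can be enabled with autoscaling as follows (a sketch using the chart's default values):
+
+```yaml
+rke:
+  rkeIngressChart:
+    enabled: true
+    replicaCount: "1"
+    autoScaling:
+      enabled: true
+      minReplicas: "1"
+      maxReplicas: "3"
+```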
+
+| Name | Description | Default value |
+|-------------- | -------------- | -------------- |
+| `rke.rkeIngressChart.enabled` | Install the cluster's default ingress via the ingress chart | `true` |
+| `rke.rkeIngressChart.replicaCount` | Ingress replicas (for Ingress via chart) | `"1"` |
+| `rke.rkeIngressChart.autoScaling.enabled` | Enable Ingress autoscaling (for Ingress via chart) | `true` |
+| `rke.rkeIngressChart.autoScaling.minReplicas` | Ingress autoscaling minimum number of replicas (for Ingress via chart) | `"1"` |
+| `rke.rkeIngressChart.autoScaling.maxReplicas` | Ingress autoscaling maximum number of replicas (for Ingress via chart) | `"3"` |
+| `rke.rkeIngressRawManifest.enabled` | Install the cluster's default ingress via raw manifest | `false` |
+| `rke.etcd.args` | Custom etcd args | `["quota-backend-bytes=858993459", "max-request-bytes=33554432"]` |
+| `rke.etcd.exposeMetrics` | Expose etcd metrics | `true` |
+| `rke.etcd.snapshotRetention` | Number of etcd snapshots to retain | `5` |
+| `rke.etcd.snapshotScheduleCron` | Snapshot cron schedule | `"0 */12 * * *"` |
+| `rke.coredns.nodelocal.enabled` | Enable CoreDNS NodeLocal | `true` |
+| `rke.openstackControllerManager.image` | openstack-cloud-controller-manager image (omit field for official) | `k8scloudprovider/openstack-cloud-controller-manager` |
+| `rke.openstackControllerManager.tag` | openstack-cloud-controller-manager image tag (omit field for official) | `v1.24.0` |
+| `rke.openstackControllerManager.enableLoadBalancerCreateMonitor` | Create Load Balancer monitor | `false` |
+| `rke.openstackControllerManager.cinderCsiPlugin.image` | cinder-csi-plugin image (omit field for official) | `k8scloudprovider/cinder-csi-plugin` |
+| `rke.openstackControllerManager.cinderCsiPlugin.tag` | cinder-csi-plugin image tag (omit field for official) | `v1.25.0` |
+| `rke.agentEnvVars` | RKE Agent environment variables | `[]` |
+| `rke.kubeapi.args` | Custom args for the Kubernetes API server | `[]` |
+| `rke.kubelet.args` | Custom args for the kubelet | `[]` |
+| `rke.localClusterAuthEndpoint.enabled` | Enable out-of-Rancher cluster authentication | `false` |
+| `rke.localClusterAuthEndpoint.fqdn` | FQDN for out-of-Rancher cluster authentication | `example.rancher.local` |
+| `rke.localClusterAuthEndpoint.secretName` | Certificate secret name for out-of-Rancher cluster authentication | `example-rancher-local-secret` |
+| `rke.tlsSan` | Cluster tls-san | `[]` |
+
+#### RKE registries configurations parameters
+
+This section allows configuring custom registry options (private registries and mirrors). The `configs` and `mirrors` sections follow the format described in the [containerd registry configuration docs](https://docs.rke2.io/install/containerd_registry_configuration/).
+
+| Name | Description | Default value |
+|-------------- | -------------- | -------------- |
+| `rke.registries.enabled` | Enable custom registries configuration | `false` |
+| `rke.registries.configs` | See the [official docs](https://docs.rke2.io/install/containerd_registry_configuration/) | `` |
+| `rke.registries.mirrors` | See the [official docs](https://docs.rke2.io/install/containerd_registry_configuration/) | `` |
+
+#### Cluster custom scripts
+
+This section allows installing DaemonSets that run arbitrary tasks on the cluster nodes (for example, mounting a file from the host and changing it); a sketch follows.
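+
+A sketch of one `rke.nodeScripts` entry, adapted from `values_example.yaml` (the script, image and env values are only illustrative):
+
+```yaml
+rke:
+  nodeScripts:
+    - name: script-example-1
+      runOnControlPlanes: true          # also run on control-plane nodes
+      script:
+        - /bin/bash
+        - -c
+        - "touch /tmp/example_script"
+      image: "ubuntu:22.04"
+      pauseContainerImage: "google/pause"
+      env:
+        - name: "ENV"
+          value: "VALUE"
+      volumes:
+        entries:
+          - name: root-mount
+            hostPath:
+              path: /
+        volumeMounts:
+          - name: root-mount
+            mountPath: /root
+```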
+
+`rke.nodeScripts` is a list of scripts with the following structure:
+
+| Name | Description | Example |
+|-------------- | -------------- | -------------- |
+| `name` | Node script name | `script-example-1` |
+| `runOnControlPlanes` | Run the DaemonSet on control planes as well as workers | `false` |
+| `script` | Script to execute | `["/bin/bash", "-c", "touch /tmp/example_script"]` |
+| `image` | Node script pod image | `"ubuntu:22.04"` |
+| `pauseContainerImage` | Container to run after the script finishes | `pause:2.0` |
+| `env` | Environment variables for the script pod | Exactly as pod envs |
+| `volumes.entries` | Pod volumes | Exactly as pod volumes |
+| `volumes.volumeMounts` | Pod volume mounts | Exactly as pod volume mounts |
+
+### Nodepools parameters
+
+This section defines the nodepools of the cluster. The `nodepools` section is a list with the following structure:
+
+| Name | Description | Example |
+|-------------- | -------------- | -------------- |
+| `name` | Nodepool name | `wa` |
+| `netId` | Project network ID for nodes | `83512d30-c8e1-4eb7-a5d0-8126b1d75ad2` |
+| `availabilityZone` | Nodepool's availability zone | `ne1` |
+| `quantity` | Number of nodes in the nodepool (overrides the autoscaler) | `1` |
+| `etcd` | Run etcd on the nodes (enable it for Control Planes) | `false` |
+| `worker` | Use the nodepool's nodes as workers | `true` |
+| `controlplane` | Use the nodepool's nodes as control planes | `false` |
+| `bootFromVolume` | Set to `false` to use ephemeral disks | `false` |
+| `volumeSize` | Volume size, only valid if `bootFromVolume=true` | `50` |
+| `volumeDevicePath` | Volume device path, only valid if `bootFromVolume=true` | `/` |
+| `volumeType` | Volume type, only valid if `bootFromVolume=true` | `_DEFAULT_` |
+| `flavorName` | Node flavor | `general-1` |
+| `imageName` | Node image | `ubuntu-20.04` |
+| `secGroups` | Node security group | `default` |
+| `keypairName` | Keypair name for accessing nodes | `keypair` |
+| `sshUser` | SSH user | `ubuntu` |
+| `sshPort` | SSH port | `22` |
+| `activeTimeout` | Active timeout for nodes (time for an instance to be running before recreation) | `900` |
+| `nodeGroupMaxSize` | Nodepool max size (for the autoscaler) | `10` |
+| `nodeGroupMinSize` | Nodepool min size (for the autoscaler) | `1` |
+| `labels` | Node labels | Key-value map of labels |
+| `taints` | Node taints | List of taints in Kubernetes style |
+| `unhealthyNodeTimeout` | Unhealthy node timeout | `5m` |
+| `drainBeforeDelete` | Drain nodes before deletion | `true` |
+
+
+## CNI
+
+To use the Chart, one [CNI](https://docs.ranchermanager.rancher.io/v2.5/faq/container-network-interface-providers) must be chosen for the cluster. We've tested the Chart with `canal`, `calico` and `cilium`.
+
+| Name | Description | Default value |
+|-------------- | -------------- | -------------- |
+| `cluster.cni.name` | CNI to be used | `cilium` |
+
+
+### Using cni=canal
+
+To use the canal CNI, add the following content to `spec.rkeConfig.chartValues` (`cluster.yaml` file):
+
+```yaml
+rke2-canal:
+  calico:
+    vethuMTU: 1430 # MTU is configurable, we've tested with 1430
+```
+
+This is done by adding the snippet to the chart values under `rke.additionalChartValues`, as shown below.
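+
+A sketch of that nesting in `values.yaml` (the chart's own defaults nest `rke2-cilium` under `rke.additionalChartValues` the same way):
+
+```yaml
+rke:
+  additionalChartValues:
+    rke2-canal:
+      calico:
+        vethuMTU: 1430
+```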
+### Using cni=calico
+
+To use the calico CNI, add the following content to `spec.rkeConfig.chartValues` (`cluster.yaml` file):
+
+```yaml
+rke2-calico:
+  certs:
+    node: {}
+    typha: {}
+  installation:
+    calicoNetwork:
+      bgp: Enabled
+      ipPools:
+        - blockSize: 24 # can be changed
+          cidr: 10.42.0.0/16 # can be changed
+          encapsulation: IPIPCrossSubnet
+          natOutgoing: Enabled
+      mtu: 1430 # can be changed
+  ipamConfig: {}
+```
+
+This is done by adding the snippet to the chart values under `rke.additionalChartValues`.
+
+### Using cni=cilium
+
+To use the cilium CNI, add the following content to `spec.rkeConfig.chartValues` (`cluster.yaml` file):
+
+```yaml
+rke2-cilium:
+  cilium:
+    mtu: 1430 # can be changed
+    hubble:
+      metrics:
+        enabled:
+          - dns:query;ignoreAAAA
+          - drop
+          - tcp
+          - flow
+          - icmp
+          - http
+      relay:
+        enabled: true
+        image:
+          repository: "cilium/hubble-relay"
+          tag: "v1.12.1"
+      ui:
+        backend:
+          image:
+            repository: "cilium/hubble-ui-backend"
+            tag: "v0.9.2"
+        enabled: true
+        frontend:
+          image:
+            repository: "cilium/hubble-ui"
+            tag: "v0.9.2"
+        replicas: 1
+    image:
+      repository: "rancher/mirrored-cilium-cilium"
+      tag: "v1.12.1"
+    nodeinit:
+      image:
+        repository: "rancher/mirrored-cilium-startup-script"
+        tag: "d69851597ea019af980891a4628fb36b7880ec26"
+    operator:
+      image:
+        repository: "rancher/mirrored-cilium-operator"
+        tag: "v1.12.1"
+    preflight:
+      image:
+        repository: "rancher/mirrored-cilium-cilium"
+        tag: "v1.12.1"
+    kubeProxyReplacement: "strict"
+    k8sServiceHost: {{ $.Values.cluster.apiAddr }} # put the value of apiAddr here
+    k8sServicePort: {{ $.Values.cluster.apiPort }} # put the value of apiPort here
+```
+
+This is done by adding the snippet to the chart values under `rke.additionalChartValues`.
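+
+Note that `values.yaml` is not templated, so the `k8sServiceHost`/`k8sServicePort` placeholders above must be written out literally there; the chart's own defaults do exactly that:
+
+```yaml
+rke:
+  additionalChartValues:
+    rke2-cilium:
+      cilium:
+        k8sServiceHost: kubernetes.default.svc.cluster.local
+        k8sServicePort: 6443
+```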
+ +It is also necessary to add to Chart values `.cloudinit.runcmd` the following: + +```sh +- sed -i -e '/net.ipv4.conf.*.rp_filter/d' $(grep -ril '\.rp_filter' /etc/sysctl.d/ /usr/lib/sysctl.d/) +- sysctl -a | grep '\.rp_filter' | awk '{print $1" = 0"}' > /etc/sysctl.d/1000-cilium.conf +- sysctl --system +``` + +## Installing + +``` +helm install cluster-rke2-openstack -f values.yaml repo/cluster-rke2-openstack +``` + +# Acknowledgment + +This Helm Chart was built on top of https://github.com/rancher/cluster-template-examples, and the documentation was inspired by [Bitnami's documentation style](https://github.com/bitnami/charts/blob/4dd3dddb27048a0f818a3ce7a3dad7fece0d0701/template/CHART_NAME/README.md) diff --git a/templates/_autoscaler_getkubeconfig.tpl b/templates/_autoscaler_getkubeconfig.tpl new file mode 100644 index 0000000..b7b7c47 --- /dev/null +++ b/templates/_autoscaler_getkubeconfig.tpl @@ -0,0 +1,5 @@ +{{- define "cluster-rke2-openstack.autoscalerKubeconfig" }} +#!/bin/sh +CLUSTER_ID=$(curl -s -H "Authorization: Bearer {{ $.Values.cluster.autoscaler.rancherToken }}" {{ $.Values.cluster.autoscaler.rancherUrl }}/v3/clusters?name={{ $.Values.cluster.name }} | jq -r .data[].id) +curl -s -u {{ $.Values.cluster.autoscaler.rancherToken }} {{ $.Values.cluster.autoscaler.rancherUrl }}/v3/clusters/$CLUSTER_ID?action=generateKubeconfig -X POST -H 'content-type: application/json' --insecure | jq -r .config +{{- end }} \ No newline at end of file diff --git a/templates/_autoscaler_secret.tpl b/templates/_autoscaler_secret.tpl new file mode 100644 index 0000000..0680cda --- /dev/null +++ b/templates/_autoscaler_secret.tpl @@ -0,0 +1,7 @@ +{{- define "cluster-rke2-openstack.autoscalerConfigMap" }} +url: {{ $.Values.cluster.autoscaler.rancherUrl }} +token: {{ $.Values.cluster.autoscaler.rancherToken }} +clusterName: {{ $.Values.cluster.name }} +clusterNamespace: {{ $.Release.Namespace }} +providerIDPrefix: openstack +{{- end }} \ No newline at end of file diff --git a/templates/_cinder_csi_plugin.tpl b/templates/_cinder_csi_plugin.tpl new file mode 100644 index 0000000..1bbfbbd --- /dev/null +++ b/templates/_cinder_csi_plugin.tpl @@ -0,0 +1,81 @@ +{{/* +OpenStack Cinder CSI plugin +*/}} +{{- define "cluster-rke2-openstack.cinderCsiPlugin" }} +apiVersion: helm.cattle.io/v1 +kind: HelmChart +metadata: + annotations: + meta.helm.sh/release-name: {{ $.Release.Name }} + name: cinder-csi-plugin + namespace: kube-system +spec: + chart: openstack-cinder-csi + repo: https://kubernetes.github.io/cloud-provider-openstack + targetNamespace: kube-system + bootstrap: false + valuesContent: |+ + storageClass: + enabled: true + delete: + isDefault: false + allowVolumeExpansion: true + retain: + isDefault: false + allowVolumeExpansion: true + custom: |- + --- + apiVersion: storage.k8s.io/v1 + kind: StorageClass + metadata: + name: csi-cinder-default + annotations: + storageclass.kubernetes.io/is-default-class: "true" + allowVolumeExpansion: true + parameters: + availability: {{ $.Values.openstack.availabilityZone }} + provisioner: cinder.csi.openstack.org + reclaimPolicy: Delete + volumeBindingMode: Immediate + --- + apiVersion: storage.k8s.io/v1 + kind: StorageClass + metadata: + name: csi-cinder-nvme + annotations: + {} + labels: + {} + allowVolumeExpansion: true + parameters: + availability: nova + type: nvme + provisioner: cinder.csi.openstack.org + reclaimPolicy: Retain + volumeBindingMode: Immediate + secret: + enabled: true + create: true + name: cinder-csi-cloud-config + data: + cloud.conf: |- + 
[Global] + auth-url={{ $.Values.openstack.authUrl }} + application-credential-id={{ $.Values.openstack.applicationCredentialId }} + application-credential-secret={{ $.Values.openstack.applicationCredentialSecret }} + region={{ $.Values.openstack.region }} + [BlockStorage] + ignore-volume-az=true + {{- if .Values.rke.cinderCsiPlugin }} + {{- if .Values.rke.cinderCsiPlugin.image }} + csi: + plugin: + image: + repository: {{ $.Values.imageRegistryURL }}{{ $.Values.rke.cinderCsiPlugin.image }} + {{- if .Values.rke.cinderCsiPlugin.tag }} + tag: {{ $.Values.rke.cinderCsiPlugin.tag }} + {{- end }} + {{- end }} + {{- end }} +--- +{{- end }} \ No newline at end of file diff --git a/templates/_cni_calico.tpl b/templates/_cni_calico.tpl new file mode 100644 index 0000000..4f02f89 --- /dev/null +++ b/templates/_cni_calico.tpl @@ -0,0 +1,29 @@ +{{/* +Calico setup script for running on nodes +*/}} +{{- define "calicoCNIConfigmap" }} +{{- if eq .Values.cluster.cni.name "calico" }} +kind: ConfigMap +apiVersion: v1 +metadata: + name: entrypoint + namespace: kube-system + labels: + app: default-init +data: + entrypoint.sh: | + #!/bin/sh + echo "Starting configuration" + echo "nameserver 8.8.8.8" >> /etc/resolv.conf + cat /etc/resolv.conf + IP_ADDR=$(ip -f inet addr show ens3|grep -o "inet [0-9]*\.[0-9]*\.[0-9]*\.[0-9]*" | grep -o "[0-9]*\.[0-9]*\.[0-9]*\.[0-9]*") + echo "Machine IP: " $IP_ADDR + MAC_ADDRESS=$(cat /sys/class/net/ens3/address) + echo "Mac Address: " $MAC_ADDRESS + ID=$(openstack port list --fixed-ip ip-address=$IP_ADDR -f value -c ID) + echo "Port ID:" $ID + openstack port set --allowed-address mac-address=$MAC_ADDRESS,ip-address={{ $.Values.cluster.cni.podCidr | default "10.42.0.0/16" }} $ID + echo "Configuration Done" +--- +{{- end }} +{{- end }} \ No newline at end of file diff --git a/templates/_kubeconfig_ingress.tpl b/templates/_kubeconfig_ingress.tpl new file mode 100644 index 0000000..8d3e3b5 --- /dev/null +++ b/templates/_kubeconfig_ingress.tpl @@ -0,0 +1,31 @@ +{{/* +Ingress for local authentication bypassing rancher +*/}} +{{ define "cluster-rke2-openstack.kubeconfigIngress"}} +{{ if $.Values.rke.localClusterAuthEndpoint.enabled }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: kubeconfig + namespace: default + annotations: + nginx.ingress.kubernetes.io/backend-protocol: HTTPS +spec: + rules: + - host: {{ $.Values.rke.localClusterAuthEndpoint.fqdn | default "direct-external-access.domain.local" }} + http: + paths: + - backend: + service: + name: kubernetes + port: + number: 443 + path: / + pathType: Prefix + tls: + - hosts: + - {{ $.Values.rke.localClusterAuthEndpoint.fqdn | default "direct-external-access.domain.local" }} + secretName: {{ $.Values.rke.localClusterAuthEndpoint.secretName }} +--- +{{ end }} +{{ end }} \ No newline at end of file diff --git a/templates/_nodeinitializer.tpl b/templates/_nodeinitializer.tpl new file mode 100644 index 0000000..edf8a10 --- /dev/null +++ b/templates/_nodeinitializer.tpl @@ -0,0 +1,69 @@ +{{/* +Node initializer to configurate +*/}} +{{- define "calicoNodeInitializer" }} +{{- if eq .Values.cluster.cni.name "calico" }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: node-initializer + namespace: kube-system + labels: + app: default-init +spec: + selector: + matchLabels: + app: default-init + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + name: node-initializer + app: default-init + spec: + priorityClassName: system-node-critical + hostNetwork: true + volumes: + - name: root-mount + 
hostPath: + path: / + - name: entrypoint + configMap: + name: entrypoint + defaultMode: 0744 + initContainers: + - image: {{ $.Values.openstack.openstackClientImage }} + name: node-initializer + command: ["/scripts/entrypoint.sh"] + env: + - name: OS_AUTH_TYPE + value: v3applicationcredential + - name: OS_REGION_NAME + value: {{ $.Values.openstack.region }} + - name: OS_INTERFACE + value: public + - name: OS_AUTH_URL + value: {{ $.Values.openstack.authUrl }}/v3 + - name: OS_APPLICATION_CREDENTIAL_ID + value: {{ $.Values.openstack.applicationCredentialId }} + - name: OS_APPLICATION_CREDENTIAL_SECRET + value: {{ $.Values.openstack.applicationCredentialSecret }} + - name: ROOT_MOUNT_DIR + value: /root + securityContext: + privileged: true + volumeMounts: + - name: root-mount + mountPath: /root + - name: entrypoint + mountPath: /scripts + containers: + - image: google/pause + name: pause + tolerations: + - key: + operator: Exists +--- +{{- end }} +{{- end }} \ No newline at end of file diff --git a/templates/_nodescript.tpl b/templates/_nodescript.tpl new file mode 100644 index 0000000..efb3d0e --- /dev/null +++ b/templates/_nodescript.tpl @@ -0,0 +1,57 @@ +{{/* +Node scripts to run arbitrary codes on cluster nodes through DaemonSets +*/}} +{{- define "cluster-rke2-openstack.nodeScript" }} +{{- range .Values.rke.nodeScripts }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: node-script-{{ .name }} + namespace: kube-system + labels: + script: {{ .name }} +spec: + selector: + matchLabels: + script: {{ .name }} + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + name: node-script-{{ .name }} + script: {{ .name }} + spec: + priorityClassName: system-node-critical + hostNetwork: true + {{- if .volumes }} + volumes: + {{- toYaml .volumes.entries | nindent 6 }} + {{- end }} + initContainers: + - image: {{ .image | default "alpine:3.8" }} + name: node-script-{{ .name }} + command: + {{- toYaml .script | nindent 8 }} + {{- if .env }} + env: + {{- toYaml .env | nindent 8 }} + {{- end }} + securityContext: + privileged: true + {{- if .volumes }} + volumeMounts: + {{- toYaml .volumes.volumeMounts | nindent 8 }} + {{- end }} + containers: + # @todo parametize + - image: {{ .pauseContainerImage }} + name: pause + tolerations: + {{- if $.runOnControlPlanes }} + - key: + operator: Exists + {{- end}} +--- +{{- end }} +{{- end }} \ No newline at end of file diff --git a/templates/_openstack_controller_mng.tpl b/templates/_openstack_controller_mng.tpl new file mode 100644 index 0000000..31128bc --- /dev/null +++ b/templates/_openstack_controller_mng.tpl @@ -0,0 +1,61 @@ +{{/* +OpenStack Cloud controller Manager +*/}} +{{ define "cluster-rke2-openstack.openstack-controller-manager" }} +apiVersion: helm.cattle.io/v1 +kind: HelmChart +metadata: + name: openstack-cloud-controller-manager + namespace: kube-system +spec: + bootstrap: true + chart: openstack-cloud-controller-manager + repo: https://kubernetes.github.io/cloud-provider-openstack + targetNamespace: kube-system + valuesContent: |- + controllerExtraArgs: |- + - --cluster-name={{ $.Values.cluster.name }} + logVerbosityLevel: 2 + secret: + create: true + name: cloud-config + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + - effect: NoExecute + key: node-role.kubernetes.io/etcd + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + cloudConfig: + global: + auth-url: {{ $.Values.openstack.authUrl }} + application-credential-id: {{ 
$.Values.openstack.applicationCredentialId }} + application-credential-secret: {{ $.Values.openstack.applicationCredentialSecret }} + region: {{ $.Values.openstack.region }} + loadBalancer: + {{- if .Values.rke.openstackControllerManager }} + create-monitor: {{ $.Values.rke.openstackControllerManager.enableLoadBalancerCreateMonitor }} + {{- else }} + create-monitor: false + {{- end }} + monitor-delay: 60s + monitor-timeout: 30s + monitor-max-retries: 5 + use-octavia: true + cascade-delete: true + subnet-id: {{ $.Values.openstack.subnetID }} + floating-network-id: {{ $.Values.openstack.floatingNetID }} + block_storage: + ignore-volume-az: true + {{- if .Values.rke.openstackControllerManager }} + {{- if .Values.rke.openstackControllerManager.image }} + image: + repository: {{ $.Values.imageRegistryURL }}{{ $.Values.rke.openstackControllerManager.image }} + {{- if .Values.rke.openstackControllerManager.tag }} + tag: {{ $.Values.rke.openstackControllerManager.tag }} + {{- end }} + {{- end }} + {{- end }} +--- +{{ end }} \ No newline at end of file diff --git a/templates/_rke_ingress_raw.tpl b/templates/_rke_ingress_raw.tpl new file mode 100644 index 0000000..3702d3d --- /dev/null +++ b/templates/_rke_ingress_raw.tpl @@ -0,0 +1,30 @@ +{{/* +Cluster ingress via raw manifest +*/}} +{{ define "cluster-rke2-openstack.rke-ingress-raw-manifest" }} +{{- if $.Values.rke.rkeIngressRawManifest.enabled }} +apiVersion: helm.cattle.io/v1 +kind: HelmChartConfig +metadata: + annotations: + meta.helm.sh/release-name: {{ $.Release.Name }} + name: rke2-ingress-nginx + namespace: kube-system +spec: + bootstrap: false + targetNamespace: kube-system + valuesContent: |- + controller: + hostNetwork: false + publishService: + enabled: true + service: + enabled: true + kind: DaemonSet + tolerations: + - effect: NoExecute + key: CriticalAddonsOnly + operator: "Exists" +--- +{{- end }} +{{ end }} diff --git a/templates/autoscaler.yaml b/templates/autoscaler.yaml new file mode 100644 index 0000000..3b3a719 --- /dev/null +++ b/templates/autoscaler.yaml @@ -0,0 +1,232 @@ +{{- if $.Values.cluster.autoscaler.enabled }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cluster-autoscaler + namespace: {{ $.Release.Namespace }} + labels: + app: cluster-autoscaler +spec: + selector: + matchLabels: + app: cluster-autoscaler + replicas: 1 + template: + metadata: + labels: + app: cluster-autoscaler + spec: + initContainers: + - name: get-kubeconfig + image: luizalabscicdmgc/netshoot + command: + - /bin/bash + - -exc + - | + /bin/bash /script/init.sh > /config/kubeconfig + volumeMounts: + - name: tmp-kubeconfig + mountPath: /config + - name: kubeconfigscript + mountPath: /script + containers: + - image: {{ $.Values.cluster.autoscaler.image }} + name: cluster-autoscaler + command: + - ./cluster-autoscaler + args: + - --kubeconfig=/config/kubeconfig + - --cloud-provider=rancher + - --cloud-config=/rancherconfig/config.yaml + - --logtostderr=true + - --stderrthreshold=info + - --expander=random + - --node-group-auto-discovery=rancher:clusterName={{ $.Values.cluster.name }} + - --v=4 + volumeMounts: + - name: rancherconfig + mountPath: /rancherconfig + readOnly: true + - name: tmp-kubeconfig + mountPath: /config + env: + - name: CAPI_GROUP + value: cluster.x-k8s.io + serviceAccountName: user-{{ $.Values.cluster.name }} + terminationGracePeriodSeconds: 10 + volumes: + - name: rancherconfig + secret: + secretName: autoscaler-cluster-creds + - name: kubeconfigscript + secret: + secretName: autoscaler-cluster-script + - name: 
tmp-kubeconfig + emptyDir: {} + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: cluster-autoscaler-workload-{{ $.Values.cluster.name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-autoscaler-workload-{{ $.Values.cluster.name }} +subjects: +- kind: ServiceAccount + name: user-{{ $.Values.cluster.name }} + namespace: {{ .Release.Namespace }} +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: cluster-autoscaler-management-{{ $.Values.cluster.name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-autoscaler-management-{{ $.Values.cluster.name }} +subjects: +- kind: ServiceAccount + name: user-{{ $.Values.cluster.name }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: user-{{ $.Values.cluster.name }} + namespace: {{ .Release.Namespace }} +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: cluster-autoscaler-workload-{{ $.Values.cluster.name }} + namespace: {{ .Release.Namespace }} +rules: + - apiGroups: + - "" + resources: + - namespaces + - persistentvolumeclaims + - persistentvolumes + - pods + - replicationcontrollers + - services + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - update + - watch + - apiGroups: + - "" + resources: + - pods/eviction + verbs: + - create + - apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - list + - watch + - apiGroups: + - storage.k8s.io + resources: + - csinodes + - storageclasses + - csidrivers + - csistoragecapacities + verbs: + - get + - list + - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - replicasets + - statefulsets + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: cluster-autoscaler-management-{{ $.Values.cluster.name }} + namespace: {{ .Release.Namespace }} +rules: + - apiGroups: + - cluster.x-k8s.io + resources: + - machinedeployments + - machinedeployments/scale + - machines + - machinesets + verbs: + - get + - list + - update + - watch +--- +apiVersion: v1 +kind: Secret +metadata: + name: autoscaler-cluster-creds + namespace: {{ .Release.Namespace }} +type: Opaque +data: + config.yaml: {{- include "cluster-rke2-openstack.autoscalerConfigMap" . | b64enc | indent 1 }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: autoscaler-cluster-script + namespace: {{ .Release.Namespace }} +type: Opaque +data: + init.sh: {{- include "cluster-rke2-openstack.autoscalerKubeconfig" . 
| b64enc | indent 1 }} +{{ end }} \ No newline at end of file diff --git a/templates/cluster.yaml b/templates/cluster.yaml new file mode 100644 index 0000000..dde7774 --- /dev/null +++ b/templates/cluster.yaml @@ -0,0 +1,203 @@ +apiVersion: provisioning.cattle.io/v1 +kind: Cluster +metadata: + name: {{ $.Values.cluster.name }} + annotations: + ui.rancher/badge-color: '#{{ .Values.cluster.name | sha256sum | substr 0 6}}' + ui.rancher/badge-icon-text: {{ .Values.cluster.name | upper }} + ui.rancher/badge-text: {{ $.Values.openstack.tenantName }} + lifecycle.cattle.io/create.nodepool-provisioner: 'true' + nodepool.cattle.io/reconcile: '' + namespace: {{ .Release.Namespace }} +spec: + cloudCredentialSecretName: {{ $.Values.cluster.cloudCredentialSecretName }} + kubernetesVersion: {{ $.Values.cluster.kubernetesVersion }} + enableNetworkPolicy: false + localClusterAuthEndpoint: + enabled: {{ $.Values.rke.localClusterAuthEndpoint.enabled | default "false" }} + fqdn: {{ $.Values.rke.localClusterAuthEndpoint.fqdn }} + caCerts: {{ $.Values.rke.localClusterAuthEndpoint.caCerts | toJson }} + rkeConfig: + # Registries to be injected into /etc/rancher/rke2/registries.yaml + {{- if $.Values.rke.registries.enabled }} + registries: + configs: + {{- toYaml $.Values.rke.registries.configs | nindent 8 }} + mirrors: + {{- toYaml $.Values.rke.registries.mirrors | nindent 8 }} + {{- end }} + etcd: + snapshotScheduleCron: {{ $.Values.rke.etcd.snapshotScheduleCron }} + snapshotRetention: {{ $.Values.rke.etcd.snapshotRetention }} + machineGlobalConfig: + cloud-provider-name: external + disable-cloud-controller: true + secrets-encryption: {{ $.Values.rke.secretsEncryption | default "false" }} + {{- if $.Values.rke.etcd.args }} + etcd-arg: {{ toYaml $.Values.rke.etcd.args | nindent 9 }} + {{- end }} + etcd-expose-metrics: {{ $.Values.rke.etcd.exposeMetrics }} + cni: {{ $.Values.cluster.cni.name }} + {{- if $.Values.rke.kubeapi.args }} + kube-apiserver-arg: {{ toYaml $.Values.rke.kubeapi.args | nindent 9 }} + {{- end }} + {{- if $.Values.rke.tlsSan }} + tls-san: {{ toYaml $.Values.rke.tlsSan | nindent 9}} + {{- end }} + {{- if $.Values.rke.kubelet.args }} + kubelet-arg: {{ toYaml $.Values.rke.kubelet.args | nindent 9}} + {{- end }} + machineSelectorConfig: + - config: + {{- if .Values.rke.agentEnvVars }} + agentEnvVars: + {{- range $key, $value := $.Values.rke.agentEnvVars }} + - name: {{ $value.name | quote }} + value: {{ $value.value | quote }} + {{- end }} + {{- end }} + machineLabelSelector: + matchLabels: + node-role.kubernetes.io/control-plane: "false" + - config: + protect-kernel-defaults: false + chartValues: + rke2-coredns: {{ toYaml .Values.rke.coredns | nindent 8}} + {{- if $.Values.rke.rkeIngressChart.enabled }} + rke2-ingress-nginx: + controller: + kind: "Deployment" + hostNetwork: false + publishService: + enabled: true + service: + enabled: true + autoscaling: + enabled: {{ $.Values.rke.rkeIngressChart.autoScaling.enabled | default "true" }} + minReplicas: {{ $.Values.rke.rkeIngressChart.autoScaling.minReplicas | default 1 }} + maxReplicas: {{ $.Values.rke.rkeIngressChart.autoScaling.maxReplicas | default 3 }} + replicaCount: {{ $.Values.rke.rkeIngressChart.replicaCount | default 1 }} + tolerations: + - effect: NoExecute + key: CriticalAddonsOnly + {{- end}} + {{- if .Values.rke.additionalChartValues }} + {{- toYaml $.Values.rke.additionalChartValues | nindent 6 }} + {{- end }} + machinePools: + {{- if .Values.nodepools }} + {{ range $index, $nodepool := .Values.nodepools }} + - name: {{ $nodepool.name 
}} + controlPlaneRole: {{ $nodepool.controlplane }} + etcdRole: {{ $nodepool.etcd }} + workerRole: {{ $nodepool.worker }} + quantity: {{ $nodepool.quantity }} + labels: + {{- range $key, $value := $nodepool.labels }} + {{ $key }}: {{ $value }} + {{- end }} + {{- if $nodepool.taints }} + taints: {{- toYaml $nodepool.taints | nindent 6 }} + {{- end }} + {{- if $nodepool.nodeStartupTimeout }} + nodeStartupTimeout: {{ $nodepool.nodeStartupTimeout }} + {{- end }} + {{- if $nodepool.unhealthyNodeTimeout }} + unhealthyNodeTimeout: {{ $nodepool.unhealthyNodeTimeout }} + drainBeforeDelete: true + {{- end }} + {{- if $nodepool.maxUnhealthy }} + maxUnhealthy: {{ $nodepool.maxUnhealthy }} + {{- end }} + {{- if $nodepool.unhealthyRange }} + unhealthyRange: {{ $nodepool.unhealthyRange }} + {{- end }} + machineConfigRef: + kind: OpenstackConfig + name: {{ $nodepool.name }} + paused: {{ $nodepool.paused | default false }} + displayName: {{ $nodepool.displayName | default $nodepool.name }} + {{- if $nodepool.rollingUpdate }} + rollingUpdate: + maxUnavailable: {{ $nodepool.rollingUpdate.maxUnavailable }} + maxSurge: {{ $nodepool.rollingUpdate.maxSurge }} + {{- end }} + {{- if $nodepool.machineDeploymentLabels }} + machineDeploymentLabels: + {{ toYaml $nodepool.machineDeploymentLabels | indent 8 }} + {{- end }} + machineDeploymentAnnotations: + cluster.provisioning.cattle.io/autoscaler-max-size: "{{ $nodepool.nodeGroupMaxSize }}" + cluster.provisioning.cattle.io/autoscaler-min-size: "{{ $nodepool.nodeGroupMinSize }}" + {{- if $nodepool.machineDeploymentAnnotations }} + {{ toYaml $nodepool.machineDeploymentAnnotations | indent 8 }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.nodepool }} + {{ $nodepool := .Values.nodepool }} + - name: {{ $nodepool.name }} + controlPlaneRole: {{ $nodepool.controlplane }} + etcdRole: {{ $nodepool.etcd }} + workerRole: {{ $nodepool.worker }} + quantity: {{ $nodepool.quantity }} + labels: + {{- range $key, $value := $nodepool.labels }} + {{ $key }}: {{ $value }} + {{- end }} + {{- if $nodepool.taints }} + taints: {{- toYaml $nodepool.taints | nindent 6 }} + {{- end }} + {{- if $nodepool.nodeStartupTimeout }} + nodeStartupTimeout: {{ $nodepool.nodeStartupTimeout }} + {{- end }} + {{- if $nodepool.unhealthyNodeTimeout }} + unhealthyNodeTimeout: {{ $nodepool.unhealthyNodeTimeout }} + drainBeforeDelete: true + {{- end }} + {{- if $nodepool.maxUnhealthy }} + maxUnhealthy: {{ $nodepool.maxUnhealthy }} + {{- end }} + {{- if $nodepool.unhealthyRange }} + unhealthyRange: {{ $nodepool.unhealthyRange }} + {{- end }} + machineConfigRef: + kind: OpenstackConfig + name: {{ $nodepool.name }} + paused: {{ $nodepool.paused | default false }} + displayName: {{ $nodepool.displayName | default $nodepool.name }} + {{- if $nodepool.rollingUpdate }} + rollingUpdate: + maxUnavailable: {{ $nodepool.rollingUpdate.maxUnavailable }} + maxSurge: {{ $nodepool.rollingUpdate.maxSurge }} + {{- end }} + {{- if $nodepool.machineDeploymentLabels }} + machineDeploymentLabels: + {{ toYaml $nodepool.machineDeploymentLabels | indent 8 }} + {{- end }} + machineDeploymentAnnotations: + cluster.provisioning.cattle.io/autoscaler-max-size: "{{ $nodepool.nodeGroupMaxSize }}" + cluster.provisioning.cattle.io/autoscaler-min-size: "{{ $nodepool.nodeGroupMinSize }}" + {{- if $nodepool.machineDeploymentAnnotations }} + {{ toYaml $nodepool.machineDeploymentAnnotations | indent 8 }} + {{- end }} + {{- end }} + additionalManifest: | + --- + {{- include "cluster-rke2-openstack.nodeScript" . 
| indent 6}} + {{- include "cluster-rke2-openstack.kubeconfigIngress" . | indent 6}} + {{- include "cluster-rke2-openstack.cinderCsiPlugin" . | indent 6}} + {{- include "cluster-rke2-openstack.openstack-controller-manager" . | indent 6}} + {{- include "cluster-rke2-openstack.rke-ingress-raw-manifest" . | indent 6}} + {{- if eq .Values.cluster.cni.name "calico" }} + {{- include "calicoCNIConfigmap" . | indent 6}} + {{- include "calicoNodeInitializer" . | indent 6}} + {{- end }} +{{- range .Values.cluster.additionalManifests }} +{{ toYaml . | indent 6}} + --- +{{- end }} +{{- if .Values.cluster.upgradeStrategy }} + upgradeStrategy: +{{ toYaml .Values.cluster.upgradeStrategy | indent 4 }} +{{- end }} diff --git a/templates/clusterroletemplatebinding.yaml b/templates/clusterroletemplatebinding.yaml new file mode 100644 index 0000000..7574ca4 --- /dev/null +++ b/templates/clusterroletemplatebinding.yaml @@ -0,0 +1,10 @@ +{{- range $index, $member := .Values.clusterMembers }} +apiVersion: management.cattle.io/v3 +clusterName: {{ $.Values.cluster.name }} +kind: ClusterRoleTemplateBinding +metadata: + name: ctrb-{{ trunc 8 (sha256sum (printf "%s/%s" $.Release.Namespace $member.principalName )) }} + namespace: {{ $.Release.Namespace }} +roleTemplateName: {{ $member.roleTemplateName }} +userPrincipalName: {{ $member.principalName }} +{{- end }} \ No newline at end of file diff --git a/templates/managedcharts.yaml b/templates/managedcharts.yaml new file mode 100644 index 0000000..596089a --- /dev/null +++ b/templates/managedcharts.yaml @@ -0,0 +1,50 @@ +{{- if .Values.monitoring.enabled }} +apiVersion: management.cattle.io/v3 +kind: ManagedChart +metadata: + name: monitoring-crd-{{ .Values.cluster.name }} + namespace: fleet-default +spec: + chart: "rancher-monitoring-crd" + repoName: "rancher-charts" + releaseName: "rancher-monitoring-crd" + version: {{ .Values.monitoring.version }} + {{- if .Values.monitoring.values }} + values: +{{ toYaml .Values.monitoring.values | indent 4 }} + {{- end }} + defaultNamespace: "cattle-monitoring-system" + targets: + - clusterName: {{ .Values.cluster.name }} +--- +apiVersion: management.cattle.io/v3 +kind: ManagedChart +metadata: + name: monitoring-{{ .Values.cluster.name }} + namespace: fleet-default +spec: + chart: "rancher-monitoring" + repoName: "rancher-charts" + releaseName: "rancher-monitoring" + version: {{ .Values.monitoring.version }} + {{- if .Values.monitoring.values }} + values: +{{ toYaml .Values.monitoring.values | indent 4 }} + {{- end }} + diff: + comparePatches: + - apiVersion: admissionregistration.k8s.io/v1beta1 + kind: MutatingWebhookConfiguration + name: rancher-monitoring-admission + jsonPointers: + - /webhooks/0/failurePolicy + - apiVersion: admissionregistration.k8s.io/v1beta1 + kind: ValidatingWebhookConfiguration + name: rancher-monitoring-admission + jsonPointers: + - /webhooks/0/failurePolicy + defaultNamespace: "cattle-monitoring-system" + targets: + - clusterName: {{ .Values.cluster.name }} +--- +{{- end }} \ No newline at end of file diff --git a/templates/nodeconfig.yaml b/templates/nodeconfig.yaml new file mode 100644 index 0000000..cfd2110 --- /dev/null +++ b/templates/nodeconfig.yaml @@ -0,0 +1,142 @@ +{{- if eq .Values.cloudprovider "openstack" }} +{{- range $index, $nodepool := .Values.nodepools }} +apiVersion: rke-machine-config.cattle.io/v1 +kind: OpenstackConfig +metadata: + name: {{ $nodepool.name }} + namespace: {{ $.Release.Namespace }} +{{- if $nodepool.userDataFile }} +userDataFile: {{ $nodepool.userDataFile }} +{{- end 
}}
+{{- if $.Values.cloudinit.enable }}
+userDataFile: |+
+  #cloud-config
+  {{- if $.Values.cloudinit.sshPubKeys }}
+  ssh_authorized_keys:
+  {{- range $value := $.Values.cloudinit.sshPubKeys }}
+  - {{ $value }}
+  {{- end }}
+  {{- end }}
+  {{- if $.Values.cloudinit.bootcmd }}
+  bootcmd:
+  {{- range $value := $.Values.cloudinit.bootcmd }}
+  - {{ $value }}
+  {{- end }}
+  {{- end }}
+  {{- if $.Values.cloudinit.runcmd }}
+  package_update: true
+  package_upgrade: true
+  cloud_config_modules:
+  - runcmd
+  cloud_final_modules:
+  - scripts-user
+  runcmd:
+  {{- range $value := $.Values.cloudinit.runcmd }}
+  - {{ $value }}
+  {{- end }}
+  - cat /var/log/cloud-init.log
+  output: {all: '| tee -a /var/log/cloud-init-output.log'}
+  {{- end }}
+{{- end }}
+authUrl: {{ $.Values.openstack.authUrl }}
+region: {{ $.Values.openstack.region | default "RegionOne" }}
+applicationCredentialId: {{ $.Values.openstack.applicationCredentialId }}
+applicationCredentialSecret: {{ $.Values.openstack.applicationCredentialSecret }}
+availabilityZone: {{ $nodepool.availabilityZone }}
+domainName: {{ $.Values.openstack.domainName | default "default" }}
+netId: {{ $nodepool.netId }}
+tenantName: {{ $.Values.openstack.tenantName }}
+{{- if $.Values.openstack.username }}
+username: {{ $.Values.openstack.username }}
+{{- end }}
+sshUser: {{ $nodepool.sshUser | default "ubuntu" }}
+sshPort: "{{ $nodepool.sshPort | default "22" }}"
+activeTimeout: "{{ $nodepool.activeTimeout | default "200" }}"
+flavorName: {{ $nodepool.flavorName }}
+imageName: {{ $nodepool.imageName | default "ubuntu-2004-cloudimg" }}
+ipVersion: "4"
+keypairName: {{ $nodepool.keypairName }}
+# Default value accepted by the helm implementation, but not working successfully yet
+privateKeyFile: {{ $nodepool.privateKeyFile | default "./keypair.pem" }}
+secGroups: {{ $nodepool.secGroups | default "default" }}
+tenantDomainName: {{ $nodepool.tenantDomainName | default "Default" }}
+bootFromVolume: {{ $nodepool.bootFromVolume }}
+{{- if $nodepool.volumeSize }}
+volumeSize: "{{ $nodepool.volumeSize }}"
+{{- end }}
+{{- if $nodepool.volumeType }}
+volumeType: {{ $nodepool.volumeType }}
+{{- end }}
+---
+{{- end }}
+{{ $nodepool := .Values.nodepool }}
+{{- if $nodepool }}
+apiVersion: rke-machine-config.cattle.io/v1
+kind: OpenstackConfig
+metadata:
+  name: {{ $nodepool.name }}
+  namespace: {{ $.Release.Namespace }}
+{{- if $nodepool.userDataFile }}
+userDataFile: {{ $nodepool.userDataFile }}
+{{- end }}
+{{- if $.Values.cloudinit.enable }}
+userDataFile: |+
+  #cloud-config
+  {{- if $.Values.cloudinit.sshPubKeys }}
+  ssh_authorized_keys:
+  {{- range $value := $.Values.cloudinit.sshPubKeys }}
+  - {{ $value }}
+  {{- end }}
+  {{- end }}
+  {{- if $.Values.cloudinit.bootcmd }}
+  bootcmd:
+  {{- range $value := $.Values.cloudinit.bootcmd }}
+  - {{ $value }}
+  {{- end }}
+  {{- end }}
+  {{- if $.Values.cloudinit.runcmd }}
+  package_update: true
+  package_upgrade: true
+  cloud_config_modules:
+  - runcmd
+  cloud_final_modules:
+  - scripts-user
+  runcmd:
+  {{- range $value := $.Values.cloudinit.runcmd }}
+  - {{ $value }}
+  {{- end }}
+  - cat /var/log/cloud-init.log
+  output: {all: '| tee -a /var/log/cloud-init-output.log'}
+  {{- end }}
+{{- end }}
+authUrl: {{ $.Values.openstack.authUrl }}
+region: {{ $.Values.openstack.region | default "RegionOne" }}
+applicationCredentialId: {{ $.Values.openstack.applicationCredentialId }}
+applicationCredentialSecret: {{ $.Values.openstack.applicationCredentialSecret }}
+availabilityZone: {{ $nodepool.availabilityZone }}
+domainName: {{ 
$.Values.openstack.domainName | default "default" }} +netId: {{ $nodepool.netId }} +tenantName: {{ $.Values.openstack.tenantName }} +{{- if $.Values.openstack.username }} +username: {{ $.Values.openstack.username }} +{{- end }} +sshUser: {{ $nodepool.sshUser | default "ubuntu" }} +sshPort: "{{ $nodepool.sshPort | default "22" }}" +activeTimeout: "{{ $nodepool.activeTimeout | default "200" }}" +flavorName: {{ $nodepool.flavorName }} +imageName: {{ $nodepool.imageName | default "ubuntu-2004-cloudimg" }} +ipVersion: "4" +keypairName: {{ $nodepool.keypairName }} +# Valor default aceito pela implementação do helm porém ainda sem funcionar com sucesso +privateKeyFile: {{ $nodepool.privateKeyFile | default "./keypair.pem" }} +secGroups: {{ $nodepool.secGroups | default "default" }} +tenantDomainName: {{ $nodepool.tenantDomainName | default "Default" }} +bootFromVolume: {{ $nodepool.bootFromVolume }} +{{- if $nodepool.volumeSize }} +volumeSize: "{{ $nodepool.volumeSize }}" +{{- end }} +{{- if $nodepool.volumeType }} +volumeType: {{ $nodepool.volumeType }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/templates/secret_autoscaler.yaml b/templates/secret_autoscaler.yaml new file mode 100644 index 0000000..69bc385 --- /dev/null +++ b/templates/secret_autoscaler.yaml @@ -0,0 +1,17 @@ +{{ if $.Values.cluster.autoscaler.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: rancherconfig + namespace: {{ $.Release.Namespace }} + labels: + managed-by: helm +type: Opaque +stringData: + secret: |- + [Global] + url = {{ $.Values.cluster.autoscaler.rancherUrl | quote }} + token = {{ $.Values.cluster.autoscaler.rancherToken | quote }} + rancher_cluster_name = {{ $.Values.cluster.autoscaler.racherClusterName | quote }} + cluster_name = {{ $.Values.cluster.name | quote }} +{{ end }} \ No newline at end of file diff --git a/values.yaml b/values.yaml new file mode 100644 index 0000000..c11e73c --- /dev/null +++ b/values.yaml @@ -0,0 +1,160 @@ +cloudprovider: openstack +imageRegistryURL: "" +cloudinit: + enable: false + sshPubKeys: [] + bootcmd: + - sed -i -e '/net.ipv4.conf.*.rp_filter/d' $(grep -ril '\.rp_filter' /etc/sysctl.d/ /usr/lib/sysctl.d/) + - sysctl -a | grep '\.rp_filter' | awk '{print $1" = 0"}' > /etc/sysctl.d/1000-cilium.conf + - sysctl --system + runcmd: + - sed -i -e '/net.ipv4.conf.*.rp_filter/d' $(grep -ril '\.rp_filter' /etc/sysctl.d/ /usr/lib/sysctl.d/) + - sysctl -a | grep '\.rp_filter' | awk '{print $1" = 0"}' > /etc/sysctl.d/1000-cilium.conf + - sysctl --system +openstack: + authUrl: https://openstack.example.com:5000 + applicationCredentialId: + applicationCredentialSecret: + availabilityZone: nova + subnetID: + projectId: + tenantDomainName: Default + tenantName: + username: + domainName: default + region: RegionOne + floatingNetID: + floatingSubnetID: + openstackClientImage: openstacktools/openstack-client +cluster: + apiAddr: kubernetes.default.svc.cluster.local + apiPort: 6443 + additionalManifests: {} + secretsEncryption: false + upgradeStrategy: + controlPlaneDrainOptions: + enabled: false + deleteEmptyDirData: false + disableEviction: false + gracePeriod: 0 + ignoreErrors: false + skipWaitForDeleteTimeoutSeconds: 0 + timeout: 0 + workerDrainOptions: + enabled: false + deleteEmptyDirData: false + disableEviction: false + gracePeriod: 0 + ignoreErrors: false + skipWaitForDeleteTimeoutSeconds: 0 + timeout: 0 + workerConcurrency: "10%" + controlPlaneConcurrency: "10%" + name: placeholder-cluster-name + kubernetesVersion: v1.21.14+rke2r1 + cni: + name: 
cilium + autoscaler: + enabled: true + rancherUrl: https://rancher.placeholder.com + rancherToken: rancher-token + image: luizalabscicdmgc/cluster-autoscaler-amd64:dev +monitoring: + enabled: false +rke: + rkeIngressChart: + enabled: true + replicaCount: "1" + autoScaling: + enabled: true + minReplicas: "1" + maxReplicas: "3" + additionalChartValues: + rke2-cilium: + cilium: + mtu: 1430 # can be changed + hubble: + metrics: + enabled: + - dns:query;ignoreAAAA + - drop + - tcp + - flow + - icmp + - http + relay: + enabled: true + image: + repository: "cilium/hubble-relay" + tag: "v1.12.1" + ui: + backend: + image: + repository: "cilium/hubble-ui-backend" + tag: "v0.9.2" + enabled: true + frontend: + image: + repository: "cilium/hubble-ui" + tag: "v0.9.2" + replicas: 1 + image: + repository: "rancher/mirrored-cilium-cilium" + tag: "v1.12.1" + nodeinit: + image: + repository: "rancher/mirrored-cilium-startup-script" + tag: "d69851597ea019af980891a4628fb36b7880ec26" + operator: + image: + repository: "rancher/mirrored-cilium-operator" + tag: "v1.12.1" + preflight: + image: + repository: "rancher/mirrored-cilium-cilium" + tag: "v1.12.1" + kubeProxyReplacement: "strict" + k8sServiceHost: kubernetes.default.svc.cluster.local + k8sServicePort: 6443 + rkeIngressRawManifest: + enabled: false + etcd: + args: + - "quota-backend-bytes=858993459" + - "max-request-bytes=33554432" + exposeMetrics: true + snapshotRetention: 5 + snapshotScheduleCron: "0 */12 * * *" + coredns: + nodelocal: + enabled: true + openstackControllerManager: + image: k8scloudprovider/openstack-cloud-controller-manager + tag: v1.24.0 + enableLoadBalancerCreateMonitor: false + cinderCsiPlugin: + image: k8scloudprovider/cinder-csi-plugin + tag: v1.25.0 + registries: + enabled: false + configs: + gcr.io: + authConfigSecretName: secret-example + caBundle: '' + insecureSkipVerify: false + mirrors: + gcr.io: + endpoint: + - 'https://gcr.io' + nodeScripts: [] + agentEnvVars: [] + kubeapi: + args: {} + kubelet: + args: {} + localClusterAuthEndpoint: + enabled: false + fqdn: example.rancher.local + secretName: example-rancher-local-secret + tlsSan: [] +nodepools: [] diff --git a/values_example.yaml b/values_example.yaml new file mode 100644 index 0000000..6534938 --- /dev/null +++ b/values_example.yaml @@ -0,0 +1,316 @@ +cloudprovider: openstack +imageRegistryURL: "" +cloudinit: + enable: true + sshPubKeys: + - ssh-rsa pubkey + bootcmd: + runcmd: + - touch /tmp/cloud_init +openstack: + authUrl: https://openstack.example.com:5000 + applicationCredentialId: + applicationCredentialSecret: + availabilityZone: nova + subnetID: 427a8649-583c-4564-a3cf-d49be1f932b2 + projectId: c13899f8-f869-47f7-83df-f470c989afb1 + tenantDomainName: Default + tenantName: tenant + username: tenant + domainName: default + region: RegionOne + floatingNetID: baa6c2c8-4959-437c-84fa-8ed87bf72687 + floatingSubnetID: 66931ed8-2fc3-40b6-b549-3c773e50f805 + openstackClientImage: openstacktools/openstack-client +cluster: + apiAddr: kubernetes.default.svc.cluster.local + apiPort: 6443 + additionalManifests: + - apiVersion: v1 + kind: Secret + metadata: + name: secret + namespace: ns + type: Opaque + data: + file1.json: xxx + secretsEncryption: false + upgradeStrategy: + controlPlaneDrainOptions: + enabled: false + # deleteEmptyDirData: false + # disableEviction: false + # gracePeriod: 0 + # ignoreErrors: false + # skipWaitForDeleteTimeoutSeconds: 0 + # timeout: 0 + workerDrainOptions: + enabled: false + # deleteEmptyDirData: false + # disableEviction: false + # gracePeriod: 
0 + # ignoreErrors: false + # skipWaitForDeleteTimeoutSeconds: 0 + # timeout: 0 + workerConcurrency: "10%" + controlPlaneConcurrency: "10%" + name: cluster-name + kubernetesVersion: v1.21.14+rke2r1 + cni: + # Only one of {cniCanal,cniCalico,cniCilium} can be enabled + cniCanal: + enabled: false + name: canal + mtu: 1430 + cniCalico: + enabled: false + name: calico + blockSize: 24 + podCidr: 10.42.0.0/16 + mtu: 1430 + cniCilium: + enabled: true + name: cilium + blockSize: 24 + podCidr: 10.42.0.0/16 + mtu: 1430 + images: + hubbleRelay: + repository: cilium/hubble-relay + tag: v1.12.1 + hubbleUiBackend: + repository: cilium/hubble-ui-backend + tag: v0.9.2 + hubbleUi: + repository: cilium/hubble-ui + tag: v0.9.2 + cilium: + repository: rancher/mirrored-cilium-cilium + tag: v1.12.1 + operator: + repository: rancher/mirrored-cilium-operator + tag: v1.12.1 + preflight: + repository: rancher/mirrored-cilium-cilium + tag: v1.12.1 + ciliumStartupScript: + repository: rancher/mirrored-cilium-startup-script + tag: d69851597ea019af980891a4628fb36b7880ec26 + autoscaler: + enabled: false + rancherUrl: https://rancher.placeholder.com + rancherToken: rancher-token + racherClusterName: local + image: luizalabscicdmgc/cluster-autoscaler-arm64:dev +monitoring: + enabled: false +rke: + rkeIngressChart: + enabled: true + replicaCount: "1" + autoScaling: + enabled: true + minReplicas: "1" + maxReplicas: "3" + rkeIngressRawManifest: + enabled: false + etcd: + args: + - "quota-backend-bytes=858993459" + - "max-request-bytes=33554432" + exposeMetrics: true + snapshotRetention: 5 + snapshotScheduleCron: "0 */12 * * *" # every 12 hours + coredns: + nodelocal: + enabled: true + openstackControllerManager: + image: k8scloudprovider/openstack-cloud-controller-manager + tag: v1.24.0 + enableLoadBalancerCreateMonitor: false + cinderCsiPlugin: + image: k8scloudprovider/cinder-csi-plugin + tag: v1.25.0 + registries: + enabled: false + configs: + gcr.io: + authConfigSecretName: secret-example + caBundle: '' + insecureSkipVerify: false + mirrors: + gcr.io: + endpoint: + - 'https://gcr.io' + nodeScripts: + - name: script-example-1 + runOnControlPlanes: true + script: + - /bin/bash + - -c + - "touch /tmp/example_script" + image: "ubuntu:22.04" + pauseContainerImage: "google/pause" + env: + - name: "ENV" + value: "VALUE" + volumes: + entries: + - name: root-mount + hostPath: + path: / + volumeMounts: + - name: root-mount + mountPath: /root + # LoadBalancer + agentEnvVars: [] + # - name: RKE2_URL + # value: https://10.1.1.131:9345 + kubeapi: + args: + - "watch-cache=true" + kubelet: + args: + - "max-pods=150" + localClusterAuthEndpoint: + enabled: false + fqdn: example.rancher.local + secretName: example-rancher-local-secret + tlsSan: [] +nodepools: + - name: wa + availabilityZone: zone-a + quantity: 1 + etcd: false + worker: true + netId: 427a8649-583c-4564-a3cf-d49be1f932b2 + controlplane: false + bootFromVolume: false + volumeSize: + volumeDevicePath: + volumeType: + flavorName: + imageName: "ubuntu-22.04" + secGroups: default + keypairName: + sshUser: ubuntu + sshPort: 22 + activeTimeout: 900 + nodeGroupMaxSize: 10 + nodeGroupMinSize: 10 + labels: + key: value + example: value + taints: + - key: test1 + value: test1 + effect: NoSchedule + - key: example + value: yes + effect: PreferNoSchedule + unhealthyNodeTimeout: 5m + drainBeforeDelete: true + - name: wb + availabilityZone: zone-b + quantity: 1 + etcd: false + worker: true + netId: 427a8649-583c-4564-a3cf-d49be1f932b2 + controlplane: false + bootFromVolume: false + 
volumeSize: + volumeDevicePath: + volumeType: + flavorName: + imageName: "ubuntu-20.04" + secGroups: default + keypairName: + sshUser: ubuntu + sshPort: 22 + activeTimeout: 900 + nodeGroupMaxSize: 10 + nodeGroupMinSize: 10 + unhealthyNodeTimeout: 5m + drainBeforeDelete: true + - name: wc + availabilityZone: zone-c + quantity: 1 + etcd: false + worker: true + netId: 427a8649-583c-4564-a3cf-d49be1f932b2 + controlplane: false + bootFromVolume: false + volumeSize: + volumeDevicePath: + volumeType: + flavorName: + imageName: "ubuntu-20.04" + secGroups: default + keypairName: + sshUser: ubuntu + sshPort: 22 + activeTimeout: 900 + nodeGroupMaxSize: 10 + nodeGroupMinSize: 10 + unhealthyNodeTimeout: 5m + drainBeforeDelete: true + - name: cpa + availabilityZone: zone-a + quantity: 1 + etcd: true + worker: false + netId: 427a8649-583c-4564-a3cf-d49be1f932b2 + controlplane: true + bootFromVolume: false + volumeSize: + volumeDevicePath: + volumeType: + flavorName: flavor-cp + imageName: "ubuntu-20.04" + secGroups: default + keypairName: + sshUser: ubuntu + sshPort: 22 + activeTimeout: 900 + nodeGroupMaxSize: 1 + nodeGroupMinSize: 1 + - name: cpb + availabilityZone: zone-b + quantity: 1 + etcd: true + worker: false + netId: 427a8649-583c-4564-a3cf-d49be1f932b2 + controlplane: true + bootFromVolume: false + volumeSize: + volumeDevicePath: + volumeType: + flavorName: flavor-cp + imageName: "ubuntu-20.04" + secGroups: default + keypairName: + sshUser: ubuntu + sshPort: 22 + activeTimeout: 900 + nodeGroupMaxSize: 1 + nodeGroupMinSize: 1 + - name: cpc + availabilityZone: zone-c + quantity: 1 + etcd: true + worker: false + netId: 427a8649-583c-4564-a3cf-d49be1f932b2 + controlplane: true + bootFromVolume: false + volumeSize: + volumeDevicePath: + volumeType: + flavorName: flavor-cp + imageName: "ubuntu-20.04" + secGroups: default + keypairName: + sshUser: ubuntu + sshPort: 22 + activeTimeout: 900 + nodeGroupMaxSize: 1 + nodeGroupMinSize: 1