From ebf7ce69bdf13d82796c253c2447e8c089c06b93 Mon Sep 17 00:00:00 2001 From: Mitali Paygude Date: Fri, 7 Jun 2024 12:47:35 -0700 Subject: [PATCH] Remove kubelet extra args when kubelet configuration is set --- .../cloudstack/config/template-cp.yaml | 12 +- .../cloudstack/config/template-md.yaml | 5 + pkg/providers/cloudstack/template.go | 47 +- pkg/providers/cloudstack/template_test.go | 20 +- .../cloudstack/testdata/expected_kcp.yaml | 411 ++++++++++ .../cloudstack/testdata/expected_kct.yaml | 136 ++++ .../expected_results_main_node_labels_cp.yaml | 4 +- .../expected_results_main_node_labels_md.yaml | 2 +- pkg/providers/docker/config/template-cp.yaml | 10 + pkg/providers/docker/config/template-md.yaml | 5 + pkg/providers/docker/controlplane_test.go | 12 - pkg/providers/docker/docker.go | 86 ++- pkg/providers/docker/docker_test.go | 21 - .../docker/reconciler/reconciler_test.go | 9 - ...id_deployment_node_labels_cp_expected.yaml | 4 +- ...id_deployment_node_labels_md_expected.yaml | 2 +- pkg/providers/nutanix/config/cp-template.yaml | 10 + pkg/providers/nutanix/config/md-template.yaml | 5 + pkg/providers/nutanix/template.go | 44 +- pkg/providers/nutanix/template_test.go | 17 +- .../nutanix/testdata/expected_cp.yaml | 619 ++++++++++++++++ .../expected_results_node_taints_labels.yaml | 4 +- ...xpected_results_node_taints_labels_md.yaml | 2 +- .../nutanix/testdata/expected_wn.yaml | 91 +++ pkg/providers/vsphere/config/template-cp.yaml | 12 +- pkg/providers/vsphere/config/template-md.yaml | 5 + pkg/providers/vsphere/template.go | 47 +- pkg/providers/vsphere/template_test.go | 21 +- .../vsphere/testdata/expected_kcp.yaml | 700 ++++++++++++++++++ .../vsphere/testdata/expected_kct.yaml | 95 +++ .../expected_results_main_node_labels_cp.yaml | 4 +- .../expected_results_main_node_labels_md.yaml | 2 +- 32 files changed, 2348 insertions(+), 116 deletions(-) create mode 100644 pkg/providers/cloudstack/testdata/expected_kcp.yaml create mode 100644 pkg/providers/cloudstack/testdata/expected_kct.yaml create mode 100644 pkg/providers/nutanix/testdata/expected_cp.yaml create mode 100644 pkg/providers/nutanix/testdata/expected_wn.yaml create mode 100644 pkg/providers/vsphere/testdata/expected_kcp.yaml create mode 100644 pkg/providers/vsphere/testdata/expected_kct.yaml diff --git a/pkg/providers/cloudstack/config/template-cp.yaml b/pkg/providers/cloudstack/config/template-cp.yaml index ee9115402b30..b669d4b832b3 100644 --- a/pkg/providers/cloudstack/config/template-cp.yaml +++ b/pkg/providers/cloudstack/config/template-cp.yaml @@ -158,7 +158,7 @@ spec: files: {{- if .kubeletConfiguration }} - content: | -{{ .kubeletConfiguration | indent 8}} +{{ .kubeletConfiguration | indent 8 }} owner: root:root permissions: "0644" path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml @@ -309,10 +309,15 @@ spec: criSocket: /var/run/containerd/containerd.sock kubeletExtraArgs: provider-id: cloudstack:///'{{`{{ ds.meta_data.instance_id }}`}}' +{{- if not .kubeletConfiguration }} read-only-port: "0" anonymous-auth: "false" {{- if .kubeletExtraArgs }} {{ .kubeletExtraArgs.ToYaml | indent 10 }} +{{- end }} +{{- end }} +{{- if .nodeLabelArgs }} +{{ .nodeLabelArgs.ToYaml | indent 10 }} {{- end }} name: "{{`{{ ds.meta_data.hostname }}`}}" {{- if .controlPlaneTaints }} @@ -335,10 +340,15 @@ spec: criSocket: /var/run/containerd/containerd.sock kubeletExtraArgs: provider-id: cloudstack:///'{{`{{ ds.meta_data.instance_id }}`}}' +{{- if not .kubeletConfiguration }} read-only-port: "0" anonymous-auth: "false" {{- if 
.kubeletExtraArgs }} {{ .kubeletExtraArgs.ToYaml | indent 10 }} +{{- end }} +{{- end }} +{{- if .nodeLabelArgs }} +{{ .nodeLabelArgs.ToYaml | indent 10 }} {{- end }} name: "{{`{{ ds.meta_data.hostname }}`}}" {{- if .controlPlaneTaints }} diff --git a/pkg/providers/cloudstack/config/template-md.yaml b/pkg/providers/cloudstack/config/template-md.yaml index 0c6607befceb..c5c10ef7d3bb 100644 --- a/pkg/providers/cloudstack/config/template-md.yaml +++ b/pkg/providers/cloudstack/config/template-md.yaml @@ -27,10 +27,15 @@ spec: {{- end }} kubeletExtraArgs: provider-id: cloudstack:///'{{`{{ ds.meta_data.instance_id }}`}}' +{{- if not .kubeletConfiguration }} read-only-port: "0" anonymous-auth: "false" {{- if .kubeletExtraArgs }} {{ .kubeletExtraArgs.ToYaml | indent 12 }} +{{- end }} +{{- end }} +{{- if .nodeLabelArgs }} +{{ .nodeLabelArgs.ToYaml | indent 12 }} {{- end }} name: "{{`{{ ds.meta_data.hostname }}`}}" {{- if or (or .proxyConfig .registryMirrorMap) .kubeletConfiguration }} diff --git a/pkg/providers/cloudstack/template.go b/pkg/providers/cloudstack/template.go index eb6cb5fdf081..4548595e343c 100644 --- a/pkg/providers/cloudstack/template.go +++ b/pkg/providers/cloudstack/template.go @@ -118,9 +118,6 @@ func buildTemplateMapCP(clusterSpec *cluster.Spec) (map[string]interface{}, erro etcdExtraArgs := clusterapi.SecureEtcdTlsCipherSuitesExtraArgs() sharedExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs() - kubeletExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs(). - Append(clusterapi.ResolvConfExtraArgs(clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf)). - Append(clusterapi.ControlPlaneNodeLabelsExtraArgs(clusterSpec.Cluster.Spec.ControlPlaneConfiguration)) apiServerExtraArgs := clusterapi.OIDCToExtraArgs(clusterSpec.OIDCConfig). Append(clusterapi.AwsIamAuthExtraArgs(clusterSpec.AWSIamConfig)). Append(clusterapi.APIServerExtraArgs(clusterSpec.Cluster.Spec.ControlPlaneConfiguration.APIServerExtraArgs)). @@ -190,7 +187,6 @@ func buildTemplateMapCP(clusterSpec *cluster.Spec) (map[string]interface{}, erro "podCidrs": clusterSpec.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks, "serviceCidrs": clusterSpec.Cluster.Spec.ClusterNetwork.Services.CidrBlocks, "apiserverExtraArgs": apiServerExtraArgs.ToPartialYaml(), - "kubeletExtraArgs": kubeletExtraArgs.ToPartialYaml(), "etcdExtraArgs": etcdExtraArgs.ToPartialYaml(), "etcdCipherSuites": crypto.SecureCipherSuitesString(), "controllermanagerExtraArgs": controllerManagerExtraArgs.ToPartialYaml(), @@ -262,12 +258,29 @@ func buildTemplateMapCP(clusterSpec *cluster.Spec) (map[string]interface{}, erro if clusterSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration != nil { cpKubeletConfig := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration.Object + if _, ok := cpKubeletConfig["tlsCipherSuites"]; !ok { + cpKubeletConfig["tlsCipherSuites"] = crypto.SecureCipherSuiteNames() + } + + if _, ok := cpKubeletConfig["resolvConf"]; !ok { + if clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf != nil { + cpKubeletConfig["resolvConf"] = clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf.Path + } + } kcString, err := yaml.Marshal(cpKubeletConfig) if err != nil { return nil, fmt.Errorf("error marshaling %v", err) } - values["kubeletConfiguration"] = string(kcString) + } else { + kubeletExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs(). 
+ Append(clusterapi.ResolvConfExtraArgs(clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf)) + values["kubeletExtraArgs"] = kubeletExtraArgs.ToPartialYaml() + } + + nodeLabelArgs := clusterapi.ControlPlaneNodeLabelsExtraArgs(clusterSpec.Cluster.Spec.ControlPlaneConfiguration) + if len(nodeLabelArgs) != 0 { + values["nodeLabelArgs"] = nodeLabelArgs.ToPartialYaml() } return values, nil @@ -349,9 +362,6 @@ func fillProxyConfigurations(values map[string]interface{}, clusterSpec *cluster func buildTemplateMapMD(clusterSpec *cluster.Spec, workerNodeGroupConfiguration v1alpha1.WorkerNodeGroupConfiguration) (map[string]interface{}, error) { versionsBundle := clusterSpec.WorkerNodeGroupVersionsBundle(workerNodeGroupConfiguration) format := "cloud-config" - kubeletExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs(). - Append(clusterapi.WorkerNodeLabelsExtraArgs(workerNodeGroupConfiguration)). - Append(clusterapi.ResolvConfExtraArgs(clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf)) workerNodeGroupMachineSpec := workerMachineConfig(clusterSpec, workerNodeGroupConfiguration).Spec workerUser := workerNodeGroupMachineSpec.Users[0] @@ -376,7 +386,6 @@ func buildTemplateMapMD(clusterSpec *cluster.Spec, workerNodeGroupConfiguration "workerSshUsername": workerNodeGroupMachineSpec.Users[0].Name, "cloudstackWorkerSshAuthorizedKey": workerSSHKey, "format": format, - "kubeletExtraArgs": kubeletExtraArgs.ToPartialYaml(), "eksaSystemNamespace": constants.EksaSystemNamespace, "workerNodeGroupName": fmt.Sprintf("%s-%s", clusterSpec.Cluster.Name, workerNodeGroupConfiguration.Name), "workerNodeGroupTaints": workerNodeGroupConfiguration.Taints, @@ -404,12 +413,32 @@ func buildTemplateMapMD(clusterSpec *cluster.Spec, workerNodeGroupConfiguration if workerNodeGroupConfiguration.KubeletConfiguration != nil { wnKubeletConfig := workerNodeGroupConfiguration.KubeletConfiguration.Object + + if _, ok := wnKubeletConfig["tlsCipherSuites"]; !ok { + wnKubeletConfig["tlsCipherSuites"] = crypto.SecureCipherSuiteNames() + } + + if _, ok := wnKubeletConfig["resolvConf"]; !ok { + if clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf != nil { + wnKubeletConfig["resolvConf"] = clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf.Path + } + } + kcString, err := yaml.Marshal(wnKubeletConfig) if err != nil { return nil, fmt.Errorf("error marshaling %v", err) } values["kubeletConfiguration"] = string(kcString) + } else { + kubeletExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs(). 
+ Append(clusterapi.ResolvConfExtraArgs(clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf)) + values["kubeletExtraArgs"] = kubeletExtraArgs.ToPartialYaml() + } + + nodeLabelArgs := clusterapi.WorkerNodeLabelsExtraArgs(workerNodeGroupConfiguration) + if len(nodeLabelArgs) != 0 { + values["nodeLabelArgs"] = nodeLabelArgs.ToPartialYaml() } return values, nil diff --git a/pkg/providers/cloudstack/template_test.go b/pkg/providers/cloudstack/template_test.go index a027ae93a68e..5c2953ee5346 100644 --- a/pkg/providers/cloudstack/template_test.go +++ b/pkg/providers/cloudstack/template_test.go @@ -179,9 +179,17 @@ func TestVsphereTemplateBuilderGenerateCAPISpecControlPlaneValidKubeletConfigWN( "maxPods": 20, }, } + spec.Cluster.Spec.ClusterNetwork.DNS = v1alpha1.DNS{ + ResolvConf: &v1alpha1.ResolvConf{ + Path: "temp-path", + }, + } builder := cloudstack.NewTemplateBuilder(time.Now) - _, err := builder.GenerateCAPISpecWorkers(spec, nil, nil) + data, err := builder.GenerateCAPISpecWorkers(spec, nil, nil) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(data).To(ContainSubstring("maxPods")) + t.Logf("\n data \n%v\n", string(data)) + test.AssertContentToFile(t, string(data), "testdata/expected_kct.yaml") } func TestVsphereTemplateBuilderGenerateCAPISpecControlPlaneValidKubeletConfigCP(t *testing.T) { @@ -192,10 +200,18 @@ func TestVsphereTemplateBuilderGenerateCAPISpecControlPlaneValidKubeletConfigCP( "maxPods": 20, }, } + spec.Cluster.Spec.ClusterNetwork.DNS = v1alpha1.DNS{ + ResolvConf: &v1alpha1.ResolvConf{ + Path: "temp-path", + }, + } spec.Cluster.Spec.ExternalEtcdConfiguration = nil builder := cloudstack.NewTemplateBuilder(time.Now) - _, err := builder.GenerateCAPISpecControlPlane(spec, func(values map[string]interface{}) { + data, err := builder.GenerateCAPISpecControlPlane(spec, func(values map[string]interface{}) { values["controlPlaneTemplateName"] = clusterapi.ControlPlaneMachineTemplateName(spec.Cluster) }) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(data).To(ContainSubstring("maxPods")) + t.Logf("\n data \n%v\n", string(data)) + test.AssertContentToFile(t, string(data), "testdata/expected_kcp.yaml") } diff --git a/pkg/providers/cloudstack/testdata/expected_kcp.yaml b/pkg/providers/cloudstack/testdata/expected_kcp.yaml new file mode 100644 index 000000000000..e81d67c83ecc --- /dev/null +++ b/pkg/providers/cloudstack/testdata/expected_kcp.yaml @@ -0,0 +1,411 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test + namespace: eksa-system +spec: + clusterNetwork: + pods: + cidrBlocks: [192.168.0.0/16] + services: + cidrBlocks: [10.96.0.0/12] + controlPlaneEndpoint: + host: 1.2.3.4 + port: 6443 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta3 + kind: CloudStackCluster + name: test +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta3 +kind: CloudStackCluster +metadata: + name: test + namespace: eksa-system +spec: + controlPlaneEndpoint: + host: 1.2.3.4 + port: 6443 + failureDomains: + - name: default-az-0 + zone: + id: + name: zone1 + network: + id: + name: net1 + domain: domain1 + account: admin + acsEndpoint: + name: global + namespace: eksa-system +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: test + namespace: eksa-system +spec: + machineTemplate: + infrastructureRef: + apiVersion: 
infrastructure.cluster.x-k8s.io/v1beta3 + kind: CloudStackMachineTemplate + name: test-control-plane-1 + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: public.ecr.aws/eks-distro/kubernetes + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.16-eks-1-21-4 + extraArgs: + cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.3-eks-1-21-4 + apiServer: + extraArgs: + cloud-provider: external + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + controllerManager: + extraArgs: + cloud-provider: external + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + scheduler: + extraArgs: + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + files: + - content: | + maxPods: 20 + resolvConf: temp-path + tlsCipherSuites: + - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + + owner: root:root + permissions: "0644" + path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: address + value: 1.2.3.4 + image: public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.7-eks-a-v0.0.0-dev-build.158 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + name: kubeconfig + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. 
+ - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. 
+ - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + patches: + directory: /etc/kubernetes/patches + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + provider-id: cloudstack:///'{{ ds.meta_data.instance_id }}' + name: "{{ ds.meta_data.hostname }}" + joinConfiguration: + patches: + directory: /etc/kubernetes/patches + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + provider-id: cloudstack:///'{{ ds.meta_data.instance_id }}' + name: "{{ ds.meta_data.hostname }}" + preKubeadmCommands: + - swapoff -a + - hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts + - echo "{{ ds.meta_data.hostname }}" >/etc/hostname + - >- + if [ ! 
-L /var/log/kubernetes ] ; + then + mv /var/log/kubernetes /var/log/kubernetes-$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 10) ; + mkdir -p /data-small/var/log/kubernetes && ln -s /data-small/var/log/kubernetes /var/log/kubernetes ; + else echo "/var/log/kubernetes already symlnk"; + fi + diskSetup: + filesystems: + - device: /dev/vdb1 + overwrite: false + extraOpts: + - -E + - lazy_itable_init=1,lazy_journal_init=1 + filesystem: ext4 + label: data_disk + partitions: + - device: /dev/vdb + layout: true + overwrite: false + tableType: gpt + mounts: + - - LABEL=data_disk + - /data-small + useExperimentalRetryJoin: true + users: + - name: mySshUsername + sshAuthorizedKeys: + - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ==' + sudo: ALL=(ALL) NOPASSWD:ALL + format: cloud-config + replicas: 3 + version: v1.21.2-eks-1-21-4 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta3 +kind: CloudStackMachineTemplate +metadata: + annotations: + device.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: /dev/vdb + filesystem.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: ext4 + label.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: data_disk + mountpath.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: /data-small + symlinks.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: /var/log/kubernetes:/data-small/var/log/kubernetes + creationTimestamp: null + name: test-control-plane-1 + namespace: eksa-system +spec: + template: + spec: + affinityGroupIDs: + - control-plane-anti-affinity + diskOffering: + customSizeInGB: 0 + device: /dev/vdb + filesystem: ext4 + label: data_disk + mountPath: /data-small + name: Small + offering: + name: m4-large + sshKey: "" + template: + name: kubernetes_1_21 + +--- diff --git a/pkg/providers/cloudstack/testdata/expected_kct.yaml b/pkg/providers/cloudstack/testdata/expected_kct.yaml new file mode 100644 index 000000000000..e6183a1a2133 --- /dev/null +++ b/pkg/providers/cloudstack/testdata/expected_kct.yaml @@ -0,0 +1,136 @@ +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: + namespace: eksa-system +spec: + template: + spec: + joinConfiguration: + patches: + directory: /etc/kubernetes/patches + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + taints: [] + kubeletExtraArgs: + provider-id: cloudstack:///'{{ ds.meta_data.instance_id }}' + name: "{{ ds.meta_data.hostname }}" + files: + - content: | + maxPods: 20 + resolvConf: temp-path + tlsCipherSuites: + - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + + owner: root:root + permissions: "0644" + path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml + preKubeadmCommands: + - swapoff -a + - hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ 
ds.meta_data.hostname }}" >>/etc/hosts + - echo "{{ ds.meta_data.hostname }}" >/etc/hostname + - >- + if [ ! -L /var/log/containers ] ; + then + mv /var/log/containers /var/log/containers-$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 10) ; + mkdir -p /data-small/var/log/containers && ln -s /data-small/var/log/containers /var/log/containers ; + else echo "/var/log/containers already symlnk" ; + fi + - >- + if [ ! -L /var/log/pods ] ; + then + mv /var/log/pods /var/log/pods-$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 10) ; + mkdir -p /data-small/var/log/pods && ln -s /data-small/var/log/pods /var/log/pods ; + else echo "/var/log/pods already symlnk" ; + fi + diskSetup: + filesystems: + - device: /dev/vdb1 + overwrite: false + extraOpts: + - -E + - lazy_itable_init=1,lazy_journal_init=1 + filesystem: ext4 + label: data_disk + partitions: + - device: /dev/vdb + layout: true + overwrite: false + tableType: gpt + mounts: + - - LABEL=data_disk + - /data-small + users: + - name: mySshUsername + sshAuthorizedKeys: + - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ==' + sudo: ALL=(ALL) NOPASSWD:ALL + format: cloud-config +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test-md-0 + namespace: eksa-system +spec: + clusterName: test + replicas: 3 + selector: + matchLabels: {} + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: test + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: + clusterName: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta3 + kind: CloudStackMachineTemplate + name: + version: v1.21.2-eks-1-21-4 + +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta3 +kind: CloudStackMachineTemplate +metadata: + annotations: + device.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: /dev/vdb + filesystem.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: ext4 + label.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: data_disk + mountpath.diskoffering.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: /data-small + symlinks.cloudstack.anywhere.eks.amazonaws.com/v1alpha1: /var/log/containers:/data-small/var/log/containers,/var/log/pods:/data-small/var/log/pods + creationTimestamp: null + namespace: eksa-system +spec: + template: + spec: + affinityGroupIDs: + - worker-affinity + details: + foo: bar + diskOffering: + customSizeInGB: 0 + device: /dev/vdb + filesystem: ext4 + label: data_disk + mountPath: /data-small + name: Small + offering: + name: m4-large + sshKey: "" + template: + name: kubernetes_1_21 + +--- + +--- diff --git a/pkg/providers/cloudstack/testdata/expected_results_main_node_labels_cp.yaml b/pkg/providers/cloudstack/testdata/expected_results_main_node_labels_cp.yaml index 
eee190b5d66e..80c13952ed81 100644 --- a/pkg/providers/cloudstack/testdata/expected_results_main_node_labels_cp.yaml +++ b/pkg/providers/cloudstack/testdata/expected_results_main_node_labels_cp.yaml @@ -323,8 +323,8 @@ spec: provider-id: cloudstack:///'{{ ds.meta_data.instance_id }}' read-only-port: "0" anonymous-auth: "false" - node-labels: label1=foo,label2=bar tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + node-labels: label1=foo,label2=bar name: "{{ ds.meta_data.hostname }}" joinConfiguration: nodeRegistration: @@ -333,8 +333,8 @@ spec: provider-id: cloudstack:///'{{ ds.meta_data.instance_id }}' read-only-port: "0" anonymous-auth: "false" - node-labels: label1=foo,label2=bar tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + node-labels: label1=foo,label2=bar name: "{{ ds.meta_data.hostname }}" preKubeadmCommands: - swapoff -a diff --git a/pkg/providers/cloudstack/testdata/expected_results_main_node_labels_md.yaml b/pkg/providers/cloudstack/testdata/expected_results_main_node_labels_md.yaml index 62855c097298..0b0012658dcc 100644 --- a/pkg/providers/cloudstack/testdata/expected_results_main_node_labels_md.yaml +++ b/pkg/providers/cloudstack/testdata/expected_results_main_node_labels_md.yaml @@ -14,8 +14,8 @@ spec: provider-id: cloudstack:///'{{ ds.meta_data.instance_id }}' read-only-port: "0" anonymous-auth: "false" - node-labels: label1=foo,label2=bar tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + node-labels: label1=foo,label2=bar name: "{{ ds.meta_data.hostname }}" preKubeadmCommands: - swapoff -a diff --git a/pkg/providers/docker/config/template-cp.yaml b/pkg/providers/docker/config/template-cp.yaml index b9f8434e7bc9..6f4a3d627ad7 100644 --- a/pkg/providers/docker/config/template-cp.yaml +++ b/pkg/providers/docker/config/template-cp.yaml @@ -222,11 +222,16 @@ spec: {{- end }} nodeRegistration: criSocket: /var/run/containerd/containerd.sock +{{- if not .kubeletConfiguration }} kubeletExtraArgs: eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% {{- if .kubeletExtraArgs }} {{ .kubeletExtraArgs.ToYaml | indent 10 }} {{- end }} +{{- end }} +{{- if .nodeLabelArgs }} +{{ .nodeLabelArgs.ToYaml | indent 10 }} +{{- end }} {{- if not .workerNodeGroupConfigurations }} taints: [] {{- end }} @@ -247,11 +252,16 @@ spec: {{- end }} nodeRegistration: criSocket: /var/run/containerd/containerd.sock +{{- if not .kubeletConfiguration }} kubeletExtraArgs: eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% {{- if .kubeletExtraArgs }} {{ .kubeletExtraArgs.ToYaml | indent 10 }} {{- end }} +{{- end }} +{{- if .nodeLabelArgs }} +{{ .nodeLabelArgs.ToYaml | indent 10 }} +{{- end }} {{- if not .workerNodeGroupConfigurations }} taints: [] {{- end }} diff --git a/pkg/providers/docker/config/template-md.yaml b/pkg/providers/docker/config/template-md.yaml index 8a806cc3d87f..d9b1e60cfcf0 100644 --- a/pkg/providers/docker/config/template-md.yaml +++ b/pkg/providers/docker/config/template-md.yaml @@ -25,11 +25,16 @@ spec: {{- else}} taints: [] {{- end }} +{{- if not .kubeletConfiguration }} kubeletExtraArgs: eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% {{- if .kubeletExtraArgs }} {{ .kubeletExtraArgs.ToYaml | indent 12 }} {{- end }} +{{- end }} +{{- if .nodeLabelArgs }} +{{ .nodeLabelArgs.ToYaml | indent 12 }} +{{- end }} {{- if or .registryMirrorMap .kubeletConfiguration }} files: {{- end }} diff --git a/pkg/providers/docker/controlplane_test.go b/pkg/providers/docker/controlplane_test.go index 
7d2205139eda..b3930909dad3 100644 --- a/pkg/providers/docker/controlplane_test.go +++ b/pkg/providers/docker/controlplane_test.go @@ -98,18 +98,6 @@ func TestControlPlaneSpecNewCluster(t *testing.T) { g.Expect(cp.EtcdMachineTemplate).To(Equal(wantEtcdMachineTemplate)) } -func TestControlPlaneSpecNoKubeVersion(t *testing.T) { - g := NewWithT(t) - logger := test.NewNullLogger() - ctx := context.Background() - client := test.NewFakeKubeClient() - spec := testClusterSpec() - spec.Cluster.Spec.KubernetesVersion = "" - - _, err := docker.ControlPlaneSpec(ctx, logger, client, spec) - g.Expect(err).To(MatchError(ContainSubstring("generating docker control plane yaml spec"))) -} - func TestControlPlaneSpecUpdateMachineTemplates(t *testing.T) { g := NewWithT(t) logger := test.NewNullLogger() diff --git a/pkg/providers/docker/docker.go b/pkg/providers/docker/docker.go index 77e91dd9d908..c62bc1f2b051 100644 --- a/pkg/providers/docker/docker.go +++ b/pkg/providers/docker/docker.go @@ -281,17 +281,6 @@ func buildTemplateMapCP(clusterSpec *cluster.Spec) (map[string]interface{}, erro versionsBundle := clusterSpec.RootVersionsBundle() etcdExtraArgs := clusterapi.SecureEtcdTlsCipherSuitesExtraArgs() sharedExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs() - kubeletExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs(). - Append(clusterapi.ResolvConfExtraArgs(clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf)). - Append(clusterapi.ControlPlaneNodeLabelsExtraArgs(clusterSpec.Cluster.Spec.ControlPlaneConfiguration)) - - cgroupDriverArgs, err := kubeletCgroupDriverExtraArgs(clusterSpec.Cluster.Spec.KubernetesVersion) - if err != nil { - return nil, err - } - if cgroupDriverArgs != nil { - kubeletExtraArgs.Append(cgroupDriverArgs) - } apiServerExtraArgs := clusterapi.OIDCToExtraArgs(clusterSpec.OIDCConfig). Append(clusterapi.AwsIamAuthExtraArgs(clusterSpec.AWSIamConfig)). @@ -316,7 +305,6 @@ func buildTemplateMapCP(clusterSpec *cluster.Spec) (map[string]interface{}, erro "apiserverExtraArgs": apiServerExtraArgs.ToPartialYaml(), "controllermanagerExtraArgs": controllerManagerExtraArgs.ToPartialYaml(), "schedulerExtraArgs": sharedExtraArgs.ToPartialYaml(), - "kubeletExtraArgs": kubeletExtraArgs.ToPartialYaml(), "externalEtcdVersion": versionsBundle.KubeDistro.EtcdVersion, "eksaSystemNamespace": constants.EksaSystemNamespace, "podCidrs": clusterSpec.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks, @@ -361,40 +349,53 @@ func buildTemplateMapCP(clusterSpec *cluster.Spec) (map[string]interface{}, erro if clusterSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration != nil { cpKubeletConfig := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration.Object + if _, ok := cpKubeletConfig["tlsCipherSuites"]; !ok { + cpKubeletConfig["tlsCipherSuites"] = crypto.SecureCipherSuiteNames() + } + + if _, ok := cpKubeletConfig["resolvConf"]; !ok { + if clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf != nil { + cpKubeletConfig["resolvConf"] = clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf.Path + } + } kcString, err := yaml.Marshal(cpKubeletConfig) if err != nil { return nil, fmt.Errorf("marshaling control plane node Kubelet Configuration while building CAPI template %v", err) } values["kubeletConfiguration"] = string(kcString) + + } else { + kubeletExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs(). 
+ Append(clusterapi.ResolvConfExtraArgs(clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf)) + + cgroupDriverArgs, err := kubeletCgroupDriverExtraArgs(clusterSpec.Cluster.Spec.KubernetesVersion) + if err != nil { + return nil, err + } + if cgroupDriverArgs != nil { + kubeletExtraArgs.Append(cgroupDriverArgs) + } + + values["kubeletExtraArgs"] = kubeletExtraArgs.ToPartialYaml() + } + + nodeLabelArgs := clusterapi.ControlPlaneNodeLabelsExtraArgs(clusterSpec.Cluster.Spec.ControlPlaneConfiguration) + if len(nodeLabelArgs) != 0 { + values["nodeLabelArgs"] = nodeLabelArgs.ToPartialYaml() } return values, nil } func buildTemplateMapMD(clusterSpec *cluster.Spec, workerNodeGroupConfiguration v1alpha1.WorkerNodeGroupConfiguration) (map[string]interface{}, error) { - kubeVersion := clusterSpec.Cluster.Spec.KubernetesVersion - if workerNodeGroupConfiguration.KubernetesVersion != nil { - kubeVersion = *workerNodeGroupConfiguration.KubernetesVersion - } versionsBundle := clusterSpec.WorkerNodeGroupVersionsBundle(workerNodeGroupConfiguration) - kubeletExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs(). - Append(clusterapi.WorkerNodeLabelsExtraArgs(workerNodeGroupConfiguration)). - Append(clusterapi.ResolvConfExtraArgs(clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf)) - cgroupDriverArgs, err := kubeletCgroupDriverExtraArgs(kubeVersion) - if err != nil { - return nil, err - } - if cgroupDriverArgs != nil { - kubeletExtraArgs.Append(cgroupDriverArgs) - } values := map[string]interface{}{ "clusterName": clusterSpec.Cluster.Name, "kubernetesVersion": versionsBundle.KubeDistro.Kubernetes.Tag, "kindNodeImage": versionsBundle.EksD.KindNode.VersionedImage(), "eksaSystemNamespace": constants.EksaSystemNamespace, - "kubeletExtraArgs": kubeletExtraArgs.ToPartialYaml(), "workerReplicas": *workerNodeGroupConfiguration.Count, "workerNodeGroupName": fmt.Sprintf("%s-%s", clusterSpec.Cluster.Name, workerNodeGroupConfiguration.Name), "workerNodeGroupTaints": workerNodeGroupConfiguration.Taints, @@ -410,12 +411,43 @@ func buildTemplateMapMD(clusterSpec *cluster.Spec, workerNodeGroupConfiguration if workerNodeGroupConfiguration.KubeletConfiguration != nil { wnKubeletConfig := workerNodeGroupConfiguration.KubeletConfiguration.Object + if _, ok := wnKubeletConfig["tlsCipherSuites"]; !ok { + wnKubeletConfig["tlsCipherSuites"] = crypto.SecureCipherSuiteNames() + } + + if _, ok := wnKubeletConfig["resolvConf"]; !ok { + if clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf != nil { + wnKubeletConfig["resolvConf"] = clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf.Path + } + } kcString, err := yaml.Marshal(wnKubeletConfig) if err != nil { return nil, fmt.Errorf("marshaling Kubelet Configuration for worker node %s: %v", workerNodeGroupConfiguration.Name, err) } values["kubeletConfiguration"] = string(kcString) + } else { + kubeVersion := clusterSpec.Cluster.Spec.KubernetesVersion + if workerNodeGroupConfiguration.KubernetesVersion != nil { + kubeVersion = *workerNodeGroupConfiguration.KubernetesVersion + } + kubeletExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs(). 
+ Append(clusterapi.ResolvConfExtraArgs(clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf)) + + cgroupDriverArgs, err := kubeletCgroupDriverExtraArgs(kubeVersion) + if err != nil { + return nil, err + } + if cgroupDriverArgs != nil { + kubeletExtraArgs.Append(cgroupDriverArgs) + } + + values["kubeletExtraArgs"] = kubeletExtraArgs.ToPartialYaml() + } + + nodeLabelArgs := clusterapi.WorkerNodeLabelsExtraArgs(workerNodeGroupConfiguration) + if len(nodeLabelArgs) != 0 { + values["nodeLabelArgs"] = nodeLabelArgs.ToPartialYaml() } return values, nil diff --git a/pkg/providers/docker/docker_test.go b/pkg/providers/docker/docker_test.go index 774ee109f1e4..7cddf8f4fc5d 100644 --- a/pkg/providers/docker/docker_test.go +++ b/pkg/providers/docker/docker_test.go @@ -827,17 +827,6 @@ func TestDockerTemplateBuilderGenerateCAPISpecControlPlane(t *testing.T) { }, wantErr: nil, }, - { - name: "kube version not specified", - args: args{ - clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) { - s.Cluster.Name = "test-cluster" - s.Cluster.Spec.KubernetesVersion = "" - }), - buildOptions: nil, - }, - wantErr: fmt.Errorf("error building template map for CP "), - }, { name: "kubelet config specified", args: args{ @@ -922,16 +911,6 @@ func TestDockerTemplateBuilderGenerateCAPISpecWorkers(t *testing.T) { args args wantErr error }{ - { - name: "kube version not specified", - args: args{ - clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) { - s.Cluster.Name = "test-cluster" - s.Cluster.Spec.KubernetesVersion = "" - }), - }, - wantErr: fmt.Errorf("error building template map for MD "), - }, { name: "kubelet config specified", args: args{ diff --git a/pkg/providers/docker/reconciler/reconciler_test.go b/pkg/providers/docker/reconciler/reconciler_test.go index f8d0487694c5..90bbc921188a 100644 --- a/pkg/providers/docker/reconciler/reconciler_test.go +++ b/pkg/providers/docker/reconciler/reconciler_test.go @@ -336,15 +336,6 @@ func TestReconcileControlPlaneUnstackedEtcdSuccess(t *testing.T) { ) } -func TestReconcilerReconcileControlPlaneFailure(t *testing.T) { - tt := newReconcilerTest(t) - tt.createAllObjs() - spec := tt.buildSpec() - spec.Cluster.Spec.KubernetesVersion = "" - _, err := tt.reconciler().ReconcileControlPlane(tt.ctx, test.NewNullLogger(), spec) - tt.Expect(err).To(MatchError(ContainSubstring("generating docker control plane yaml spec"))) -} - type reconcilerTest struct { t testing.TB *WithT diff --git a/pkg/providers/docker/testdata/valid_deployment_node_labels_cp_expected.yaml b/pkg/providers/docker/testdata/valid_deployment_node_labels_cp_expected.yaml index 6422832f9a60..d45703aebe88 100644 --- a/pkg/providers/docker/testdata/valid_deployment_node_labels_cp_expected.yaml +++ b/pkg/providers/docker/testdata/valid_deployment_node_labels_cp_expected.yaml @@ -270,16 +270,16 @@ spec: kubeletExtraArgs: eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% cgroup-driver: cgroupfs - node-labels: label1=foo,label2=bar tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + node-labels: label1=foo,label2=bar joinConfiguration: nodeRegistration: criSocket: /var/run/containerd/containerd.sock kubeletExtraArgs: eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% cgroup-driver: cgroupfs - node-labels: label1=foo,label2=bar tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + node-labels: label1=foo,label2=bar replicas: 3 version: v1.19.6-eks-1-19-2 --- diff --git a/pkg/providers/docker/testdata/valid_deployment_node_labels_md_expected.yaml 
b/pkg/providers/docker/testdata/valid_deployment_node_labels_md_expected.yaml index 1840a3cb0295..78dcb5f34161 100644 --- a/pkg/providers/docker/testdata/valid_deployment_node_labels_md_expected.yaml +++ b/pkg/providers/docker/testdata/valid_deployment_node_labels_md_expected.yaml @@ -13,8 +13,8 @@ spec: kubeletExtraArgs: eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% cgroup-driver: cgroupfs - node-labels: label1=foo,label2=bar tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + node-labels: label1=foo,label2=bar --- apiVersion: cluster.x-k8s.io/v1beta1 kind: MachineDeployment diff --git a/pkg/providers/nutanix/config/cp-template.yaml b/pkg/providers/nutanix/config/cp-template.yaml index 82acdc53dc66..28667b62e5e8 100644 --- a/pkg/providers/nutanix/config/cp-template.yaml +++ b/pkg/providers/nutanix/config/cp-template.yaml @@ -337,10 +337,15 @@ spec: # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 #cgroup-driver: cgroupfs +{{- if not .kubeletConfiguration }} eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% {{- if .kubeletExtraArgs }} {{ .kubeletExtraArgs.ToYaml | indent 10 }} {{- end }} +{{- end }} +{{- if .nodeLabelArgs }} +{{ .nodeLabelArgs.ToYaml | indent 10 }} +{{- end }} {{- if .controlPlaneTaints }} taints: {{- range .controlPlaneTaints}} @@ -361,11 +366,16 @@ spec: criSocket: /var/run/containerd/containerd.sock kubeletExtraArgs: cloud-provider: external +{{- if not .kubeletConfiguration }} read-only-port: "0" anonymous-auth: "false" {{- if .kubeletExtraArgs }} {{ .kubeletExtraArgs.ToYaml | indent 10 }} {{- end }} +{{- end }} +{{- if .nodeLabelArgs }} +{{ .nodeLabelArgs.ToYaml | indent 10 }} +{{- end }} {{- if .controlPlaneTaints }} taints: {{- range .controlPlaneTaints}} diff --git a/pkg/providers/nutanix/config/md-template.yaml b/pkg/providers/nutanix/config/md-template.yaml index b42cefe60e84..4d7717cc2ebc 100644 --- a/pkg/providers/nutanix/config/md-template.yaml +++ b/pkg/providers/nutanix/config/md-template.yaml @@ -123,10 +123,15 @@ spec: # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 #cgroup-driver: cgroupfs +{{- if not .kubeletConfiguration }} eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% {{- if .kubeletExtraArgs }} {{ .kubeletExtraArgs.ToYaml | indent 12 }} {{- end }} +{{- end }} +{{- if .nodeLabelArgs }} +{{ .nodeLabelArgs.ToYaml | indent 12 }} +{{- end }} {{- if .workerNodeGroupTaints }} taints: {{- range .workerNodeGroupTaints}} diff --git a/pkg/providers/nutanix/template.go b/pkg/providers/nutanix/template.go index 24e93dcc03b2..8a52b0ecd093 100644 --- a/pkg/providers/nutanix/template.go +++ b/pkg/providers/nutanix/template.go @@ -168,9 +168,6 @@ func buildTemplateMapCP( Append(clusterapi.APIServerExtraArgs(clusterSpec.Cluster.Spec.ControlPlaneConfiguration.APIServerExtraArgs)). Append(clusterapi.EtcdEncryptionExtraArgs(clusterSpec.Cluster.Spec.EtcdEncryption)) clusterapi.SetPodIAMAuthExtraArgs(clusterSpec.Cluster.Spec.PodIAMConfig, apiServerExtraArgs) - kubeletExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs(). - Append(clusterapi.ResolvConfExtraArgs(clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf)). 
- Append(clusterapi.ControlPlaneNodeLabelsExtraArgs(clusterSpec.Cluster.Spec.ControlPlaneConfiguration)) auditPolicy, err := common.GetAuditPolicy(clusterSpec.Cluster.Spec.KubernetesVersion) if err != nil { @@ -200,7 +197,6 @@ func buildTemplateMapCP( "corednsVersion": versionsBundle.KubeDistro.CoreDNS.Tag, "etcdRepository": versionsBundle.KubeDistro.Etcd.Repository, "etcdImageTag": versionsBundle.KubeDistro.Etcd.Tag, - "kubeletExtraArgs": kubeletExtraArgs.ToPartialYaml(), "kubeVipImage": versionsBundle.Nutanix.KubeVip.VersionedImage(), "kubeVipSvcEnable": false, "kubeVipLBEnable": false, @@ -321,13 +317,30 @@ func buildTemplateMapCP( if clusterSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration != nil { cpKubeletConfig := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration.Object + if _, ok := cpKubeletConfig["tlsCipherSuites"]; !ok { + cpKubeletConfig["tlsCipherSuites"] = crypto.SecureCipherSuiteNames() + } + if _, ok := cpKubeletConfig["resolvConf"]; !ok { + if clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf != nil { + cpKubeletConfig["resolvConf"] = clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf.Path + } + } kcString, err := yaml.Marshal(cpKubeletConfig) if err != nil { return nil, fmt.Errorf("error marshaling %v", err) } values["kubeletConfiguration"] = string(kcString) + } else { + kubeletExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs(). + Append(clusterapi.ResolvConfExtraArgs(clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf)) + values["kubeletExtraArgs"] = kubeletExtraArgs.ToPartialYaml() + } + + nodeLabelArgs := clusterapi.ControlPlaneNodeLabelsExtraArgs(clusterSpec.Cluster.Spec.ControlPlaneConfiguration) + if len(nodeLabelArgs) != 0 { + values["nodeLabelArgs"] = nodeLabelArgs.ToPartialYaml() } return values, nil @@ -337,9 +350,6 @@ func buildTemplateMapMD(clusterSpec *cluster.Spec, workerNodeGroupMachineSpec v1 versionsBundle := clusterSpec.WorkerNodeGroupVersionsBundle(workerNodeGroupConfiguration) format := "cloud-config" - kubeletExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs(). - Append(clusterapi.ResolvConfExtraArgs(clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf)). 
- Append(clusterapi.WorkerNodeLabelsExtraArgs(workerNodeGroupConfiguration)) values := map[string]interface{}{ "clusterName": clusterSpec.Cluster.Name, "eksaSystemNamespace": constants.EksaSystemNamespace, @@ -356,7 +366,6 @@ func buildTemplateMapMD(clusterSpec *cluster.Spec, workerNodeGroupMachineSpec v1 "imageIDType": workerNodeGroupMachineSpec.Image.Type, "imageName": workerNodeGroupMachineSpec.Image.Name, "imageUUID": workerNodeGroupMachineSpec.Image.UUID, - "kubeletExtraArgs": kubeletExtraArgs.ToPartialYaml(), "nutanixPEClusterIDType": workerNodeGroupMachineSpec.Cluster.Type, "nutanixPEClusterName": workerNodeGroupMachineSpec.Cluster.Name, "nutanixPEClusterUUID": workerNodeGroupMachineSpec.Cluster.UUID, @@ -408,13 +417,32 @@ func buildTemplateMapMD(clusterSpec *cluster.Spec, workerNodeGroupMachineSpec v1 if workerNodeGroupConfiguration.KubeletConfiguration != nil { wnKubeletConfig := workerNodeGroupConfiguration.KubeletConfiguration.Object + if _, ok := wnKubeletConfig["tlsCipherSuites"]; !ok { + wnKubeletConfig["tlsCipherSuites"] = crypto.SecureCipherSuiteNames() + } + + if _, ok := wnKubeletConfig["resolvConf"]; !ok { + if clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf != nil { + wnKubeletConfig["resolvConf"] = clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf.Path + } + } kcString, err := yaml.Marshal(wnKubeletConfig) if err != nil { return nil, fmt.Errorf("error marshaling %v", err) } values["kubeletConfiguration"] = string(kcString) + } else { + kubeletExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs(). + Append(clusterapi.ResolvConfExtraArgs(clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf)) + values["kubeletExtraArgs"] = kubeletExtraArgs.ToPartialYaml() } + + nodeLabelArgs := clusterapi.WorkerNodeLabelsExtraArgs(workerNodeGroupConfiguration) + if len(nodeLabelArgs) != 0 { + values["nodeLabelArgs"] = nodeLabelArgs.ToPartialYaml() + } + return values, nil } diff --git a/pkg/providers/nutanix/template_test.go b/pkg/providers/nutanix/template_test.go index 037081059f57..588eb2483079 100644 --- a/pkg/providers/nutanix/template_test.go +++ b/pkg/providers/nutanix/template_test.go @@ -103,6 +103,11 @@ func TestNewNutanixTemplateBuilderKubeletConfiguration(t *testing.T) { "maxPods": 20, }, } + buildSpec.Cluster.Spec.ClusterNetwork.DNS = anywherev1.DNS{ + ResolvConf: &anywherev1.ResolvConf{ + Path: "test-path", + }, + } buildSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].KubeletConfiguration = &unstructured.Unstructured{ Object: map[string]interface{}{ @@ -113,6 +118,11 @@ func TestNewNutanixTemplateBuilderKubeletConfiguration(t *testing.T) { spec, err := builder.GenerateCAPISpecControlPlane(buildSpec) assert.NoError(t, err) assert.NotNil(t, spec) + t.Logf("data\n %v\n", string(spec)) + + cpSpec, err := os.ReadFile("testdata/expected_cp.yaml") + assert.NoError(t, err) + assert.Equal(t, cpSpec, spec) workloadTemplateNames := map[string]string{ "eksa-unit-test": "eksa-unit-test", @@ -123,6 +133,11 @@ func TestNewNutanixTemplateBuilderKubeletConfiguration(t *testing.T) { workerSpec, err := builder.GenerateCAPISpecWorkers(buildSpec, workloadTemplateNames, kubeadmconfigTemplateNames) assert.NoError(t, err) assert.NotNil(t, workerSpec) + t.Logf("data\n %v\n", string(workerSpec)) + + wnSpec, err := os.ReadFile("testdata/expected_wn.yaml") + assert.NoError(t, err) + assert.Equal(t, wnSpec, workerSpec) } func TestNewNutanixTemplateBuilderGenerateCAPISpecControlPlaneFailure(t *testing.T) { @@ -480,7 +495,7 @@ func TestNewNutanixTemplateBuilderNodeTaintsAndLabels(t 
*testing.T) { expectedControlPlaneSpec, err := os.ReadFile("testdata/expected_results_node_taints_labels.yaml") require.NoError(t, err) assert.Equal(t, expectedControlPlaneSpec, cpSpec) - + t.Logf("data \n %v\n", string(cpSpec)) workloadTemplateNames := map[string]string{ "eksa-unit-test": "eksa-unit-test", } diff --git a/pkg/providers/nutanix/testdata/expected_cp.yaml b/pkg/providers/nutanix/testdata/expected_cp.yaml new file mode 100644 index 000000000000..81569fb33035 --- /dev/null +++ b/pkg/providers/nutanix/testdata/expected_cp.yaml @@ -0,0 +1,619 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixCluster +metadata: + name: "eksa-unit-test" + namespace: "eksa-system" +spec: + failureDomains: [] + prismCentral: + address: "prism.nutanix.com" + port: 9440 + insecure: false + credentialRef: + name: "capx-eksa-unit-test" + kind: Secret + controlPlaneEndpoint: + host: "test-ip" + port: 6443 +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: "eksa-unit-test" + name: "eksa-unit-test" + namespace: "eksa-system" +spec: + clusterNetwork: + services: + cidrBlocks: [10.96.0.0/12] + pods: + cidrBlocks: [192.168.0.0/16] + serviceDomain: "cluster.local" + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: "eksa-unit-test" + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixCluster + name: "eksa-unit-test" +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: "eksa-unit-test" + namespace: "eksa-system" +spec: + replicas: 3 + version: "v1.19.8-eks-1-19-4" + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + name: "" + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: "public.ecr.aws/eks-distro/kubernetes" + apiServer: + certSANs: + - localhost + - 127.0.0.1 + - 0.0.0.0 + extraArgs: + cloud-provider: external + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + controllerManager: + extraArgs: + cloud-provider: external + enable-hostpath-provisioner: "true" + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.0-eks-1-19-4 + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.14-eks-1-19-4 + files: + - content: | + maxPods: 20 + resolvConf: test-path + tlsCipherSuites: + - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + + owner: root:root + permissions: "0644" + path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - name: kube-vip + image: + imagePullPolicy: IfNotPresent + args: + - manager + env: + - name: vip_arp + value: "true" + - name: address + value: "test-ip" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: 
vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: svc_enable + value: "false" + - name: lb_enable + value: "false" + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_TIME + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + resources: {} + hostNetwork: true + volumes: + - name: kubeconfig + hostPath: + type: FileOrCreate + path: /etc/kubernetes/admin.conf + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. 
+ - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. + - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + patches: + directory: /etc/kubernetes/patches + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd + # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 + #cgroup-driver: cgroupfs + joinConfiguration: + patches: + directory: /etc/kubernetes/patches + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + name: "{{ ds.meta_data.hostname }}" + users: + - name: "mySshUsername" + lockPassword: false + sudo: ALL=(ALL) NOPASSWD:ALL + sshAuthorizedKeys: + - "mySshAuthorizedKey" + preKubeadmCommands: + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >> /etc/hosts + postKubeadmCommands: + - echo export KUBECONFIG=/etc/kubernetes/admin.conf >> /root/.bashrc + useExperimentalRetryJoin: true +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixMachineTemplate +metadata: + name: "" + namespace: "eksa-system" +spec: + template: + spec: + providerID: "nutanix://eksa-unit-test-m1" + vcpusPerSocket: 1 + vcpuSockets: 4 + memorySize: 8Gi + systemDiskSize: 40Gi + image: + type: name + name: "prism-image" + + cluster: + type: name + name: "prism-cluster" + subnet: + - type: name + name: "prism-subnet" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: eksa-unit-test-nutanix-ccm + namespace: "eksa-system" +data: + nutanix-ccm.yaml: | + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + namespace: kube-system + --- + kind: ConfigMap + apiVersion: v1 + metadata: 
+ name: nutanix-config + namespace: kube-system + data: + nutanix_config.json: |- + { + "prismCentral": { + "address": "prism.nutanix.com", + "port": 9440, + "insecure": false, + "credentialRef": { + "kind": "secret", + "name": "nutanix-creds", + "namespace": "kube-system" + } + }, + "enableCustomLabeling": false, + "topologyDiscovery": { + "type": "Prism" + } + } + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - "*" + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + --- + kind: ClusterRoleBinding + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + labels: + k8s-app: nutanix-cloud-controller-manager + name: nutanix-cloud-controller-manager + namespace: kube-system + spec: + replicas: 1 + selector: + matchLabels: + k8s-app: nutanix-cloud-controller-manager + strategy: + type: Recreate + template: + metadata: + labels: + k8s-app: nutanix-cloud-controller-manager + spec: + hostNetwork: true + priorityClassName: system-cluster-critical + nodeSelector: + node-role.kubernetes.io/control-plane: "" + serviceAccountName: cloud-controller-manager + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: nutanix-cloud-controller-manager + topologyKey: kubernetes.io/hostname + dnsPolicy: Default + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 120 + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 120 + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + containers: + - image: "" + imagePullPolicy: IfNotPresent + name: nutanix-cloud-controller-manager + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + args: + - "--leader-elect=true" + - "--cloud-config=/etc/cloud/nutanix_config.json" + resources: + requests: + cpu: 100m + memory: 50Mi + volumeMounts: + - mountPath: /etc/cloud + name: nutanix-config-volume + readOnly: true + volumes: + - name: nutanix-config-volume + configMap: + name: nutanix-config +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: 
eksa-unit-test-nutanix-ccm-crs + namespace: "eksa-system" +spec: + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: "eksa-unit-test" + resources: + - kind: ConfigMap + name: eksa-unit-test-nutanix-ccm + - kind: Secret + name: eksa-unit-test-nutanix-ccm-secret + strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "eksa-unit-test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_results_node_taints_labels.yaml b/pkg/providers/nutanix/testdata/expected_results_node_taints_labels.yaml index 07326e5f9fee..6e440f19f2b0 100644 --- a/pkg/providers/nutanix/testdata/expected_results_node_taints_labels.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_node_taints_labels.yaml @@ -316,8 +316,8 @@ spec: # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 #cgroup-driver: cgroupfs eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% - node-labels: key1-cp=value1-cp,key2-cp=value2-cp tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + node-labels: key1-cp=value1-cp,key2-cp=value2-cp taints: - key: key1 value: val1 @@ -329,8 +329,8 @@ spec: cloud-provider: external read-only-port: "0" anonymous-auth: "false" - node-labels: key1-cp=value1-cp,key2-cp=value2-cp tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + node-labels: key1-cp=value1-cp,key2-cp=value2-cp taints: - key: key1 value: val1 diff --git a/pkg/providers/nutanix/testdata/expected_results_node_taints_labels_md.yaml b/pkg/providers/nutanix/testdata/expected_results_node_taints_labels_md.yaml index b5f2952bd7f2..217297ef95fc 100644 --- a/pkg/providers/nutanix/testdata/expected_results_node_taints_labels_md.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_node_taints_labels_md.yaml @@ -69,8 +69,8 @@ spec: # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 #cgroup-driver: cgroupfs eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% - node-labels: key1-md=value1-md,key2-md=value2-md tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + node-labels: key1-md=value1-md,key2-md=value2-md taints: - key: key1 value: val1 diff --git a/pkg/providers/nutanix/testdata/expected_wn.yaml b/pkg/providers/nutanix/testdata/expected_wn.yaml new file mode 100644 index 000000000000..2419f3bd1cf0 --- /dev/null +++ b/pkg/providers/nutanix/testdata/expected_wn.yaml @@ -0,0 +1,91 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + labels: + cluster.x-k8s.io/cluster-name: "eksa-unit-test" + name: "eksa-unit-test-eksa-unit-test" + namespace: "eksa-system" +spec: + clusterName: "eksa-unit-test" + replicas: 4 + selector: + matchLabels: {} + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: "eksa-unit-test" + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: "eksa-unit-test" + clusterName: "eksa-unit-test" + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + name: "eksa-unit-test" + version: 
"v1.19.8-eks-1-19-4" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixMachineTemplate +metadata: + name: "eksa-unit-test" + namespace: "eksa-system" +spec: + template: + spec: + providerID: "nutanix://eksa-unit-test-m1" + vcpusPerSocket: 1 + vcpuSockets: 4 + memorySize: 8Gi + systemDiskSize: 40Gi + image: + type: name + name: "prism-image" + + cluster: + type: name + name: "prism-cluster" + subnet: + - type: name + name: "prism-subnet" +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: "eksa-unit-test" + namespace: "eksa-system" +spec: + template: + spec: + preKubeadmCommands: + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + joinConfiguration: + patches: + directory: /etc/kubernetes/patches + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd + # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 + #cgroup-driver: cgroupfs + name: '{{ ds.meta_data.hostname }}' + users: + - name: "mySshUsername" + lockPassword: false + sudo: ALL=(ALL) NOPASSWD:ALL + sshAuthorizedKeys: + - "mySshAuthorizedKey" + files: + - content: | + maxPods: 20 + resolvConf: test-path + tlsCipherSuites: + - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + + owner: root:root + permissions: "0644" + path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml + +--- diff --git a/pkg/providers/vsphere/config/template-cp.yaml b/pkg/providers/vsphere/config/template-cp.yaml index 3715e523b022..c0e3a671c8ca 100644 --- a/pkg/providers/vsphere/config/template-cp.yaml +++ b/pkg/providers/vsphere/config/template-cp.yaml @@ -248,7 +248,7 @@ spec: files: {{- if .kubeletConfiguration }} - content: | -{{ .kubeletConfiguration | indent 8}} +{{ .kubeletConfiguration | indent 8 }} owner: root:root permissions: "0644" path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml @@ -408,10 +408,15 @@ spec: criSocket: /var/run/containerd/containerd.sock kubeletExtraArgs: cloud-provider: external +{{- if not .kubeletConfiguration }} read-only-port: "0" anonymous-auth: "false" {{- if .kubeletExtraArgs }} {{ .kubeletExtraArgs.ToYaml | indent 10 }} +{{- end }} +{{- end }} +{{- if .nodeLabelArgs }} +{{ .nodeLabelArgs.ToYaml | indent 10 }} {{- end }} name: '{{`{{ ds.meta_data.hostname }}`}}' {{- if .controlPlaneTaints }} @@ -478,10 +483,15 @@ spec: criSocket: /var/run/containerd/containerd.sock kubeletExtraArgs: cloud-provider: external +{{- if not .kubeletConfiguration }} read-only-port: "0" anonymous-auth: "false" {{- if .kubeletExtraArgs }} {{ .kubeletExtraArgs.ToYaml | indent 10 }} +{{- end }} +{{- end }} +{{- if .nodeLabelArgs }} +{{ .nodeLabelArgs.ToYaml | indent 10 }} {{- end }} name: '{{`{{ ds.meta_data.hostname }}`}}' {{- if .controlPlaneTaints }} diff --git a/pkg/providers/vsphere/config/template-md.yaml b/pkg/providers/vsphere/config/template-md.yaml index 62e32ed3f4b3..96ca6281fa97 100644 --- a/pkg/providers/vsphere/config/template-md.yaml +++ b/pkg/providers/vsphere/config/template-md.yaml @@ -71,6 +71,7 @@ spec: {{- end }} kubeletExtraArgs: cloud-provider: external +{{- if not .kubeletConfiguration }} read-only-port: "0" anonymous-auth: "false" {{- if .cgroupDriverSystemd}} @@ -78,6 +79,10 @@ spec: {{- end }} {{- if .kubeletExtraArgs }} {{ .kubeletExtraArgs.ToYaml | indent 12 }} +{{- end }} +{{- end }} +{{- if .nodeLabelArgs }} +{{ .nodeLabelArgs.ToYaml | indent 12 }} {{- end }} name: '{{"{{"}} 
ds.meta_data.hostname {{"}}"}}' {{- if or (and (ne .format "bottlerocket") (or .proxyConfig .registryMirrorMap)) .kubeletConfiguration }} diff --git a/pkg/providers/vsphere/template.go b/pkg/providers/vsphere/template.go index cbd6faac1b3f..23620d4c8804 100644 --- a/pkg/providers/vsphere/template.go +++ b/pkg/providers/vsphere/template.go @@ -140,9 +140,7 @@ func buildTemplateMapCP( format := "cloud-config" etcdExtraArgs := clusterapi.SecureEtcdTlsCipherSuitesExtraArgs() sharedExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs() - kubeletExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs(). - Append(clusterapi.ResolvConfExtraArgs(clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf)). - Append(clusterapi.ControlPlaneNodeLabelsExtraArgs(clusterSpec.Cluster.Spec.ControlPlaneConfiguration)) + apiServerExtraArgs := clusterapi.OIDCToExtraArgs(clusterSpec.OIDCConfig). Append(clusterapi.AwsIamAuthExtraArgs(clusterSpec.AWSIamConfig)). Append(clusterapi.APIServerExtraArgs(clusterSpec.Cluster.Spec.ControlPlaneConfiguration.APIServerExtraArgs)). @@ -202,7 +200,6 @@ func buildTemplateMapCP( "apiserverExtraArgs": apiServerExtraArgs.ToPartialYaml(), "controllerManagerExtraArgs": controllerManagerExtraArgs.ToPartialYaml(), "schedulerExtraArgs": sharedExtraArgs.ToPartialYaml(), - "kubeletExtraArgs": kubeletExtraArgs.ToPartialYaml(), "format": format, "externalEtcdVersion": versionsBundle.KubeDistro.EtcdVersion, "etcdImage": versionsBundle.KubeDistro.EtcdImage.VersionedImage(), @@ -359,12 +356,29 @@ func buildTemplateMapCP( if clusterSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration != nil { cpKubeletConfig := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration.Object + if _, ok := cpKubeletConfig["tlsCipherSuites"]; !ok { + cpKubeletConfig["tlsCipherSuites"] = crypto.SecureCipherSuiteNames() + } + + if _, ok := cpKubeletConfig["resolvConf"]; !ok { + if clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf != nil { + cpKubeletConfig["resolvConf"] = clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf.Path + } + } kcString, err := yaml.Marshal(cpKubeletConfig) if err != nil { return nil, fmt.Errorf("error marshaling %v", err) } - values["kubeletConfiguration"] = string(kcString) + } else { + kubeletExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs(). + Append(clusterapi.ResolvConfExtraArgs(clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf)) + values["kubeletExtraArgs"] = kubeletExtraArgs.ToPartialYaml() + } + + nodeLabelArgs := clusterapi.ControlPlaneNodeLabelsExtraArgs(clusterSpec.Cluster.Spec.ControlPlaneConfiguration) + if len(nodeLabelArgs) != 0 { + values["nodeLabelArgs"] = nodeLabelArgs.ToPartialYaml() } if clusterSpec.Cluster.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy != nil { @@ -390,9 +404,6 @@ func buildTemplateMapMD( return nil, fmt.Errorf("could not find VersionsBundle") } format := "cloud-config" - kubeletExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs(). - Append(clusterapi.WorkerNodeLabelsExtraArgs(workerNodeGroupConfiguration)). 
- Append(clusterapi.ResolvConfExtraArgs(clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf)) firstUser := workerNodeGroupMachineSpec.Users[0] sshKey, err := common.StripSshAuthorizedKeyComment(firstUser.SshAuthorizedKeys[0]) @@ -420,7 +431,6 @@ func buildTemplateMapMD( "vsphereWorkerSshAuthorizedKey": sshKey, "format": format, "eksaSystemNamespace": constants.EksaSystemNamespace, - "kubeletExtraArgs": kubeletExtraArgs.ToPartialYaml(), "workerReplicas": *workerNodeGroupConfiguration.Count, "workerNodeGroupName": fmt.Sprintf("%s-%s", clusterSpec.Cluster.Name, workerNodeGroupConfiguration.Name), "workerNodeGroupTaints": workerNodeGroupConfiguration.Taints, @@ -503,12 +513,31 @@ func buildTemplateMapMD( if workerNodeGroupConfiguration.KubeletConfiguration != nil { wnKubeletConfig := workerNodeGroupConfiguration.KubeletConfiguration.Object + + if _, ok := wnKubeletConfig["tlsCipherSuites"]; !ok { + wnKubeletConfig["tlsCipherSuites"] = crypto.SecureCipherSuiteNames() + } + + if _, ok := wnKubeletConfig["resolvConf"]; !ok { + if clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf != nil { + wnKubeletConfig["resolvConf"] = clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf.Path + } + } kcString, err := yaml.Marshal(wnKubeletConfig) if err != nil { return nil, fmt.Errorf("error marshaling %v", err) } values["kubeletConfiguration"] = string(kcString) + } else { + kubeletExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs(). + Append(clusterapi.ResolvConfExtraArgs(clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf)) + values["kubeletExtraArgs"] = kubeletExtraArgs.ToPartialYaml() + } + + nodeLabelArgs := clusterapi.WorkerNodeLabelsExtraArgs(workerNodeGroupConfiguration) + if len(nodeLabelArgs) != 0 { + values["nodeLabelArgs"] = nodeLabelArgs.ToPartialYaml() } return values, nil diff --git a/pkg/providers/vsphere/template_test.go b/pkg/providers/vsphere/template_test.go index 4ca420d9945a..f1a5f0baa2f5 100644 --- a/pkg/providers/vsphere/template_test.go +++ b/pkg/providers/vsphere/template_test.go @@ -8,6 +8,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "github.com/aws/eks-anywhere/internal/test" + "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/clusterapi" "github.com/aws/eks-anywhere/pkg/config" "github.com/aws/eks-anywhere/pkg/providers/vsphere" @@ -57,7 +58,7 @@ func TestVsphereTemplateBuilderGenerateCAPISpecControlPlaneInvalidEtcdSSHKey(t * ) } -func TestVsphereTemplateBuilderGenerateCAPISpecControlPlaneInvalidKubeletConfigWN(t *testing.T) { +func TestVsphereTemplateBuilderGenerateCAPISpecControlPlaneValidKubeletConfigWN(t *testing.T) { g := NewWithT(t) spec := test.NewFullClusterSpec(t, "testdata/cluster_main.yaml") spec.Cluster.Spec.WorkerNodeGroupConfigurations[0].KubeletConfiguration = &unstructured.Unstructured{ @@ -65,12 +66,18 @@ func TestVsphereTemplateBuilderGenerateCAPISpecControlPlaneInvalidKubeletConfigW "maxPods": 20, }, } + spec.Cluster.Spec.ClusterNetwork.DNS = v1alpha1.DNS{ + ResolvConf: &v1alpha1.ResolvConf{ + Path: "test-path", + }, + } builder := vsphere.NewVsphereTemplateBuilder(time.Now) - _, err := builder.GenerateCAPISpecWorkers(spec, nil, nil) + data, err := builder.GenerateCAPISpecWorkers(spec, nil, nil) g.Expect(err).ToNot(HaveOccurred()) + test.AssertContentToFile(t, string(data), "testdata/expected_kct.yaml") } -func TestVsphereTemplateBuilderGenerateCAPISpecControlPlaneInvalidKubeletConfigCP(t *testing.T) { +func TestVsphereTemplateBuilderGenerateCAPISpecControlPlaneValidKubeletConfigCP(t *testing.T) 
{ g := NewWithT(t) spec := test.NewFullClusterSpec(t, "testdata/cluster_main.yaml") spec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration = &unstructured.Unstructured{ @@ -78,11 +85,17 @@ func TestVsphereTemplateBuilderGenerateCAPISpecControlPlaneInvalidKubeletConfigC "maxPods": 20, }, } + spec.Cluster.Spec.ClusterNetwork.DNS = v1alpha1.DNS{ + ResolvConf: &v1alpha1.ResolvConf{ + Path: "test-path", + }, + } builder := vsphere.NewVsphereTemplateBuilder(time.Now) - _, err := builder.GenerateCAPISpecControlPlane(spec, func(values map[string]interface{}) { + data, err := builder.GenerateCAPISpecControlPlane(spec, func(values map[string]interface{}) { values["controlPlaneTemplateName"] = clusterapi.ControlPlaneMachineTemplateName(spec.Cluster) }) g.Expect(err).ToNot(HaveOccurred()) + test.AssertContentToFile(t, string(data), "testdata/expected_kcp.yaml") } func TestTemplateBuilder_CertSANs(t *testing.T) { diff --git a/pkg/providers/vsphere/testdata/expected_kcp.yaml b/pkg/providers/vsphere/testdata/expected_kcp.yaml new file mode 100644 index 000000000000..7e40fc60b477 --- /dev/null +++ b/pkg/providers/vsphere/testdata/expected_kcp.yaml @@ -0,0 +1,700 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test + namespace: eksa-system +spec: + clusterNetwork: + pods: + cidrBlocks: [192.168.0.0/16] + services: + cidrBlocks: [10.96.0.0/12] + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereCluster + name: test + managedExternalEtcdRef: + apiVersion: etcdcluster.cluster.x-k8s.io/v1beta1 + kind: EtcdadmCluster + name: test-etcd + namespace: eksa-system +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereCluster +metadata: + name: test + namespace: eksa-system +spec: + controlPlaneEndpoint: + host: 1.2.3.4 + port: 6443 + identityRef: + kind: Secret + name: test-vsphere-credentials + server: vsphere_server + thumbprint: 'ABCDEFG' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: test-control-plane-1 + namespace: eksa-system +spec: + template: + spec: + cloneMode: linkedClone + datacenter: 'SDDC-Datacenter' + datastore: /SDDC-Datacenter/datastore/WorkloadDatastore + diskGiB: 25 + folder: '/SDDC-Datacenter/vm' + memoryMiB: 8192 + network: + devices: + - dhcp4: true + networkName: /SDDC-Datacenter/network/sddc-cgw-network-1 + numCPUs: 2 + resourcePool: '*/Resources' + server: vsphere_server + storagePolicyName: "vSAN Default Storage Policy" + template: /SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6 + thumbprint: 'ABCDEFG' +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: test + namespace: eksa-system +spec: + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: test-control-plane-1 + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: public.ecr.aws/eks-distro/kubernetes + etcd: + external: + endpoints: [] + caFile: "/etc/kubernetes/pki/etcd/ca.crt" + certFile: "/etc/kubernetes/pki/apiserver-etcd-client.crt" + keyFile: "/etc/kubernetes/pki/apiserver-etcd-client.key" + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.0-eks-1-19-4 + apiServer: + extraArgs: + cloud-provider: external + audit-policy-file: 
/etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + controllerManager: + extraArgs: + cloud-provider: external + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + scheduler: + extraArgs: + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + files: + - content: | + maxPods: 20 + resolvConf: test-path + tlsCipherSuites: + - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + + owner: root:root + permissions: "0644" + path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: address + value: 1.2.3.4 + image: public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.2-2093eaeda5a4567f0e516d652e0b25b1d7abc774 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + name: kubeconfig + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. 
+ - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. + - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. 
+ - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + patches: + directory: /etc/kubernetes/patches + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + name: '{{ ds.meta_data.hostname }}' + joinConfiguration: + patches: + directory: /etc/kubernetes/patches + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + name: '{{ ds.meta_data.hostname }}' + preKubeadmCommands: + - hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts + - echo "{{ ds.meta_data.hostname }}" >/etc/hostname + useExperimentalRetryJoin: true + users: + - name: capv + sshAuthorizedKeys: + - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ==' + sudo: ALL=(ALL) NOPASSWD:ALL + format: cloud-config + replicas: 3 + version: v1.19.8-eks-1-19-4 +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test-cpi + namespace: eksa-system +spec: + strategy: Reconcile + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: test + resources: + - kind: Secret + name: test-cloud-controller-manager + - kind: Secret + name: test-cloud-provider-vsphere-credentials + - kind: ConfigMap + name: test-cpi-manifests +--- +kind: EtcdadmCluster +apiVersion: etcdcluster.cluster.x-k8s.io/v1beta1 +metadata: + name: test-etcd + namespace: eksa-system +spec: + replicas: 3 + etcdadmConfigSpec: + etcdadmBuiltin: true + format: cloud-config + cloudInitConfig: + version: 3.4.14 + installDir: "/usr/bin" + etcdReleaseURL: https://distro.eks.amazonaws.com/kubernetes-1-19/releases/4/artifacts/etcd/v3.4.14/etcd-linux-amd64-v3.4.14.tar.gz + preEtcdadmCommands: + - hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts + - echo "{{ ds.meta_data.hostname }}" >/etc/hostname + cipherSuites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + users: + - name: capv + sshAuthorizedKeys: + - 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ==' + sudo: ALL=(ALL) NOPASSWD:ALL + infrastructureTemplate: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: + namespace: 'eksa-system' +spec: + template: + spec: + cloneMode: linkedClone + datacenter: 'SDDC-Datacenter' + datastore: /SDDC-Datacenter/datastore/WorkloadDatastore + diskGiB: 25 + folder: '/SDDC-Datacenter/vm' + memoryMiB: 4096 + network: + devices: + - dhcp4: true + networkName: /SDDC-Datacenter/network/sddc-cgw-network-1 + numCPUs: 3 + resourcePool: '*/Resources' + server: vsphere_server + storagePolicyName: "vSAN Default Storage Policy" + template: /SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6 + thumbprint: 'ABCDEFG' +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-vsphere-credentials + namespace: eksa-system + labels: + clusterctl.cluster.x-k8s.io/move: "true" +data: + username: + password: +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-cloud-controller-manager + namespace: eksa-system +stringData: + data: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + namespace: kube-system +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-cloud-provider-vsphere-credentials + namespace: eksa-system +stringData: + data: | + apiVersion: v1 + kind: Secret + metadata: + name: cloud-provider-vsphere-credentials + namespace: kube-system + data: + vsphere_server.password: + vsphere_server.username: + type: Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - '*' + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - delete + - update + - create + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: v1 + data: + vsphere.conf: | + global: + secretName: cloud-provider-vsphere-credentials + secretNamespace: kube-system + thumbprint: "ABCDEFG" + insecureFlag: false + vcenter: + vsphere_server: + datacenters: + - 'SDDC-Datacenter' + secretName: cloud-provider-vsphere-credentials + secretNamespace: kube-system + server: 'vsphere_server' + thumbprint: 'ABCDEFG' + kind: ConfigMap + metadata: + name: vsphere-cloud-config + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: servicecatalog.k8s.io:apiserver-authentication-reader + namespace: kube-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: v1 + kind: Service + metadata: + labels: + component: cloud-controller-manager + name: cloud-controller-manager + namespace: kube-system + spec: + ports: + - port: 443 + protocol: TCP + targetPort: 43001 + selector: + component: cloud-controller-manager + type: NodePort + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + labels: + k8s-app: vsphere-cloud-controller-manager + name: vsphere-cloud-controller-manager + namespace: kube-system + spec: + selector: + matchLabels: + k8s-app: vsphere-cloud-controller-manager + template: + metadata: + labels: + k8s-app: vsphere-cloud-controller-manager + spec: + containers: + - args: + - --v=2 + - --cloud-provider=vsphere + - --cloud-config=/etc/cloud/vsphere.conf + image: public.ecr.aws/l0g8r8j6/kubernetes/cloud-provider-vsphere/cpi/manager:v1.18.1-2093eaeda5a4567f0e516d652e0b25b1d7abc774 + name: vsphere-cloud-controller-manager + resources: + requests: + cpu: 200m + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + hostNetwork: true + serviceAccountName: cloud-controller-manager + tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + - effect: NoSchedule + key: node.kubernetes.io/not-ready + volumes: + - configMap: + name: vsphere-cloud-config + name: vsphere-config-volume + updateStrategy: + type: RollingUpdate +kind: ConfigMap +metadata: + name: test-cpi-manifests + namespace: eksa-system diff --git a/pkg/providers/vsphere/testdata/expected_kct.yaml b/pkg/providers/vsphere/testdata/expected_kct.yaml new file mode 100644 index 000000000000..523bf1acac0d --- /dev/null +++ b/pkg/providers/vsphere/testdata/expected_kct.yaml @@ -0,0 +1,95 @@ +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: + namespace: eksa-system +spec: + template: + spec: + joinConfiguration: + patches: + directory: /etc/kubernetes/patches + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + taints: [] + kubeletExtraArgs: + cloud-provider: external + name: '{{ ds.meta_data.hostname }}' + files: + - content: | + maxPods: 20 + resolvConf: test-path + tlsCipherSuites: + - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + + owner: root:root + permissions: "0644" + path: 
/etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml + preKubeadmCommands: + - hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts + - echo "{{ ds.meta_data.hostname }}" >/etc/hostname + users: + - name: capv + sshAuthorizedKeys: + - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ==' + sudo: ALL=(ALL) NOPASSWD:ALL + format: cloud-config +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test-md-0 + namespace: eksa-system +spec: + clusterName: test + replicas: 3 + selector: + matchLabels: {} + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: test + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: + clusterName: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: + version: v1.19.8-eks-1-19-4 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: + namespace: eksa-system +spec: + template: + spec: + cloneMode: linkedClone + datacenter: 'SDDC-Datacenter' + datastore: /SDDC-Datacenter/datastore/WorkloadDatastore + diskGiB: 25 + folder: '/SDDC-Datacenter/vm' + memoryMiB: 4096 + network: + devices: + - dhcp4: true + networkName: /SDDC-Datacenter/network/sddc-cgw-network-1 + numCPUs: 3 + resourcePool: '*/Resources' + server: vsphere_server + storagePolicyName: "vSAN Default Storage Policy" + template: /SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6 + thumbprint: 'ABCDEFG' + +--- diff --git a/pkg/providers/vsphere/testdata/expected_results_main_node_labels_cp.yaml b/pkg/providers/vsphere/testdata/expected_results_main_node_labels_cp.yaml index 0d1e6f40e802..060efe92a290 100644 --- a/pkg/providers/vsphere/testdata/expected_results_main_node_labels_cp.yaml +++ b/pkg/providers/vsphere/testdata/expected_results_main_node_labels_cp.yaml @@ -338,8 +338,8 @@ spec: cloud-provider: external read-only-port: "0" anonymous-auth: "false" - node-labels: label1=foo,label2=bar tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + node-labels: label1=foo,label2=bar name: '{{ ds.meta_data.hostname }}' joinConfiguration: nodeRegistration: @@ -348,8 +348,8 @@ spec: cloud-provider: external read-only-port: "0" anonymous-auth: "false" - node-labels: label1=foo,label2=bar tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + node-labels: label1=foo,label2=bar name: '{{ ds.meta_data.hostname }}' preKubeadmCommands: - hostname "{{ ds.meta_data.hostname }}" diff --git a/pkg/providers/vsphere/testdata/expected_results_main_node_labels_md.yaml 
b/pkg/providers/vsphere/testdata/expected_results_main_node_labels_md.yaml index da253689c3a2..2915f54a65c5 100644 --- a/pkg/providers/vsphere/testdata/expected_results_main_node_labels_md.yaml +++ b/pkg/providers/vsphere/testdata/expected_results_main_node_labels_md.yaml @@ -14,8 +14,8 @@ spec: cloud-provider: external read-only-port: "0" anonymous-auth: "false" - node-labels: label1=foo,label2=bar tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + node-labels: label1=foo,label2=bar name: '{{ ds.meta_data.hostname }}' preKubeadmCommands: - hostname "{{ ds.meta_data.hostname }}"
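
For readers skimming the provider diffs above, the shared pattern can be summarized in a short, self-contained Go sketch. This is an illustration only, not code from this patch: buildKubeletValues, secureCipherSuites, and the plain map types are simplified stand-ins for the real clusterapi and crypto helpers, and the only assumed dependency is sigs.k8s.io/yaml. When a KubeletConfiguration object is present, secure TLS cipher suites and the resolvConf path are defaulted into it and the result is marshaled for the kubeadm strategic-merge patch file; otherwise the equivalent kubelet extra args are emitted; node labels are rendered separately in both cases.

package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

// secureCipherSuites stands in for crypto.SecureCipherSuiteNames().
var secureCipherSuites = []string{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"}

// buildKubeletValues models the template-value branching introduced by this
// change: kubeletConfiguration and kubeletExtraArgs are mutually exclusive,
// while nodeLabelArgs is populated independently of that choice.
func buildKubeletValues(kubeletConfig map[string]interface{}, resolvConfPath, nodeLabels string) (map[string]interface{}, error) {
	values := map[string]interface{}{}

	if kubeletConfig != nil {
		// Merge defaults only when the user has not set them explicitly.
		if _, ok := kubeletConfig["tlsCipherSuites"]; !ok {
			kubeletConfig["tlsCipherSuites"] = secureCipherSuites
		}
		if _, ok := kubeletConfig["resolvConf"]; !ok && resolvConfPath != "" {
			kubeletConfig["resolvConf"] = resolvConfPath
		}
		// Marshaled YAML for the kubeadm strategic-merge patch file.
		kc, err := yaml.Marshal(kubeletConfig)
		if err != nil {
			return nil, fmt.Errorf("error marshaling %v", err)
		}
		values["kubeletConfiguration"] = string(kc)
	} else {
		// No KubeletConfiguration: fall back to kubelet extra args.
		extraArgs := map[string]string{"tls-cipher-suites": secureCipherSuites[0]}
		if resolvConfPath != "" {
			extraArgs["resolv-conf"] = resolvConfPath
		}
		values["kubeletExtraArgs"] = extraArgs
	}

	// Node labels are rendered separately so they appear whether or not a
	// KubeletConfiguration is supplied.
	if nodeLabels != "" {
		values["nodeLabelArgs"] = map[string]string{"node-labels": nodeLabels}
	}
	return values, nil
}

func main() {
	v, _ := buildKubeletValues(map[string]interface{}{"maxPods": 20}, "test-path", "key1-cp=value1-cp")
	fmt.Printf("%v\n", v)
}

Run as-is the sketch exercises the kubeletConfiguration branch; passing nil for the first argument shows the extra-args branch instead. Keeping node labels outside the mutually exclusive branch is the design choice that lets the generated manifests above carry node-labels in either mode.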