diff --git a/pkg/crypto/tls.go b/pkg/crypto/tls.go
index 91ce599cba03f..fd8b4ec72e8d8 100644
--- a/pkg/crypto/tls.go
+++ b/pkg/crypto/tls.go
@@ -7,10 +7,10 @@ import (
 // This is what we currently support as the default. In the future,
 // we can make this customizable and return a wider range of
 // supported names.
-func secureCipherSuiteNames() []string {
+func SecureCipherSuiteNames() []string {
     return []string{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"}
 }
 
 func SecureCipherSuitesString() string {
-    return strings.Join(secureCipherSuiteNames(), ",")
+    return strings.Join(SecureCipherSuiteNames(), ",")
 }
diff --git a/pkg/providers/tinkerbell/config/template-cp.yaml b/pkg/providers/tinkerbell/config/template-cp.yaml
index 0ecf54748ebb3..034af17734a3f 100644
--- a/pkg/providers/tinkerbell/config/template-cp.yaml
+++ b/pkg/providers/tinkerbell/config/template-cp.yaml
@@ -162,7 +162,7 @@ spec:
 {{- end }}
     initConfiguration:
 {{- if .kubeletConfiguration }}
-      patches: 
+      patches:
         directory: /etc/kubernetes/patches
 {{- end }}
     nodeRegistration:
@@ -191,7 +191,7 @@ spec:
 {{- end }}
     joinConfiguration:
 {{- if .kubeletConfiguration }}
-      patches: 
+      patches:
         directory: /etc/kubernetes/patches
 {{- end }}
 {{- if (eq .format "bottlerocket") }}
diff --git a/pkg/providers/tinkerbell/config/template-md.yaml b/pkg/providers/tinkerbell/config/template-md.yaml
index 1b5e8fa24adf5..bd80dccf991f4 100644
--- a/pkg/providers/tinkerbell/config/template-md.yaml
+++ b/pkg/providers/tinkerbell/config/template-md.yaml
@@ -76,7 +76,7 @@ spec:
     spec:
       joinConfiguration:
 {{- if .kubeletConfiguration }}
-        patches: 
+        patches:
           directory: /etc/kubernetes/patches
 {{- end }}
 {{- if (eq .format "bottlerocket") }}
diff --git a/pkg/providers/tinkerbell/template.go b/pkg/providers/tinkerbell/template.go
index 63473b8e64fb5..bc2271d339293 100644
--- a/pkg/providers/tinkerbell/template.go
+++ b/pkg/providers/tinkerbell/template.go
@@ -506,6 +506,16 @@ func buildTemplateMapCP(
 
     if clusterSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration != nil {
         cpKubeletConfig := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration.Object
+
+        if _, ok := cpKubeletConfig["tlsCipherSuites"]; !ok {
+            cpKubeletConfig["tlsCipherSuites"] = crypto.SecureCipherSuiteNames()
+        }
+
+        if _, ok := cpKubeletConfig["resolvConf"]; !ok {
+            if clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf != nil {
+                cpKubeletConfig["resolvConf"] = clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf.Path
+            }
+        }
         kcString, err := yaml.Marshal(cpKubeletConfig)
         if err != nil {
             return nil, fmt.Errorf("marshaling control plane node Kubelet Configuration while building CAPI template %v", err)
@@ -593,6 +603,16 @@ func buildTemplateMapMD(
 
     if workerNodeGroupConfiguration.KubeletConfiguration != nil {
         wnKubeletConfig := workerNodeGroupConfiguration.KubeletConfiguration.Object
+        if _, ok := wnKubeletConfig["tlsCipherSuites"]; !ok {
+            wnKubeletConfig["tlsCipherSuites"] = crypto.SecureCipherSuiteNames()
+        }
+
+        if _, ok := wnKubeletConfig["resolvConf"]; !ok {
+            if clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf != nil {
+                wnKubeletConfig["resolvConf"] = clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf.Path
+            }
+        }
+
         kcString, err := yaml.Marshal(wnKubeletConfig)
         if err != nil {
             return nil, fmt.Errorf("marshaling Kubelet Configuration for worker node %s: %v", workerNodeGroupConfiguration.Name, err)
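Note on the two template.go hunks above: both apply the same defaulting pattern in buildTemplateMapCP and buildTemplateMapMD, filling `tlsCipherSuites` and `resolvConf` only when the user has not set them, so explicit KubeletConfiguration values always win. A minimal standalone sketch of that pattern; the helper name `applyKubeletDefaults` is illustrative and not part of this change:

```go
package main

import "fmt"

// secureCipherSuiteNames mirrors crypto.SecureCipherSuiteNames from this diff.
func secureCipherSuiteNames() []string {
	return []string{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"}
}

// applyKubeletDefaults is a hypothetical helper: it sets tlsCipherSuites and
// resolvConf only when the keys are absent, so user-provided values are
// never overwritten.
func applyKubeletDefaults(kubeletConfig map[string]interface{}, resolvConfPath string) {
	if _, ok := kubeletConfig["tlsCipherSuites"]; !ok {
		kubeletConfig["tlsCipherSuites"] = secureCipherSuiteNames()
	}
	if _, ok := kubeletConfig["resolvConf"]; !ok && resolvConfPath != "" {
		kubeletConfig["resolvConf"] = resolvConfPath
	}
}

func main() {
	cfg := map[string]interface{}{"maxPods": 20, "resolvConf": "/custom/resolv.conf"}
	applyKubeletDefaults(cfg, "test-path")
	// resolvConf stays /custom/resolv.conf; tlsCipherSuites is defaulted.
	fmt.Println(cfg)
}
```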
diff --git a/pkg/providers/tinkerbell/template_test.go b/pkg/providers/tinkerbell/template_test.go
index 0355d2e0c2eb5..c31cfa539507c 100644
--- a/pkg/providers/tinkerbell/template_test.go
+++ b/pkg/providers/tinkerbell/template_test.go
@@ -205,7 +205,7 @@ func TestTemplateBuilderCPKubeletConfig(t *testing.T) {
     }{
         {
             Input:  "testdata/cluster_tinkerbell_api_server_cert_san_ip.yaml",
-            Output: "testdata/expected_cluster_tinkerbell_api_server_cert_san_ip.yaml",
+            Output: "testdata/expected_kcp.yaml",
         },
     } {
         g := NewWithT(t)
@@ -223,9 +223,17 @@ func TestTemplateBuilderCPKubeletConfig(t *testing.T) {
             },
         }
 
+        clusterSpec.Cluster.Spec.ClusterNetwork.DNS = v1alpha1.DNS{
+            ResolvConf: &v1alpha1.ResolvConf{
+                Path: "test-path",
+            },
+        }
+
         data, err := bldr.GenerateCAPISpecControlPlane(clusterSpec)
         g.Expect(err).ToNot(HaveOccurred())
         g.Expect(data).To(ContainSubstring("maxPods"))
+        t.Logf("\n data %v\n", string(data))
+        test.AssertContentToFile(t, string(data), tc.Output)
     }
 }
@@ -236,7 +244,7 @@ func TestTemplateBuilderWNKubeletConfig(t *testing.T) {
     }{
         {
             Input:  "testdata/cluster_tinkerbell_api_server_cert_san_ip.yaml",
-            Output: "testdata/expected_cluster_tinkerbell_api_server_cert_san_ip.yaml",
+            Output: "testdata/expected_kct.yaml",
         },
     } {
         g := NewWithT(t)
@@ -271,6 +279,12 @@ func TestTemplateBuilderWNKubeletConfig(t *testing.T) {
             },
         }
 
+        clusterSpec.Cluster.Spec.ClusterNetwork.DNS = v1alpha1.DNS{
+            ResolvConf: &v1alpha1.ResolvConf{
+                Path: "test-path",
+            },
+        }
+
         cpMachineCfg, _ := getControlPlaneMachineSpec(clusterSpec)
         wngMachineCfgs, _ := getWorkerNodeGroupMachineSpec(clusterSpec)
         tinkIPBefore := "0.0.0.0"
@@ -278,6 +292,7 @@ func TestTemplateBuilderWNKubeletConfig(t *testing.T) {
         workerTemplateNames, kubeadmTemplateNames := clusterapi.InitialTemplateNamesForWorkers(clusterSpec)
         data, err := bldr.GenerateCAPISpecWorkers(clusterSpec, workerTemplateNames, kubeadmTemplateNames)
         g.Expect(err).ToNot(HaveOccurred())
-        g.Expect(data).To(ContainSubstring("maxPods"))
+        t.Logf("\n data %v\n", string(data))
+        test.AssertContentToFile(t, string(data), tc.Output)
     }
 }
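The test changes above move from a single ContainSubstring check to golden-file comparison against testdata/expected_kcp.yaml and testdata/expected_kct.yaml. For readers unfamiliar with the pattern, here is a sketch of what a golden-file assertion like test.AssertContentToFile typically does, using only the standard library; the repo's actual helper may differ, for example by supporting an update flag to regenerate the golden files:

```go
package testhelpers

import (
	"os"
	"testing"
)

// assertContentToFile is a hypothetical stand-in for test.AssertContentToFile:
// it compares generated content against a golden file committed under testdata.
func assertContentToFile(t *testing.T, got, goldenPath string) {
	t.Helper()
	want, err := os.ReadFile(goldenPath)
	if err != nil {
		t.Fatalf("reading golden file %s: %v", goldenPath, err)
	}
	if string(want) != got {
		t.Errorf("content mismatch for %s:\ngot:\n%s\nwant:\n%s", goldenPath, got, want)
	}
}
```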
diff --git a/pkg/providers/tinkerbell/testdata/expected_kcp.yaml b/pkg/providers/tinkerbell/testdata/expected_kcp.yaml
new file mode 100644
index 0000000000000..bd5a6f07f850f
--- /dev/null
+++ b/pkg/providers/tinkerbell/testdata/expected_kcp.yaml
@@ -0,0 +1,350 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+  labels:
+    cluster.x-k8s.io/cluster-name: test
+  name: test
+  namespace: eksa-system
+spec:
+  clusterNetwork:
+    pods:
+      cidrBlocks: [192.168.0.0/16]
+    services:
+      cidrBlocks: [10.96.0.0/12]
+  controlPlaneEndpoint:
+    host: 0.0.0.0
+    port: 6443
+  controlPlaneRef:
+    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+    kind: KubeadmControlPlane
+    name: test
+  infrastructureRef:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+    kind: TinkerbellCluster
+    name: test
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: KubeadmControlPlane
+metadata:
+  name: test
+  namespace: eksa-system
+spec:
+  kubeadmConfigSpec:
+    clusterConfiguration:
+      imageRepository: public.ecr.aws/eks-distro/kubernetes
+      etcd:
+        local:
+          imageRepository: public.ecr.aws/eks-distro/etcd-io
+          imageTag: v3.4.16-eks-1-21-4
+      dns:
+        imageRepository: public.ecr.aws/eks-distro/coredns
+        imageTag: v1.8.3-eks-1-21-4
+      apiServer:
+        certSANs:
+        - 11.11.11.11
+        extraArgs:
+          audit-policy-file: /etc/kubernetes/audit-policy.yaml
+          audit-log-path: /var/log/kubernetes/api-audit.log
+          audit-log-maxage: "30"
+          audit-log-maxbackup: "10"
+          audit-log-maxsize: "512"
+        extraVolumes:
+        - hostPath: /etc/kubernetes/audit-policy.yaml
+          mountPath: /etc/kubernetes/audit-policy.yaml
+          name: audit-policy
+          pathType: File
+          readOnly: true
+        - hostPath: /var/log/kubernetes
+          mountPath: /var/log/kubernetes
+          name: audit-log-dir
+          pathType: DirectoryOrCreate
+          readOnly: false
+    initConfiguration:
+      patches:
+        directory: /etc/kubernetes/patches
+      nodeRegistration:
+        kubeletExtraArgs:
+          provider-id: PROVIDER_ID
+        taints: []
+    joinConfiguration:
+      patches:
+        directory: /etc/kubernetes/patches
+      nodeRegistration:
+        ignorePreflightErrors:
+        - DirAvailable--etc-kubernetes-manifests
+        kubeletExtraArgs:
+          provider-id: PROVIDER_ID
+        taints: []
+    files:
+    - content: |
+        apiVersion: kubelet.config.k8s.io/v1beta1
+        kind: KubeletConfiguration
+        maxPods: 20
+        resolvConf: test-path
+        tlsCipherSuites:
+        - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+
+      owner: root:root
+      permissions: "0644"
+      path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml
+    - content: |
+        apiVersion: v1
+        kind: Pod
+        metadata:
+          creationTimestamp: null
+          name: kube-vip
+          namespace: kube-system
+        spec:
+          containers:
+          - args:
+            - manager
+            env:
+            - name: vip_arp
+              value: "true"
+            - name: port
+              value: "6443"
+            - name: vip_cidr
+              value: "32"
+            - name: cp_enable
+              value: "true"
+            - name: cp_namespace
+              value: kube-system
+            - name: vip_ddns
+              value: "false"
+            - name: vip_leaderelection
+              value: "true"
+            - name: vip_leaseduration
+              value: "15"
+            - name: vip_renewdeadline
+              value: "10"
+            - name: vip_retryperiod
+              value: "2"
+            - name: address
+              value: 0.0.0.0
+            # kube-vip daemon in worker node watches for LoadBalancer services.
+            # When there is no worker node, make kube-vip in control-plane nodes watch
+            - name: svc_enable
+              value: "true"
+            - name: svc_election
+              value: "true"
+            image: public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.7-eks-a-v0.0.0-dev-build.581
+            imagePullPolicy: IfNotPresent
+            name: kube-vip
+            resources: {}
+            securityContext:
+              capabilities:
+                add:
+                - NET_ADMIN
+                - NET_RAW
+            volumeMounts:
+            - mountPath: /etc/kubernetes/admin.conf
+              name: kubeconfig
+          hostNetwork: true
+          volumes:
+          - hostPath:
+              path: /etc/kubernetes/admin.conf
+            name: kubeconfig
+        status: {}
+      owner: root:root
+      path: /etc/kubernetes/manifests/kube-vip.yaml
+    - content: |
+        apiVersion: audit.k8s.io/v1beta1
+        kind: Policy
+        rules:
+        # Log aws-auth configmap changes
+        - level: RequestResponse
+          namespaces: ["kube-system"]
+          verbs: ["update", "patch", "delete"]
+          resources:
+          - group: "" # core
+            resources: ["configmaps"]
+            resourceNames: ["aws-auth"]
+          omitStages:
+          - "RequestReceived"
+        # The following requests were manually identified as high-volume and low-risk,
+        # so drop them.
+        - level: None
+          users: ["system:kube-proxy"]
+          verbs: ["watch"]
+          resources:
+          - group: "" # core
+            resources: ["endpoints", "services", "services/status"]
+        - level: None
+          users: ["kubelet"] # legacy kubelet identity
+          verbs: ["get"]
+          resources:
+          - group: "" # core
+            resources: ["nodes", "nodes/status"]
+        - level: None
+          userGroups: ["system:nodes"]
+          verbs: ["get"]
+          resources:
+          - group: "" # core
+            resources: ["nodes", "nodes/status"]
+        - level: None
+          users:
+          - system:kube-controller-manager
+          - system:kube-scheduler
+          - system:serviceaccount:kube-system:endpoint-controller
+          verbs: ["get", "update"]
+          namespaces: ["kube-system"]
+          resources:
+          - group: "" # core
+            resources: ["endpoints"]
+        - level: None
+          users: ["system:apiserver"]
+          verbs: ["get"]
+          resources:
+          - group: "" # core
+            resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
+        # Don't log HPA fetching metrics.
+        - level: None
+          users:
+          - system:kube-controller-manager
+          verbs: ["get", "list"]
+          resources:
+          - group: "metrics.k8s.io"
+        # Don't log these read-only URLs.
+        - level: None
+          nonResourceURLs:
+          - /healthz*
+          - /version
+          - /swagger*
+        # Don't log events requests.
+        - level: None
+          resources:
+          - group: "" # core
+            resources: ["events"]
+        # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
+        - level: Request
+          users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"]
+          verbs: ["update","patch"]
+          resources:
+          - group: "" # core
+            resources: ["nodes/status", "pods/status"]
+          omitStages:
+          - "RequestReceived"
+        - level: Request
+          userGroups: ["system:nodes"]
+          verbs: ["update","patch"]
+          resources:
+          - group: "" # core
+            resources: ["nodes/status", "pods/status"]
+          omitStages:
+          - "RequestReceived"
+        # deletecollection calls can be large, don't log responses for expected namespace deletions
+        - level: Request
+          users: ["system:serviceaccount:kube-system:namespace-controller"]
+          verbs: ["deletecollection"]
+          omitStages:
+          - "RequestReceived"
+        # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,
+        # so only log at the Metadata level.
+        - level: Metadata
+          resources:
+          - group: "" # core
+            resources: ["secrets", "configmaps"]
+          - group: authentication.k8s.io
+            resources: ["tokenreviews"]
+          omitStages:
+          - "RequestReceived"
+        - level: Request
+          resources:
+          - group: ""
+            resources: ["serviceaccounts/token"]
+        # Get repsonses can be large; skip them.
+        - level: Request
+          verbs: ["get", "list", "watch"]
+          resources:
+          - group: "" # core
+          - group: "admissionregistration.k8s.io"
+          - group: "apiextensions.k8s.io"
+          - group: "apiregistration.k8s.io"
+          - group: "apps"
+          - group: "authentication.k8s.io"
+          - group: "authorization.k8s.io"
+          - group: "autoscaling"
+          - group: "batch"
+          - group: "certificates.k8s.io"
+          - group: "extensions"
+          - group: "metrics.k8s.io"
+          - group: "networking.k8s.io"
+          - group: "policy"
+          - group: "rbac.authorization.k8s.io"
+          - group: "scheduling.k8s.io"
+          - group: "settings.k8s.io"
+          - group: "storage.k8s.io"
+          omitStages:
+          - "RequestReceived"
+        # Default level for known APIs
+        - level: RequestResponse
+          resources:
+          - group: "" # core
+          - group: "admissionregistration.k8s.io"
+          - group: "apiextensions.k8s.io"
+          - group: "apiregistration.k8s.io"
+          - group: "apps"
+          - group: "authentication.k8s.io"
+          - group: "authorization.k8s.io"
+          - group: "autoscaling"
+          - group: "batch"
+          - group: "certificates.k8s.io"
+          - group: "extensions"
+          - group: "metrics.k8s.io"
+          - group: "networking.k8s.io"
+          - group: "policy"
+          - group: "rbac.authorization.k8s.io"
+          - group: "scheduling.k8s.io"
+          - group: "settings.k8s.io"
+          - group: "storage.k8s.io"
+          omitStages:
+          - "RequestReceived"
+        # Default level for all other requests.
+        - level: Metadata
+          omitStages:
+          - "RequestReceived"
+      owner: root:root
+      path: /etc/kubernetes/audit-policy.yaml
+    users:
+    - name: tink-user
+      sshAuthorizedKeys:
+      - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com'
+      sudo: ALL=(ALL) NOPASSWD:ALL
+    format: cloud-config
+  machineTemplate:
+    infrastructureRef:
+      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+      kind: TinkerbellMachineTemplate
+      name:
+  replicas: 1
+  version: v1.21.2-eks-1-21-4
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: TinkerbellMachineTemplate
+metadata:
+  name:
+  namespace: eksa-system
+spec:
+  template:
+    spec:
+      hardwareAffinity:
+        required:
+        - labelSelector:
+            matchLabels:
+              type: node
+      templateOverride: |
+        global_timeout: 0
+        id: ""
+        name: ""
+        tasks: null
+        version: ""
+
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: TinkerbellCluster
+metadata:
+  name: test
+  namespace: eksa-system
+spec:
+  imageLookupFormat: --kube-v1.21.2-eks-1-21-4.raw.gz
+  imageLookupBaseRegistry: /
\ No newline at end of file
diff --git a/pkg/providers/tinkerbell/testdata/expected_kct.yaml b/pkg/providers/tinkerbell/testdata/expected_kct.yaml
new file mode 100644
index 0000000000000..3c7312bdc243f
--- /dev/null
+++ b/pkg/providers/tinkerbell/testdata/expected_kct.yaml
@@ -0,0 +1,162 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+  labels:
+    cluster.x-k8s.io/cluster-name: test
+    pool: test
+  name: test-test
+  namespace: eksa-system
+spec:
+  clusterName: test
+  replicas: 1
+  selector:
+    matchLabels: {}
+  template:
+    metadata:
+      labels:
+        cluster.x-k8s.io/cluster-name: test
+        pool: test
+    spec:
+      bootstrap:
+        configRef:
+          apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+          kind: KubeadmConfigTemplate
+          name: test-test-1
+      clusterName: test
+      infrastructureRef:
+        apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+        kind: TinkerbellMachineTemplate
+        name: test-test-1
+      version: v1.21.2-eks-1-21-4
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: TinkerbellMachineTemplate
+metadata:
+  name: test-test-1
+  namespace: eksa-system
+spec:
+  template:
+    spec:
+      hardwareAffinity:
+        required:
+        - labelSelector:
+            matchLabels:
+      templateOverride: |
+        global_timeout: 6000
+        id: ""
+        name: test
+        tasks:
+        - actions:
+          - environment:
+              COMPRESSED: "true"
+              DEST_DISK: '{{ index .Hardware.Disks 0 }}'
+              IMG_URL: https://ubuntu-1-21.gz
+            image: ""
+            name: stream-image
+            timeout: 600
+          - environment:
+              DEST_DISK: '{{ formatPartition ( index .Hardware.Disks 0 ) 2 }}'
+              DEST_PATH: /etc/netplan/config.yaml
+              DIRMODE: "0755"
+              FS_TYPE: ext4
+              GID: "0"
+              MODE: "0644"
+              STATIC_NETPLAN: "true"
+              UID: "0"
+            image: ""
+            name: write-netplan
+            pid: host
+            timeout: 90
+          - environment:
+              CONTENTS: 'network: {config: disabled}'
+              DEST_DISK: '{{ formatPartition ( index .Hardware.Disks 0 ) 2 }}'
+              DEST_PATH: /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg
+              DIRMODE: "0700"
+              FS_TYPE: ext4
+              GID: "0"
+              MODE: "0600"
+              UID: "0"
+            image: ""
+            name: disable-cloud-init-network-capabilities
+            timeout: 90
+          - environment:
+              CONTENTS: |
+                datasource:
+                  Ec2:
+                    metadata_urls: [http://0.0.0.0:50061,http://5.6.7.8:50061]
+                    strict_id: false
+                manage_etc_hosts: localhost
+                warnings:
+                  dsid_missing_source: off
+              DEST_DISK: '{{ formatPartition ( index .Hardware.Disks 0 ) 2 }}'
+              DEST_PATH: /etc/cloud/cloud.cfg.d/10_tinkerbell.cfg
+              DIRMODE: "0700"
+              FS_TYPE: ext4
+              GID: "0"
+              MODE: "0600"
+              UID: "0"
+            image: ""
+            name: add-tink-cloud-init-config
+            timeout: 90
+          - environment:
+              CONTENTS: |
+                datasource: Ec2
+              DEST_DISK: '{{ formatPartition ( index .Hardware.Disks 0 ) 2 }}'
+              DEST_PATH: /etc/cloud/ds-identify.cfg
+              DIRMODE: "0700"
+              FS_TYPE: ext4
+              GID: "0"
+              MODE: "0600"
+              UID: "0"
+            image: ""
+            name: add-tink-cloud-init-ds-config
+            timeout: 90
+          - image: ""
+            name: reboot-image
+            pid: host
+            timeout: 90
+            volumes:
+            - /worker:/worker
+          name: test
+          volumes:
+          - /dev:/dev
+          - /dev/console:/dev/console
+          - /lib/firmware:/lib/firmware:ro
+          worker: '{{.device_1}}'
+        version: "0.1"
+
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+  name: test-test-1
+  namespace: eksa-system
+spec:
+  template:
+    spec:
+      joinConfiguration:
+        patches:
+          directory: /etc/kubernetes/patches
+        nodeRegistration:
+          kubeletExtraArgs:
+            provider-id: PROVIDER_ID
+      files:
+      - content: |
+          apiVersion: kubelet.config.k8s.io/v1beta1
+          kind: KubeletConfiguration
+          maxPods: 20
+          resolvConf: test-path
+          tlsCipherSuites:
+          - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+
+        owner: root:root
+        permissions: "0644"
+        path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml
+      users:
+      - name: user
+        sshAuthorizedKeys:
+        - 'ssh abcdef...'
+        sudo: ALL=(ALL) NOPASSWD:ALL
+      format: cloud-config
+
+---
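For context on the golden files above: the kubelet settings land in the files section as a kubeadm patch, produced by the yaml.Marshal calls added in template.go. A short sketch of that serialization step, assuming the diff's yaml package is sigs.k8s.io/yaml, which marshals via JSON and therefore emits keys alphabetically, consistent with the key order (apiVersion, kind, maxPods, resolvConf, tlsCipherSuites) seen in expected_kcp.yaml and expected_kct.yaml:

```go
package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

func main() {
	// The kubelet configuration map after defaulting, roughly as
	// buildTemplateMapCP would see it for the test cluster spec above.
	kubeletConfig := map[string]interface{}{
		"apiVersion":      "kubelet.config.k8s.io/v1beta1",
		"kind":            "KubeletConfiguration",
		"maxPods":         20,
		"resolvConf":      "test-path",
		"tlsCipherSuites": []string{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"},
	}

	out, err := yaml.Marshal(kubeletConfig)
	if err != nil {
		panic(err)
	}
	// This content is written to
	// /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml,
	// which kubeadm picks up via the `patches: directory:` stanza.
	fmt.Print(string(out))
}
```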