diff --git a/pkg/clusterapi/workers.go b/pkg/clusterapi/workers.go
index 415501095838..188eb264efc8 100644
--- a/pkg/clusterapi/workers.go
+++ b/pkg/clusterapi/workers.go
@@ -2,6 +2,7 @@ package clusterapi
 
 import (
 	"context"
+	"reflect"
 
 	"github.com/pkg/errors"
 	"k8s.io/apimachinery/pkg/api/equality"
@@ -124,7 +125,10 @@ func GetKubeadmConfigTemplate(ctx context.Context, client kubernetes.Client, nam
 
 func KubeadmConfigTemplateEqual(new, old *kubeadmv1.KubeadmConfigTemplate) bool {
 	// DeepDerivative treats empty map (length == 0) as unset field. We need to manually compare certain fields
 	// such as taints, so that setting it to empty will trigger machine recreate
+	// The Files check with reflect.DeepEqual was added along with kubelet configuration support, so that
+	// removing the files (for example, when a kubelet configuration is dropped) also triggers a machine recreate.
 	return kubeadmConfigTemplateTaintsEqual(new, old) && kubeadmConfigTemplateExtraArgsEqual(new, old) &&
+		reflect.DeepEqual(new.Spec.Template.Spec.Files, old.Spec.Template.Spec.Files) &&
 		equality.Semantic.DeepDerivative(new.Spec, old.Spec)
 }
diff --git a/pkg/clusterapi/workers_test.go b/pkg/clusterapi/workers_test.go
index 746e90626b43..e1771472adb2 100644
--- a/pkg/clusterapi/workers_test.go
+++ b/pkg/clusterapi/workers_test.go
@@ -463,6 +463,49 @@ func TestKubeadmConfigTemplateEqual(t *testing.T) {
 			},
 			want: false,
 		},
+		{
+			name: "diff spec files",
+			new: &kubeadmv1.KubeadmConfigTemplate{
+				Spec: kubeadmv1.KubeadmConfigTemplateSpec{
+					Template: kubeadmv1.KubeadmConfigTemplateResource{
+						Spec: kubeadmv1.KubeadmConfigSpec{
+							JoinConfiguration: &kubeadmv1.JoinConfiguration{
+								NodeRegistration: kubeadmv1.NodeRegistrationOptions{
+									Taints: []corev1.Taint{
+										{
+											Key: "key",
+										},
+									},
+								},
+							},
+							Files: []kubeadmv1.File{
+								{
+									Owner: "me",
+								},
+							},
+						},
+					},
+				},
+			},
+			old: &kubeadmv1.KubeadmConfigTemplate{
+				Spec: kubeadmv1.KubeadmConfigTemplateSpec{
+					Template: kubeadmv1.KubeadmConfigTemplateResource{
+						Spec: kubeadmv1.KubeadmConfigSpec{
+							JoinConfiguration: &kubeadmv1.JoinConfiguration{
+								NodeRegistration: kubeadmv1.NodeRegistrationOptions{
+									Taints: []corev1.Taint{
+										{
+											Key: "key",
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+			want: false,
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
diff --git a/pkg/crypto/tls.go b/pkg/crypto/tls.go
index 91ce599cba03..fd8b4ec72e8d 100644
--- a/pkg/crypto/tls.go
+++ b/pkg/crypto/tls.go
@@ -7,10 +7,10 @@ import (
 // This is what we currently support as the default. In the future,
 // we can make this customizable and return a wider range of
 // supported names.
-func secureCipherSuiteNames() []string {
+func SecureCipherSuiteNames() []string {
 	return []string{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"}
 }
 
 func SecureCipherSuitesString() string {
-	return strings.Join(secureCipherSuiteNames(), ",")
+	return strings.Join(SecureCipherSuiteNames(), ",")
 }
diff --git a/pkg/providers/tinkerbell/config/template-cp.yaml b/pkg/providers/tinkerbell/config/template-cp.yaml
index 838deae8b863..5711bba483c4 100644
--- a/pkg/providers/tinkerbell/config/template-cp.yaml
+++ b/pkg/providers/tinkerbell/config/template-cp.yaml
@@ -161,14 +161,23 @@ spec:
       certificatesDir: /var/lib/kubeadm/pki
 {{- end }}
     initConfiguration:
+{{- if .kubeletConfiguration }}
+      patches:
+        directory: /etc/kubernetes/patches
+{{- end }}
       nodeRegistration:
         kubeletExtraArgs:
           provider-id: PROVIDER_ID
+{{- if not .kubeletConfiguration }}
           read-only-port: "0"
           anonymous-auth: "false"
 {{- if .kubeletExtraArgs }}
 {{ .kubeletExtraArgs.ToYaml | indent 10 }}
 {{- end }}
+{{- end }}
+{{- if .cpNodeLabelArgs }}
+{{ .cpNodeLabelArgs.ToYaml | indent 10 }}
+{{- end }}
 {{- if not .workerNodeGroupConfigurations }}
         taints: []
 {{- end }}
@@ -184,6 +193,10 @@ spec:
 {{- end }}
 {{- end }}
     joinConfiguration:
+{{- if .kubeletConfiguration }}
+      patches:
+        directory: /etc/kubernetes/patches
+{{- end }}
 {{- if (eq .format "bottlerocket") }}
       pause:
         imageRepository: {{.pauseRepository}}
@@ -223,11 +236,16 @@ spec:
         - DirAvailable--etc-kubernetes-manifests
         kubeletExtraArgs:
           provider-id: PROVIDER_ID
+{{- if not .kubeletConfiguration }}
           read-only-port: "0"
           anonymous-auth: "false"
 {{- if .kubeletExtraArgs }}
 {{ .kubeletExtraArgs.ToYaml | indent 10 }}
 {{- end }}
+{{- end }}
+{{- if .cpNodeLabelArgs }}
+{{ .cpNodeLabelArgs.ToYaml | indent 10 }}
+{{- end }}
 {{- if not .workerNodeGroupConfigurations }}
         taints: []
 {{- end }}
@@ -243,6 +261,13 @@ spec:
 {{- end }}
 {{- end }}
     files:
+{{- if .kubeletConfiguration }}
+    - content: |
+{{ .kubeletConfiguration | indent 10 }}
+      owner: root:root
+      permissions: "0644"
+      path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml
+{{- end }}
 {{- if not .cpSkipLoadBalancerDeployment }}
     - content: |
         apiVersion: v1
diff --git a/pkg/providers/tinkerbell/config/template-md.yaml b/pkg/providers/tinkerbell/config/template-md.yaml
index 726b7a4eea5b..d451aa25ad6e 100644
--- a/pkg/providers/tinkerbell/config/template-md.yaml
+++ b/pkg/providers/tinkerbell/config/template-md.yaml
@@ -77,6 +77,10 @@ spec:
   template:
     spec:
       joinConfiguration:
+{{- if .kubeletConfiguration }}
+        patches:
+          directory: /etc/kubernetes/patches
+{{- end }}
 {{- if (eq .format "bottlerocket") }}
         pause:
           imageRepository: {{.pauseRepository}}
@@ -125,14 +129,26 @@ spec:
 {{- end }}
         kubeletExtraArgs:
           provider-id: PROVIDER_ID
+{{- if not .kubeletConfiguration }}
           read-only-port: "0"
           anonymous-auth: "false"
 {{- if .kubeletExtraArgs }}
 {{ .kubeletExtraArgs.ToYaml | indent 12 }}
 {{- end }}
-{{- if and (ne .format "bottlerocket") (or .proxyConfig .registryMirrorMap) }}
+{{- end }}
+{{- if .wnNodeLabelArgs }}
+{{ .wnNodeLabelArgs.ToYaml | indent 12 }}
+{{- end }}
+{{- if or (and (ne .format "bottlerocket") (or .proxyConfig .registryMirrorMap)) .kubeletConfiguration }}
       files:
 {{- end }}
+{{- if .kubeletConfiguration }}
+      - content: |
+{{ .kubeletConfiguration | indent 12 }}
+        owner: root:root
+        permissions: "0644"
+        path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml
+{{- end }}
 {{- if and .proxyConfig (ne .format "bottlerocket") }}
       - content: |
           [Service]
diff --git a/pkg/providers/tinkerbell/template.go b/pkg/providers/tinkerbell/template.go
index 1bae93348efe..998d8a45b73c 100644
--- a/pkg/providers/tinkerbell/template.go
+++ b/pkg/providers/tinkerbell/template.go
@@ -14,6 +14,7 @@ import (
 	"github.com/pkg/errors"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	yamlutil "k8s.io/apimachinery/pkg/util/yaml"
+	"sigs.k8s.io/yaml"
 
 	"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
 	"github.com/aws/eks-anywhere/pkg/cluster"
@@ -66,9 +67,6 @@ func NewTemplateBuilder(datacenterSpec *v1alpha1.TinkerbellDatacenterConfigSpec,
 func (tb *TemplateBuilder) GenerateCAPISpecControlPlane(clusterSpec *cluster.Spec, buildOptions ...providers.BuildMapOption) (content []byte, err error) {
 	cpTemplateConfig := clusterSpec.TinkerbellTemplateConfigs[tb.controlPlaneMachineSpec.TemplateRef.Name]
 	bundle := clusterSpec.RootVersionsBundle()
-	if err != nil {
-		return nil, err
-	}
 
 	var OSImageURL string
 	if tinkerbellIP := clusterSpec.Cluster.HasTinkerbellIPAnnotation(); tinkerbellIP != "" {
@@ -401,9 +399,6 @@ func buildTemplateMapCP(
 		Append(clusterapi.AwsIamAuthExtraArgs(clusterSpec.AWSIamConfig)).
 		Append(clusterapi.APIServerExtraArgs(clusterSpec.Cluster.Spec.ControlPlaneConfiguration.APIServerExtraArgs))
 	clusterapi.SetPodIAMAuthExtraArgs(clusterSpec.Cluster.Spec.PodIAMConfig, apiServerExtraArgs)
 
-	kubeletExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs().
-		Append(clusterapi.ResolvConfExtraArgs(clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf)).
-		Append(clusterapi.ControlPlaneNodeLabelsExtraArgs(clusterSpec.Cluster.Spec.ControlPlaneConfiguration))
 
 	values := map[string]interface{}{
@@ -430,7 +425,6 @@ func buildTemplateMapCP(
 		"etcdImageTag":                  versionsBundle.KubeDistro.Etcd.Tag,
 		"externalEtcdVersion":           versionsBundle.KubeDistro.EtcdVersion,
 		"etcdCipherSuites":              crypto.SecureCipherSuitesString(),
-		"kubeletExtraArgs":              kubeletExtraArgs.ToPartialYaml(),
 		"hardwareSelector":              controlPlaneMachineSpec.HardwareSelector,
 		"controlPlaneTaints":            clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Taints,
 		"workerNodeGroupConfigurations": clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations,
@@ -510,6 +504,36 @@ func buildTemplateMapCP(
 		values["bottlerocketSettings"] = brSettings
 	}
 
+	if clusterSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration != nil {
+		cpKubeletConfig := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration.Object
+
+		if _, ok := cpKubeletConfig["tlsCipherSuites"]; !ok {
+			cpKubeletConfig["tlsCipherSuites"] = crypto.SecureCipherSuiteNames()
+		}
+
+		if _, ok := cpKubeletConfig["resolvConf"]; !ok {
+			if clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf != nil {
+				cpKubeletConfig["resolvConf"] = clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf.Path
+			}
+		}
+		kcString, err := yaml.Marshal(cpKubeletConfig)
+		if err != nil {
+			return nil, fmt.Errorf("marshaling control plane node Kubelet Configuration while building CAPI template: %v", err)
+		}
+
+		values["kubeletConfiguration"] = string(kcString)
+	} else {
+		kubeletExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs().
+			Append(clusterapi.ResolvConfExtraArgs(clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf))
+
+		values["kubeletExtraArgs"] = kubeletExtraArgs.ToPartialYaml()
+	}
+
+	cpNodeLabelArgs := clusterapi.ControlPlaneNodeLabelsExtraArgs(clusterSpec.Cluster.Spec.ControlPlaneConfiguration)
+	if len(cpNodeLabelArgs) != 0 {
+		values["cpNodeLabelArgs"] = cpNodeLabelArgs.ToPartialYaml()
+	}
+
 	return values, nil
 }
 
@@ -523,14 +547,9 @@ func buildTemplateMapMD(
 	versionsBundle := clusterSpec.WorkerNodeGroupVersionsBundle(workerNodeGroupConfiguration)
 	format := "cloud-config"
 
-	kubeletExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs().
-		Append(clusterapi.WorkerNodeLabelsExtraArgs(workerNodeGroupConfiguration)).
-		Append(clusterapi.ResolvConfExtraArgs(clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf))
-
 	values := map[string]interface{}{
 		"clusterName":                clusterSpec.Cluster.Name,
 		"eksaSystemNamespace":        constants.EksaSystemNamespace,
-		"kubeletExtraArgs":           kubeletExtraArgs.ToPartialYaml(),
 		"format":                     format,
 		"kubernetesVersion":          versionsBundle.KubeDistro.Kubernetes.Tag,
 		"workerNodeGroupName":        workerNodeGroupConfiguration.Name,
@@ -586,6 +605,35 @@ func buildTemplateMapMD(
 		values["bottlerocketSettings"] = brSettings
 	}
 
+	if workerNodeGroupConfiguration.KubeletConfiguration != nil {
+		wnKubeletConfig := workerNodeGroupConfiguration.KubeletConfiguration.Object
+		if _, ok := wnKubeletConfig["tlsCipherSuites"]; !ok {
+			wnKubeletConfig["tlsCipherSuites"] = crypto.SecureCipherSuiteNames()
+		}
+
+		if _, ok := wnKubeletConfig["resolvConf"]; !ok {
+			if clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf != nil {
+				wnKubeletConfig["resolvConf"] = clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf.Path
+			}
+		}
+
+		kcString, err := yaml.Marshal(wnKubeletConfig)
+		if err != nil {
+			return nil, fmt.Errorf("marshaling Kubelet Configuration for worker node %s: %v", workerNodeGroupConfiguration.Name, err)
+		}
+
+		values["kubeletConfiguration"] = string(kcString)
+	} else {
+		kubeletExtraArgs := clusterapi.SecureTlsCipherSuitesExtraArgs().
+			Append(clusterapi.ResolvConfExtraArgs(clusterSpec.Cluster.Spec.ClusterNetwork.DNS.ResolvConf))
+		values["kubeletExtraArgs"] = kubeletExtraArgs.ToPartialYaml()
+	}
+
+	wnNodeLabelArgs := clusterapi.WorkerNodeLabelsExtraArgs(workerNodeGroupConfiguration)
+	if len(wnNodeLabelArgs) != 0 {
+		values["wnNodeLabelArgs"] = wnNodeLabelArgs.ToPartialYaml()
+	}
+
 	return values, nil
 }
diff --git a/pkg/providers/tinkerbell/template_test.go b/pkg/providers/tinkerbell/template_test.go
index 58dc084b40fa..ce93ef7c8b05 100644
--- a/pkg/providers/tinkerbell/template_test.go
+++ b/pkg/providers/tinkerbell/template_test.go
@@ -5,9 +5,12 @@ import (
 	"time"
 
 	. "github.com/onsi/gomega"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 
 	"github.com/aws/eks-anywhere/internal/test"
 	"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
+	"github.com/aws/eks-anywhere/pkg/clusterapi"
+	"github.com/aws/eks-anywhere/pkg/utils/ptr"
 )
 
 func TestGenerateTemplateBuilder(t *testing.T) {
@@ -159,7 +162,6 @@ func TestTemplateBuilder_CertSANs(t *testing.T) {
 
 		data, err := bldr.GenerateCAPISpecControlPlane(clusterSpec)
 		g.Expect(err).ToNot(HaveOccurred())
-
 		test.AssertContentToFile(t, string(data), tc.Output)
 	}
 }
@@ -195,3 +197,100 @@ func TestTemplateBuilder(t *testing.T) {
 
 	}
 }
+
+func TestTemplateBuilderCPKubeletConfig(t *testing.T) {
+	for _, tc := range []struct {
+		Input  string
+		Output string
+	}{
+		{
+			Input:  "testdata/cluster_tinkerbell_api_server_cert_san_ip.yaml",
+			Output: "testdata/expected_kcp.yaml",
+		},
+	} {
+		g := NewWithT(t)
+		clusterSpec := test.NewFullClusterSpec(t, tc.Input)
+		cpMachineCfg, _ := getControlPlaneMachineSpec(clusterSpec)
+		wngMachineCfgs, _ := getWorkerNodeGroupMachineSpec(clusterSpec)
+		tinkIPBefore := "0.0.0.0"
+		bldr := NewTemplateBuilder(&clusterSpec.TinkerbellDatacenter.Spec, cpMachineCfg, nil, wngMachineCfgs, tinkIPBefore, time.Now)
+
+		clusterSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration = &unstructured.Unstructured{
+			Object: map[string]interface{}{
+				"maxPods":    20,
+				"apiVersion": "kubelet.config.k8s.io/v1beta1",
+				"kind":       "KubeletConfiguration",
+			},
+		}
+
+		clusterSpec.Cluster.Spec.ClusterNetwork.DNS = v1alpha1.DNS{
+			ResolvConf: &v1alpha1.ResolvConf{
+				Path: "test-path",
+			},
+		}
+
+		data, err := bldr.GenerateCAPISpecControlPlane(clusterSpec)
+		g.Expect(err).ToNot(HaveOccurred())
+		g.Expect(data).To(ContainSubstring("maxPods"))
+		test.AssertContentToFile(t, string(data), tc.Output)
+	}
+}
+
+func TestTemplateBuilderWNKubeletConfig(t *testing.T) {
+	for _, tc := range []struct {
+		Input  string
+		Output string
+	}{
+		{
+			Input:  "testdata/cluster_tinkerbell_api_server_cert_san_ip.yaml",
+			Output: "testdata/expected_kct.yaml",
+		},
+	} {
+		g := NewWithT(t)
+		clusterSpec := test.NewFullClusterSpec(t, tc.Input)
+		clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{
+			{
+				Name:  "test",
+				Count: ptr.Int(1),
+				KubeletConfiguration: &unstructured.Unstructured{
+					Object: map[string]interface{}{
+						"maxPods":    20,
+						"apiVersion": "kubelet.config.k8s.io/v1beta1",
+						"kind":       "KubeletConfiguration",
+					},
+				},
+				MachineGroupRef: &v1alpha1.Ref{
+					Name: "wn-ref",
+					Kind: v1alpha1.TinkerbellMachineConfigKind,
+				},
+			},
+		}
+		clusterSpec.TinkerbellMachineConfigs = map[string]*v1alpha1.TinkerbellMachineConfig{
+			"wn-ref": {
+				Spec: v1alpha1.TinkerbellMachineConfigSpec{
+					Users: []v1alpha1.UserConfiguration{
+						{
+							SshAuthorizedKeys: []string{"ssh abcdef..."},
+							Name:              "user",
+						},
+					},
+				},
+			},
+		}
+
+		clusterSpec.Cluster.Spec.ClusterNetwork.DNS = v1alpha1.DNS{
+			ResolvConf: &v1alpha1.ResolvConf{
+				Path: "test-path",
+			},
+		}
+
+		cpMachineCfg, _ := getControlPlaneMachineSpec(clusterSpec)
+		wngMachineCfgs, _ := getWorkerNodeGroupMachineSpec(clusterSpec)
+		tinkIPBefore := "0.0.0.0"
+		bldr := NewTemplateBuilder(&clusterSpec.TinkerbellDatacenter.Spec, cpMachineCfg, nil, wngMachineCfgs, tinkIPBefore, time.Now)
+		workerTemplateNames, kubeadmTemplateNames := clusterapi.InitialTemplateNamesForWorkers(clusterSpec)
+		data, err := bldr.GenerateCAPISpecWorkers(clusterSpec, workerTemplateNames, kubeadmTemplateNames)
+		g.Expect(err).ToNot(HaveOccurred())
+		test.AssertContentToFile(t, string(data), tc.Output)
+	}
+}
diff --git a/pkg/providers/tinkerbell/testdata/expected_kcp.yaml b/pkg/providers/tinkerbell/testdata/expected_kcp.yaml
new file mode 100644
index 000000000000..bd5a6f07f850
--- /dev/null
+++ b/pkg/providers/tinkerbell/testdata/expected_kcp.yaml
@@ -0,0 +1,350 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+  labels:
+    cluster.x-k8s.io/cluster-name: test
+  name: test
+  namespace: eksa-system
+spec:
+  clusterNetwork:
+    pods:
+      cidrBlocks: [192.168.0.0/16]
+    services:
+      cidrBlocks: [10.96.0.0/12]
+  controlPlaneEndpoint:
+    host: 0.0.0.0
+    port: 6443
+  controlPlaneRef:
+    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+    kind: KubeadmControlPlane
+    name: test
+  infrastructureRef:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+    kind: TinkerbellCluster
+    name: test
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: KubeadmControlPlane
+metadata:
+  name: test
+  namespace: eksa-system
+spec:
+  kubeadmConfigSpec:
+    clusterConfiguration:
+      imageRepository: public.ecr.aws/eks-distro/kubernetes
+      etcd:
+        local:
+          imageRepository: public.ecr.aws/eks-distro/etcd-io
+          imageTag: v3.4.16-eks-1-21-4
+      dns:
+        imageRepository: public.ecr.aws/eks-distro/coredns
+        imageTag: v1.8.3-eks-1-21-4
+      apiServer:
+        certSANs:
+        - 11.11.11.11
+        extraArgs:
+          audit-policy-file: /etc/kubernetes/audit-policy.yaml
+          audit-log-path: /var/log/kubernetes/api-audit.log
+          audit-log-maxage: "30"
+          audit-log-maxbackup: "10"
+          audit-log-maxsize: "512"
+        extraVolumes:
+        - hostPath: /etc/kubernetes/audit-policy.yaml
+          mountPath: /etc/kubernetes/audit-policy.yaml
+          name: audit-policy
+          pathType: File
+          readOnly: true
+        - hostPath: /var/log/kubernetes
+          mountPath: /var/log/kubernetes
+          name: audit-log-dir
+          pathType: DirectoryOrCreate
+          readOnly: false
+    initConfiguration:
+      patches:
+        directory: /etc/kubernetes/patches
+      nodeRegistration:
+        kubeletExtraArgs:
+          provider-id: PROVIDER_ID
+        taints: []
+    joinConfiguration:
+      patches:
+        directory: /etc/kubernetes/patches
+      nodeRegistration:
+        ignorePreflightErrors:
+        - DirAvailable--etc-kubernetes-manifests
+        kubeletExtraArgs:
+          provider-id: PROVIDER_ID
+        taints: []
+    files:
+    - content: |
+          apiVersion: kubelet.config.k8s.io/v1beta1
+          kind: KubeletConfiguration
+          maxPods: 20
+          resolvConf: test-path
+          tlsCipherSuites:
+          - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+
+      owner: root:root
+      permissions: "0644"
+      path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml
+    - content: |
+        apiVersion: v1
+        kind: Pod
+        metadata:
+          creationTimestamp: null
+          name: kube-vip
+          namespace: kube-system
+        spec:
+          containers:
+          - args:
+            - manager
+            env:
+            - name: vip_arp
+              value: "true"
+            - name: port
+              value: "6443"
+            - name: vip_cidr
+              value: "32"
+            - name: cp_enable
+              value: "true"
+            - name: cp_namespace
+              value: kube-system
+            - name: vip_ddns
+              value: "false"
+            - name: vip_leaderelection
+              value: "true"
+            - name: vip_leaseduration
+              value: "15"
+            - name: vip_renewdeadline
+              value: "10"
+            - name: vip_retryperiod
+              value: "2"
+            - name: address
+              value: 0.0.0.0
+            # kube-vip daemon in worker node watches for LoadBalancer services.
+            # When there is no worker node, make kube-vip in control-plane nodes watch
+            - name: svc_enable
+              value: "true"
+            - name: svc_election
+              value: "true"
+            image: public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.7-eks-a-v0.0.0-dev-build.581
+            imagePullPolicy: IfNotPresent
+            name: kube-vip
+            resources: {}
+            securityContext:
+              capabilities:
+                add:
+                - NET_ADMIN
+                - NET_RAW
+            volumeMounts:
+            - mountPath: /etc/kubernetes/admin.conf
+              name: kubeconfig
+          hostNetwork: true
+          volumes:
+          - hostPath:
+              path: /etc/kubernetes/admin.conf
+            name: kubeconfig
+        status: {}
+      owner: root:root
+      path: /etc/kubernetes/manifests/kube-vip.yaml
+    - content: |
+        apiVersion: audit.k8s.io/v1beta1
+        kind: Policy
+        rules:
+        # Log aws-auth configmap changes
+        - level: RequestResponse
+          namespaces: ["kube-system"]
+          verbs: ["update", "patch", "delete"]
+          resources:
+          - group: "" # core
+            resources: ["configmaps"]
+            resourceNames: ["aws-auth"]
+          omitStages:
+          - "RequestReceived"
+        # The following requests were manually identified as high-volume and low-risk,
+        # so drop them.
+        - level: None
+          users: ["system:kube-proxy"]
+          verbs: ["watch"]
+          resources:
+          - group: "" # core
+            resources: ["endpoints", "services", "services/status"]
+        - level: None
+          users: ["kubelet"] # legacy kubelet identity
+          verbs: ["get"]
+          resources:
+          - group: "" # core
+            resources: ["nodes", "nodes/status"]
+        - level: None
+          userGroups: ["system:nodes"]
+          verbs: ["get"]
+          resources:
+          - group: "" # core
+            resources: ["nodes", "nodes/status"]
+        - level: None
+          users:
+          - system:kube-controller-manager
+          - system:kube-scheduler
+          - system:serviceaccount:kube-system:endpoint-controller
+          verbs: ["get", "update"]
+          namespaces: ["kube-system"]
+          resources:
+          - group: "" # core
+            resources: ["endpoints"]
+        - level: None
+          users: ["system:apiserver"]
+          verbs: ["get"]
+          resources:
+          - group: "" # core
+            resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
+        # Don't log HPA fetching metrics.
+        - level: None
+          users:
+          - system:kube-controller-manager
+          verbs: ["get", "list"]
+          resources:
+          - group: "metrics.k8s.io"
+        # Don't log these read-only URLs.
+        - level: None
+          nonResourceURLs:
+          - /healthz*
+          - /version
+          - /swagger*
+        # Don't log events requests.
+        - level: None
+          resources:
+          - group: "" # core
+            resources: ["events"]
+        # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
+        - level: Request
+          users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"]
+          verbs: ["update","patch"]
+          resources:
+          - group: "" # core
+            resources: ["nodes/status", "pods/status"]
+          omitStages:
+          - "RequestReceived"
+        - level: Request
+          userGroups: ["system:nodes"]
+          verbs: ["update","patch"]
+          resources:
+          - group: "" # core
+            resources: ["nodes/status", "pods/status"]
+          omitStages:
+          - "RequestReceived"
+        # deletecollection calls can be large, don't log responses for expected namespace deletions
+        - level: Request
+          users: ["system:serviceaccount:kube-system:namespace-controller"]
+          verbs: ["deletecollection"]
+          omitStages:
+          - "RequestReceived"
+        # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,
+        # so only log at the Metadata level.
+        - level: Metadata
+          resources:
+          - group: "" # core
+            resources: ["secrets", "configmaps"]
+          - group: authentication.k8s.io
+            resources: ["tokenreviews"]
+          omitStages:
+          - "RequestReceived"
+        - level: Request
+          resources:
+          - group: ""
+            resources: ["serviceaccounts/token"]
+        # Get repsonses can be large; skip them.
+        - level: Request
+          verbs: ["get", "list", "watch"]
+          resources:
+          - group: "" # core
+          - group: "admissionregistration.k8s.io"
+          - group: "apiextensions.k8s.io"
+          - group: "apiregistration.k8s.io"
+          - group: "apps"
+          - group: "authentication.k8s.io"
+          - group: "authorization.k8s.io"
+          - group: "autoscaling"
+          - group: "batch"
+          - group: "certificates.k8s.io"
+          - group: "extensions"
+          - group: "metrics.k8s.io"
+          - group: "networking.k8s.io"
+          - group: "policy"
+          - group: "rbac.authorization.k8s.io"
+          - group: "scheduling.k8s.io"
+          - group: "settings.k8s.io"
+          - group: "storage.k8s.io"
+          omitStages:
+          - "RequestReceived"
+        # Default level for known APIs
+        - level: RequestResponse
+          resources:
+          - group: "" # core
+          - group: "admissionregistration.k8s.io"
+          - group: "apiextensions.k8s.io"
+          - group: "apiregistration.k8s.io"
+          - group: "apps"
+          - group: "authentication.k8s.io"
+          - group: "authorization.k8s.io"
+          - group: "autoscaling"
+          - group: "batch"
+          - group: "certificates.k8s.io"
+          - group: "extensions"
+          - group: "metrics.k8s.io"
+          - group: "networking.k8s.io"
+          - group: "policy"
+          - group: "rbac.authorization.k8s.io"
+          - group: "scheduling.k8s.io"
+          - group: "settings.k8s.io"
+          - group: "storage.k8s.io"
+          omitStages:
+          - "RequestReceived"
+        # Default level for all other requests.
+        - level: Metadata
+          omitStages:
+          - "RequestReceived"
+      owner: root:root
+      path: /etc/kubernetes/audit-policy.yaml
+    users:
+    - name: tink-user
+      sshAuthorizedKeys:
+      - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com'
+      sudo: ALL=(ALL) NOPASSWD:ALL
+    format: cloud-config
+  machineTemplate:
+    infrastructureRef:
+      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+      kind: TinkerbellMachineTemplate
+      name:
+  replicas: 1
+  version: v1.21.2-eks-1-21-4
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: TinkerbellMachineTemplate
+metadata:
+  name:
+  namespace: eksa-system
+spec:
+  template:
+    spec:
+      hardwareAffinity:
+        required:
+        - labelSelector:
+            matchLabels:
+              type: node
+      templateOverride: |
+        global_timeout: 0
+        id: ""
+        name: ""
+        tasks: null
+        version: ""
+
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: TinkerbellCluster
+metadata:
+  name: test
+  namespace: eksa-system
+spec:
+  imageLookupFormat: --kube-v1.21.2-eks-1-21-4.raw.gz
+  imageLookupBaseRegistry: /
\ No newline at end of file
diff --git a/pkg/providers/tinkerbell/testdata/expected_kct.yaml b/pkg/providers/tinkerbell/testdata/expected_kct.yaml
new file mode 100644
index 000000000000..3c7312bdc243
--- /dev/null
+++ b/pkg/providers/tinkerbell/testdata/expected_kct.yaml
@@ -0,0 +1,162 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+  labels:
+    cluster.x-k8s.io/cluster-name: test
+    pool: test
+  name: test-test
+  namespace: eksa-system
+spec:
+  clusterName: test
+  replicas: 1
+  selector:
+    matchLabels: {}
+  template:
+    metadata:
+      labels:
+        cluster.x-k8s.io/cluster-name: test
+        pool: test
+    spec:
+      bootstrap:
+        configRef:
+          apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+          kind: KubeadmConfigTemplate
+          name: test-test-1
+      clusterName: test
+      infrastructureRef:
+        apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+        kind: TinkerbellMachineTemplate
+        name: test-test-1
+      version: v1.21.2-eks-1-21-4
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: TinkerbellMachineTemplate
+metadata:
+  name: test-test-1
+  namespace: eksa-system
+spec:
+  template:
+    spec:
+      hardwareAffinity:
+        required:
+        - labelSelector:
+            matchLabels:
+      templateOverride: |
+        global_timeout: 6000
+        id: ""
+        name: test
+        tasks:
+        - actions:
+          - environment:
+              COMPRESSED: "true"
+              DEST_DISK: '{{ index .Hardware.Disks 0 }}'
+              IMG_URL: https://ubuntu-1-21.gz
+            image: ""
+            name: stream-image
+            timeout: 600
+          - environment:
+              DEST_DISK: '{{ formatPartition ( index .Hardware.Disks 0 ) 2 }}'
+              DEST_PATH: /etc/netplan/config.yaml
+              DIRMODE: "0755"
+              FS_TYPE: ext4
+              GID: "0"
+              MODE: "0644"
+              STATIC_NETPLAN: "true"
+              UID: "0"
+            image: ""
+            name: write-netplan
+            pid: host
+            timeout: 90
+          - environment:
+              CONTENTS: 'network: {config: disabled}'
+              DEST_DISK: '{{ formatPartition ( index .Hardware.Disks 0 ) 2 }}'
+              DEST_PATH: /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg
+              DIRMODE: "0700"
+              FS_TYPE: ext4
+              GID: "0"
+              MODE: "0600"
+              UID: "0"
+            image: ""
+            name: disable-cloud-init-network-capabilities
+            timeout: 90
+          - environment:
+              CONTENTS: |
+                datasource:
+                  Ec2:
+                    metadata_urls: [http://0.0.0.0:50061,http://5.6.7.8:50061]
+                    strict_id: false
+                manage_etc_hosts: localhost
+                warnings:
+                  dsid_missing_source: off
+              DEST_DISK: '{{ formatPartition ( index .Hardware.Disks 0 ) 2 }}'
+              DEST_PATH: /etc/cloud/cloud.cfg.d/10_tinkerbell.cfg
+              DIRMODE: "0700"
+              FS_TYPE: ext4
+              GID: "0"
+              MODE: "0600"
+              UID: "0"
+            image: ""
+            name: add-tink-cloud-init-config
+            timeout: 90
+          - environment:
+              CONTENTS: |
+                datasource: Ec2
+              DEST_DISK: '{{ formatPartition ( index .Hardware.Disks 0 ) 2 }}'
+              DEST_PATH: /etc/cloud/ds-identify.cfg
+              DIRMODE: "0700"
+              FS_TYPE: ext4
+              GID: "0"
+              MODE: "0600"
+              UID: "0"
+            image: ""
+            name: add-tink-cloud-init-ds-config
+            timeout: 90
+          - image: ""
+            name: reboot-image
+            pid: host
+            timeout: 90
+            volumes:
+            - /worker:/worker
+          name: test
+          volumes:
+          - /dev:/dev
+          - /dev/console:/dev/console
+          - /lib/firmware:/lib/firmware:ro
+          worker: '{{.device_1}}'
+        version: "0.1"
+
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+  name: test-test-1
+  namespace: eksa-system
+spec:
+  template:
+    spec:
+      joinConfiguration:
+        patches:
+          directory: /etc/kubernetes/patches
+        nodeRegistration:
+          kubeletExtraArgs:
+            provider-id: PROVIDER_ID
+      files:
+      - content: |
+            apiVersion: kubelet.config.k8s.io/v1beta1
+            kind: KubeletConfiguration
+            maxPods: 20
+            resolvConf: test-path
+            tlsCipherSuites:
+            - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+
+        owner: root:root
+        permissions: "0644"
+        path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml
+      users:
+      - name: user
+        sshAuthorizedKeys:
+        - 'ssh abcdef...'
+        sudo: ALL=(ALL) NOPASSWD:ALL
+      format: cloud-config
+
+---
diff --git a/pkg/providers/tinkerbell/testdata/expected_results_cluster_tinkerbell_cp_node_labels.yaml b/pkg/providers/tinkerbell/testdata/expected_results_cluster_tinkerbell_cp_node_labels.yaml
index e4132e61cb26..19509b3d895e 100755
--- a/pkg/providers/tinkerbell/testdata/expected_results_cluster_tinkerbell_cp_node_labels.yaml
+++ b/pkg/providers/tinkerbell/testdata/expected_results_cluster_tinkerbell_cp_node_labels.yaml
@@ -63,8 +63,8 @@ spec:
           provider-id: PROVIDER_ID
           read-only-port: "0"
           anonymous-auth: "false"
-          node-labels: key1-cp=value1-cp,key2-cp=value2-cp
           tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+          node-labels: key1-cp=value1-cp,key2-cp=value2-cp
     joinConfiguration:
       nodeRegistration:
         ignorePreflightErrors:
@@ -73,8 +73,8 @@ spec:
           provider-id: PROVIDER_ID
           read-only-port: "0"
           anonymous-auth: "false"
-          node-labels: key1-cp=value1-cp,key2-cp=value2-cp
           tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+          node-labels: key1-cp=value1-cp,key2-cp=value2-cp
     files:
     - content: |
         apiVersion: v1
diff --git a/pkg/providers/tinkerbell/testdata/expected_results_cluster_tinkerbell_md_node_labels.yaml b/pkg/providers/tinkerbell/testdata/expected_results_cluster_tinkerbell_md_node_labels.yaml
index 46f95b516938..60cc765b3e0c 100755
--- a/pkg/providers/tinkerbell/testdata/expected_results_cluster_tinkerbell_md_node_labels.yaml
+++ b/pkg/providers/tinkerbell/testdata/expected_results_cluster_tinkerbell_md_node_labels.yaml
@@ -159,8 +159,8 @@ spec:
             provider-id: PROVIDER_ID
             read-only-port: "0"
            anonymous-auth: "false"
-            node-labels: key1-md=value1-md,key2-md=value2-md
             tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+            node-labels: key1-md=value1-md,key2-md=value2-md
       users:
       - name: tink-user
         sshAuthorizedKeys:
diff --git a/pkg/providers/tinkerbell/testdata/expected_results_cluster_tinkerbell_md_node_worker_version.yaml b/pkg/providers/tinkerbell/testdata/expected_results_cluster_tinkerbell_md_node_worker_version.yaml
index ed7e94af8c4e..09cc688b8d8d 100755
--- a/pkg/providers/tinkerbell/testdata/expected_results_cluster_tinkerbell_md_node_worker_version.yaml
+++ b/pkg/providers/tinkerbell/testdata/expected_results_cluster_tinkerbell_md_node_worker_version.yaml
@@ -159,8 +159,8 @@ spec:
             provider-id: PROVIDER_ID
             read-only-port: "0"
             anonymous-auth: "false"
-            node-labels: key1-md=value1-md,key2-md=value2-md
             tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+            node-labels: key1-md=value1-md,key2-md=value2-md
       users:
       - name: tink-user
         sshAuthorizedKeys:
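Note on the workers.go change: equality.Semantic.DeepDerivative ignores fields that are unset (nil or zero-length) in its first argument, so removing every entry from Files would never register as a difference and the machines would never be recreated. A minimal, self-contained sketch of that behavior, assuming illustrative File/Spec stand-ins for the kubeadm bootstrap API types:

package main

import (
	"fmt"
	"reflect"

	"k8s.io/apimachinery/pkg/api/equality"
)

// File and Spec are illustrative stand-ins for the kubeadm bootstrap API types.
type File struct {
	Path  string
	Owner string
}

type Spec struct {
	Files []File
}

func main() {
	old := Spec{Files: []File{{Path: "/etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml", Owner: "root:root"}}}
	updated := Spec{} // all files removed, e.g. the kubelet configuration was dropped

	// DeepDerivative treats the unset Files in `updated` as "not specified" and
	// reports the specs as equal, so this change alone would never roll machines.
	fmt.Println(equality.Semantic.DeepDerivative(updated, old)) // true

	// reflect.DeepEqual sees nil != one-element slice, which is what the explicit
	// Files comparison in KubeadmConfigTemplateEqual relies on.
	fmt.Println(reflect.DeepEqual(updated.Files, old.Files)) // false
}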
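Note on the template.go changes: the control-plane and worker branches follow the same shape — default tlsCipherSuites and resolvConf into the user-supplied KubeletConfiguration map only when unset, then marshal the map into the content of the kubeadm patches file. A condensed sketch of that flow; the defaultAndRender helper and sample values are illustrative, not part of the change:

package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

// defaultAndRender mirrors the defaulting done in buildTemplateMapCP/buildTemplateMapMD:
// fill in tlsCipherSuites and resolvConf only if the user has not set them, then
// marshal the map into the YAML that lands in the kubeadm patches file.
func defaultAndRender(kubeletConfig map[string]interface{}, cipherSuites []string, resolvConfPath string) (string, error) {
	if _, ok := kubeletConfig["tlsCipherSuites"]; !ok {
		kubeletConfig["tlsCipherSuites"] = cipherSuites
	}
	if _, ok := kubeletConfig["resolvConf"]; !ok && resolvConfPath != "" {
		kubeletConfig["resolvConf"] = resolvConfPath
	}
	out, err := yaml.Marshal(kubeletConfig)
	if err != nil {
		return "", fmt.Errorf("marshaling kubelet configuration: %v", err)
	}
	return string(out), nil
}

func main() {
	// Same shape as the unstructured KubeletConfiguration object used in the tests.
	kc := map[string]interface{}{
		"apiVersion": "kubelet.config.k8s.io/v1beta1",
		"kind":       "KubeletConfiguration",
		"maxPods":    20,
	}
	rendered, _ := defaultAndRender(kc, []string{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"}, "test-path")
	// sigs.k8s.io/yaml marshals map keys alphabetically, which is why the expected
	// testdata lists apiVersion, kind, maxPods, resolvConf, tlsCipherSuites in that order.
	fmt.Print(rendered)
}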
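One subtlety the template changes encode: node labels cannot move into the KubeletConfiguration file, because kubelet.config.k8s.io/v1beta1 has no field for them — they are only settable through the kubelet's --node-labels flag. That is why cpNodeLabelArgs/wnNodeLabelArgs are still rendered under kubeletExtraArgs even when a kubelet configuration file is in use. A hypothetical distillation of how such a flag value could be built (nodeLabelsFlag is a stand-in, not the actual clusterapi helper):

package main

import (
	"fmt"
	"sort"
	"strings"
)

// nodeLabelsFlag builds the value for the kubelet --node-labels flag from a label map.
// Hypothetical stand-in for clusterapi.ControlPlaneNodeLabelsExtraArgs / WorkerNodeLabelsExtraArgs.
func nodeLabelsFlag(labels map[string]string) string {
	pairs := make([]string, 0, len(labels))
	for k, v := range labels {
		pairs = append(pairs, k+"="+v)
	}
	sort.Strings(pairs) // deterministic ordering, matching the expected testdata
	return strings.Join(pairs, ",")
}

func main() {
	labels := map[string]string{"key1-cp": "value1-cp", "key2-cp": "value2-cp"}
	// Rendered under kubeletExtraArgs as: node-labels: key1-cp=value1-cp,key2-cp=value2-cp
	fmt.Println(nodeLabelsFlag(labels))
}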