diff --git a/Makefile b/Makefile index c74e0db6a7..9bbb9f4d46 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,8 @@ IMG_TAG := latest endif ifeq (${LOCAL_PROVIDER_VERSION},latest) -LOCAL_PROVIDER_VERSION := v0.0.0 +# Change this version after release when required, here and in the e2e config (test/e2e/config/nutanix.yaml) +LOCAL_PROVIDER_VERSION := v1.3.99 endif # PLATFORMS is a list of platforms to build for. @@ -132,6 +133,21 @@ else GOBIN=$(shell go env GOBIN) endif +# Get the latest git hash +GIT_COMMIT_HASH=$(shell git rev-parse HEAD) + +# Get the local image registry required for clusterctl upgrade tests +LOCAL_IMAGE_REGISTRY ?= localhost:5000 + +ifeq (${MAKECMDGOALS},test-e2e-clusterctl-upgrade) + IMG_TAG=e2e-${GIT_COMMIT_HASH} + IMG_REPO=${LOCAL_IMAGE_REGISTRY}/controller +endif + +ifeq (${MAKECMDGOALS},docker-build-e2e) + IMG_TAG=e2e-${GIT_COMMIT_HASH} +endif + # Setting SHELL to bash allows bash commands to be executed by recipes. # This is a requirement for 'setup-envtest.sh' in the test target. # Options are set to exit when a recipe line exits non-zero or a piped command fails. @@ -234,8 +250,8 @@ kind-delete: ## Delete the kind cluster .PHONY: build build: generate fmt ## Build manager binary. - GIT_COMMIT_HASH=`git rev-parse HEAD` && \ - go build -ldflags "-X main.gitCommitHash=$${GIT_COMMIT_HASH}" -o bin/manager main.go + echo "Git commit hash: ${GIT_COMMIT_HASH}" + go build -ldflags "-X main.gitCommitHash=${GIT_COMMIT_HASH}" -o bin/manager main.go .PHONY: run run: manifests generate fmt vet ## Run a controller from your host. @@ -243,8 +259,8 @@ run: manifests generate fmt vet ## Run a controller from your host. .PHONY: docker-build docker-build: $(KO) ## Build docker image with the manager. - GIT_COMMIT_HASH=`git rev-parse HEAD` && \ - KO_DOCKER_REPO=ko.local GOFLAGS="-ldflags=-X=main.gitCommitHash=$${GIT_COMMIT_HASH}" $(KO) build -B --platform=${PLATFORMS} -t ${IMG_TAG} -L . + echo "Git commit hash: ${GIT_COMMIT_HASH}" + KO_DOCKER_REPO=ko.local GOFLAGS="-ldflags=-X=main.gitCommitHash=${GIT_COMMIT_HASH}" $(KO) build -B --platform=${PLATFORMS} -t ${IMG_TAG} -L . .PHONY: docker-push docker-push: $(KO) ## Push docker image with the manager.
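Note on the -X linker flag used by the build, docker-build, and docker-build-e2e recipes above: the Go linker overwrites a package-level string in package main at link time, which is how the commit hash reaches the binary without any generated code. A minimal sketch of the consuming side, assuming only what the flag itself implies (a gitCommitHash variable in package main; the startup log line is illustrative, not the repository's actual main.go):

package main

import "fmt"

// gitCommitHash is injected at build time via:
//	go build -ldflags "-X main.gitCommitHash=$(git rev-parse HEAD)"
// It stays empty when the flag is omitted, e.g. in a plain `go test ./...`.
var gitCommitHash string

func main() {
	// Report build provenance on startup before the manager takes over.
	fmt.Printf("starting manager, git commit: %q\n", gitCommitHash)
}

Note also that ifeq (${MAKECMDGOALS},...) compares against the full goal list passed on the make command line, so the IMG_TAG/IMG_REPO overrides above take effect only when test-e2e-clusterctl-upgrade or docker-build-e2e is the sole goal of the invocation.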
@@ -285,7 +301,10 @@ undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/confi ##@ Templates .PHONY: cluster-e2e-templates -cluster-e2e-templates: $(KUSTOMIZE) cluster-e2e-templates-v1beta1 cluster-e2e-templates-v1alpha4 ## Generate cluster templates for all versions +cluster-e2e-templates: $(KUSTOMIZE) cluster-e2e-templates-v1beta1 cluster-e2e-templates-v1alpha4 cluster-e2e-templates-v124 ## Generate cluster templates for all versions + +cluster-e2e-templates-v124: $(KUSTOMIZE) ## Generate cluster templates for CAPX v1.2.4 + $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1.2.4/cluster-template --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1.2.4/cluster-template.yaml cluster-e2e-templates-v1alpha4: $(KUSTOMIZE) ## Generate cluster templates for v1alpha4 $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1alpha4/cluster-template --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1alpha4/cluster-template.yaml @@ -297,7 +316,6 @@ cluster-e2e-templates-v1beta1: $(KUSTOMIZE) ## Generate cluster templates for v1 $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-additional-categories --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-additional-categories.yaml $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-no-nmt --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-no-nmt.yaml $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-project --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-project.yaml - $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-ccm --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-ccm.yaml $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-upgrades --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-upgrades.yaml $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-md-remediation --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-md-remediation.yaml $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-kcp-remediation --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-kcp-remediation.yaml @@ -316,7 +334,6 @@ cluster-e2e-templates-no-kubeproxy: $(KUSTOMIZE) ##Generate cluster templates wi $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-additional-categories --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-additional-categories.yaml $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-no-nmt --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-no-nmt.yaml $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-project --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-project.yaml - $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-ccm --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-ccm.yaml $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-upgrades --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-upgrades.yaml $(KUSTOMIZE) build 
$(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-md-remediation --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-md-remediation.yaml $(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-kcp-remediation --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-kcp-remediation.yaml @@ -327,15 +344,14 @@ cluster-e2e-templates-no-kubeproxy: $(KUSTOMIZE) ##Generate cluster templates wi cluster-templates: $(KUSTOMIZE) ## Generate cluster templates for all flavors $(KUSTOMIZE) build $(TEMPLATES_DIR)/base > $(TEMPLATES_DIR)/cluster-template.yaml $(KUSTOMIZE) build $(TEMPLATES_DIR)/csi > $(TEMPLATES_DIR)/cluster-template-csi.yaml - $(KUSTOMIZE) build $(TEMPLATES_DIR)/ccm > $(TEMPLATES_DIR)/cluster-template-ccm.yaml ##@ Testing .PHONY: docker-build-e2e docker-build-e2e: $(KO) ## Build docker image with the manager with e2e tag. - GIT_COMMIT_HASH=`git rev-parse HEAD` && \ - KO_DOCKER_REPO=ko.local GOFLAGS="-ldflags=-X=main.gitCommitHash=$${GIT_COMMIT_HASH}" $(KO) build -B --platform=${PLATFORMS_E2E} -t e2e -L . - docker tag ko.local/cluster-api-provider-nutanix:e2e ${IMG_REPO}:e2e + echo "Git commit hash: ${GIT_COMMIT_HASH}" + KO_DOCKER_REPO=ko.local GOFLAGS="-ldflags=-X=main.gitCommitHash=${GIT_COMMIT_HASH}" $(KO) build -B --platform=${PLATFORMS_E2E} -t ${IMG_TAG} -L . + docker tag ko.local/cluster-api-provider-nutanix:${IMG_TAG} ${IMG_REPO}:e2e .PHONY: prepare-local-clusterctl prepare-local-clusterctl: manifests kustomize cluster-templates ## Prepare override file for local clusterctl. @@ -468,6 +484,12 @@ test-e2e-cilium-no-kubeproxy: .PHONY: test-e2e-all-cni test-e2e-all-cni: test-e2e test-e2e-calico test-e2e-flannel test-e2e-cilium test-e2e-cilium-no-kubeproxy +.PHONY: test-e2e-clusterctl-upgrade +test-e2e-clusterctl-upgrade: docker-build-e2e $(GINKGO_BIN) cluster-e2e-templates cluster-templates ## Run the clusterctl upgrade end-to-end tests + echo "Image tag for E2E test is ${IMG_TAG}" + docker tag ko.local/cluster-api-provider-nutanix:${IMG_TAG} ${IMG_REPO}:${IMG_TAG} + docker push ${IMG_REPO}:${IMG_TAG} + GINKGO_SKIP="" GIT_COMMIT="${GIT_COMMIT_HASH}" $(MAKE) test-e2e-calico ## -------------------------------------- ## Hack / Tools diff --git a/api/v1alpha4/nutanixmachine_types.go b/api/v1alpha4/nutanixmachine_types.go index d2d6cbe974..c46d86f948 100644 --- a/api/v1alpha4/nutanixmachine_types.go +++ b/api/v1alpha4/nutanixmachine_types.go @@ -108,6 +108,7 @@ type NutanixMachineStatus struct { VmUUID string `json:"vmUUID,omitempty"` // NodeRef is a reference to the corresponding workload cluster Node if it exists. + // Deprecated: Do not use. Will be removed in a future release.
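+ // The controller no longer sets this field: the reconcileNode logic that populated it is removed elsewhere in this change.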
// +optional NodeRef *corev1.ObjectReference `json:"nodeRef,omitempty"` @@ -130,7 +131,6 @@ type NutanixMachineStatus struct { //+kubebuilder:printcolumn:name="Address",type="string",JSONPath=".status.addresses[0].address",description="The VM address" // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="NutanixMachine ready status" // +kubebuilder:printcolumn:name="ProviderID",type="string",JSONPath=".spec.providerID",description="NutanixMachine instance ID" -// +kubebuilder:printcolumn:name="NodeRef",type="string",JSONPath=".status.nodeRef.name",description="Corresponding workload cluster node" // NutanixMachine is the Schema for the nutanixmachines API type NutanixMachine struct { diff --git a/api/v1beta1/nutanixmachine_types.go b/api/v1beta1/nutanixmachine_types.go index 958c5f6eff..170011f60d 100644 --- a/api/v1beta1/nutanixmachine_types.go +++ b/api/v1beta1/nutanixmachine_types.go @@ -115,6 +115,7 @@ type NutanixMachineStatus struct { VmUUID string `json:"vmUUID,omitempty"` // NodeRef is a reference to the corresponding workload cluster Node if it exists. + // Deprecated: Do not use. Will be removed in a future release. // +optional NodeRef *corev1.ObjectReference `json:"nodeRef,omitempty"` @@ -138,7 +139,6 @@ type NutanixMachineStatus struct { //+kubebuilder:printcolumn:name="Address",type="string",JSONPath=".status.addresses[0].address",description="The VM address" // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="NutanixMachine ready status" // +kubebuilder:printcolumn:name="ProviderID",type="string",JSONPath=".spec.providerID",description="NutanixMachine instance ID" -// +kubebuilder:printcolumn:name="NodeRef",type="string",JSONPath=".status.nodeRef.name",description="Corresponding workload cluster node" // NutanixMachine is the Schema for the nutanixmachines API type NutanixMachine struct { diff --git a/controllers/nutanixcluster_controller.go b/controllers/nutanixcluster_controller.go index 22f0029ae3..963b3d1853 100644 --- a/controllers/nutanixcluster_controller.go +++ b/controllers/nutanixcluster_controller.go @@ -333,9 +333,11 @@ func (r *NutanixClusterReconciler) reconcileCredentialRefDelete(ctx context.Cont log := ctrl.LoggerFrom(ctx) credentialRef, err := nutanixClient.GetCredentialRefForCluster(nutanixCluster) if err != nil { + log.Error(err, fmt.Sprintf("error occurred while getting credential ref for cluster %s", nutanixCluster.Name)) return err } if credentialRef == nil { + log.V(1).Info(fmt.Sprintf("Credential ref is nil for cluster %s. Ignoring since object must be deleted", nutanixCluster.Name)) return nil } log.V(1).Info(fmt.Sprintf("Credential ref is kind Secret for cluster %s. 
Continue with deletion of secret", nutanixCluster.Name)) @@ -360,7 +362,7 @@ func (r *NutanixClusterReconciler) reconcileCredentialRefDelete(ctx context.Cont if secret.DeletionTimestamp.IsZero() { log.Info(fmt.Sprintf("removing secret %s in namespace %s for cluster %s", secret.Name, secret.Namespace, nutanixCluster.Name)) - if err := r.Client.Delete(ctx, secret); err != nil { + if err := r.Client.Delete(ctx, secret); err != nil && !errors.IsNotFound(err) { return err } } diff --git a/controllers/nutanixcluster_controller_test.go b/controllers/nutanixcluster_controller_test.go index 16b98403a1..3de385cbe3 100644 --- a/controllers/nutanixcluster_controller_test.go +++ b/controllers/nutanixcluster_controller_test.go @@ -393,4 +393,172 @@ func TestNutanixClusterReconciler(t *testing.T) { }) }) }) + + _ = Describe("NutanixCluster reconcileCredentialRefDelete", func() { + Context("Delete credentials ref reconcile succeeds", func() { + It("Should not return error", func() { + ctx := context.Background() + reconciler := &NutanixClusterReconciler{ + Client: k8sClient, + Scheme: runtime.NewScheme(), + } + + ntnxCluster := &infrav1.NutanixCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + Spec: infrav1.NutanixClusterSpec{ + PrismCentral: &credentialTypes.NutanixPrismEndpoint{ + // Adding port info to override default value (0) + Port: 9440, + CredentialRef: &credentialTypes.NutanixCredentialReference{ + Name: "test", + Namespace: "default", + Kind: "Secret", + }, + }, + }, + } + + ntnxSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + StringData: map[string]string{ + "credentials": "[{\"type\": \"basic_auth\", \"data\": { \"prismCentral\":{\"username\": \"nutanix_user\", \"password\": \"nutanix_pass\"}}}]", + }, + } + + // Create the NutanixSecret object + g.Expect(k8sClient.Create(ctx, ntnxSecret)).To(Succeed()) + + // Create the NutanixCluster object + g.Expect(k8sClient.Create(ctx, ntnxCluster)).To(Succeed()) + defer func() { + err := k8sClient.Delete(ctx, ntnxCluster) + Expect(err).NotTo(HaveOccurred()) + }() + + // Add finalizer to Nutanix Secret + g.Expect(ctrlutil.AddFinalizer(ntnxSecret, infrav1.NutanixClusterCredentialFinalizer)).To(BeTrue()) + g.Expect(k8sClient.Update(ctx, ntnxSecret)).To(Succeed()) + + // Reconcile delete of the credential ref + err := reconciler.reconcileCredentialRefDelete(ctx, ntnxCluster) + g.Expect(err).NotTo(HaveOccurred()) + + // Check that Nutanix Secret is deleted + g.Expect(k8sClient.Get(ctx, client.ObjectKey{ + Namespace: ntnxSecret.Namespace, + Name: ntnxSecret.Name, + }, ntnxSecret)).ToNot(Succeed()) + }) + }) + + Context("Delete credentials ref reconcile failed: no credential ref", func() { + It("Should return error", func() { + ctx := context.Background() + reconciler := &NutanixClusterReconciler{ + Client: k8sClient, + Scheme: runtime.NewScheme(), + } + + ntnxCluster := &infrav1.NutanixCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + Spec: infrav1.NutanixClusterSpec{ + PrismCentral: &credentialTypes.NutanixPrismEndpoint{ + // Adding port info to override default value (0) + Port: 9440, + }, + }, + } + + // Create the NutanixCluster object + g.Expect(k8sClient.Create(ctx, ntnxCluster)).To(Succeed()) + defer func() { + err := k8sClient.Delete(ctx, ntnxCluster) + Expect(err).NotTo(HaveOccurred()) + }() + + // Reconcile delete of the credential ref + err := reconciler.reconcileCredentialRefDelete(ctx, ntnxCluster) +
g.Expect(err).To(HaveOccurred()) + }) + }) + + Context("Delete credentials ref reconcile succeeds: there is no secret", func() { + It("Should not return error", func() { + ctx := context.Background() + reconciler := &NutanixClusterReconciler{ + Client: k8sClient, + Scheme: runtime.NewScheme(), + } + + ntnxCluster := &infrav1.NutanixCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + Spec: infrav1.NutanixClusterSpec{ + PrismCentral: &credentialTypes.NutanixPrismEndpoint{ + // Adding port info to override default value (0) + Port: 9440, + CredentialRef: &credentialTypes.NutanixCredentialReference{ + Name: "test", + Namespace: "default", + Kind: "Secret", + }, + }, + }, + } + + // Create the NutanixCluster object + g.Expect(k8sClient.Create(ctx, ntnxCluster)).To(Succeed()) + defer func() { + err := k8sClient.Delete(ctx, ntnxCluster) + Expect(err).NotTo(HaveOccurred()) + }() + + // Reconcile delete of the credential ref + err := reconciler.reconcileCredentialRefDelete(ctx, ntnxCluster) + g.Expect(err).NotTo(HaveOccurred()) + }) + }) + + Context("Delete credentials ref reconcile succeeds: PrismCentral info is nil", func() { + It("Should not return error", func() { + ctx := context.Background() + reconciler := &NutanixClusterReconciler{ + Client: k8sClient, + Scheme: runtime.NewScheme(), + } + + ntnxCluster := &infrav1.NutanixCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + Spec: infrav1.NutanixClusterSpec{ + PrismCentral: nil, + }, + } + + // Create the NutanixCluster object + g.Expect(k8sClient.Create(ctx, ntnxCluster)).To(Succeed()) + defer func() { + err := k8sClient.Delete(ctx, ntnxCluster) + Expect(err).NotTo(HaveOccurred()) + }() + + // Reconcile delete of the credential ref + err := reconciler.reconcileCredentialRefDelete(ctx, ntnxCluster) + g.Expect(err).NotTo(HaveOccurred()) + }) + }) + }) } diff --git a/controllers/nutanixmachine_controller.go b/controllers/nutanixmachine_controller.go index 3dd2bcadf7..48ec100ceb 100644 --- a/controllers/nutanixmachine_controller.go +++ b/controllers/nutanixmachine_controller.go @@ -373,10 +373,6 @@ func (r *NutanixMachineReconciler) reconcileNormal(rctx *nctx.MachineContext) (r } log.Info(fmt.Sprintf("The NutanixMachine is ready, providerID: %s", rctx.NutanixMachine.Spec.ProviderID)) - if rctx.NutanixMachine.Status.NodeRef == nil { - return r.reconcileNode(rctx) - } - return reconcile.Result{}, nil } @@ -445,73 +441,6 @@ func (r *NutanixMachineReconciler) reconcileNormal(rctx *nctx.MachineContext) (r return reconcile.Result{}, nil } -// reconcileNode makes sure the NutanixMachine corresponding workload cluster node -// is ready and set its spec.providerID -func (r *NutanixMachineReconciler) reconcileNode(rctx *nctx.MachineContext) (reconcile.Result, error) { - log := ctrl.LoggerFrom(rctx.Context) - log.V(1).Info("Reconcile the workload cluster node to set its spec.providerID") - - clusterKey := apitypes.NamespacedName{ - Namespace: rctx.Cluster.Namespace, - Name: rctx.Cluster.Name, - } - remoteClient, err := nctx.GetRemoteClient(rctx.Context, r.Client, clusterKey) - if err != nil { - if r.isGetRemoteClientConnectionError(err) { - log.Info(fmt.Sprintf("Controlplane endpoint not yet responding. Requeuing: %v", err)) - return reconcile.Result{Requeue: true}, nil - } - log.Info(fmt.Sprintf("Failed to get the client to access remote workload cluster %s.
%v", rctx.Cluster.Name, err)) - return reconcile.Result{}, err - } - - // Retrieve the remote node - nodeName := rctx.Machine.Name - node := &corev1.Node{} - nodeKey := apitypes.NamespacedName{ - Namespace: "", - Name: nodeName, - } - - if err := remoteClient.Get(rctx.Context, nodeKey, node); err != nil { - if apierrors.IsNotFound(err) { - log.Info(fmt.Sprintf("workload node %s not yet ready. Requeuing", nodeName)) - return reconcile.Result{Requeue: true}, nil - } else { - log.Error(err, fmt.Sprintf("failed to retrieve the remote workload cluster node %s", nodeName)) - return reconcile.Result{}, err - } - } - - // Set the NutanixMachine Status.NodeRef - if rctx.NutanixMachine.Status.NodeRef == nil { - rctx.NutanixMachine.Status.NodeRef = &corev1.ObjectReference{ - Kind: node.Kind, - APIVersion: node.APIVersion, - Name: node.Name, - UID: node.UID, - } - log.V(1).Info(fmt.Sprintf("Set NutanixMachine's status.nodeRef: %v", *rctx.NutanixMachine.Status.NodeRef)) - } - - // Update the node's Spec.ProviderID - patchHelper, err := patch.NewHelper(node, remoteClient) - if err != nil { - log.Error(err, fmt.Sprintf("failed to create patchHelper for the workload cluster node %s", nodeName)) - return reconcile.Result{}, err - } - - node.Spec.ProviderID = rctx.NutanixMachine.Spec.ProviderID - err = patchHelper.Patch(rctx.Context, node) - if err != nil { - log.Error(err, fmt.Sprintf("failed to patch the remote workload cluster node %s's spec.providerID", nodeName)) - return reconcile.Result{}, err - } - log.Info(fmt.Sprintf("Patched the workload node %s spec.providerID: %s", nodeName, node.Spec.ProviderID)) - - return reconcile.Result{}, nil -} - func (r *NutanixMachineReconciler) validateMachineConfig(rctx *nctx.MachineContext) error { if rctx.Machine.Spec.FailureDomain == nil { if len(rctx.NutanixMachine.Spec.Subnets) == 0 { diff --git a/hack/install-go.sh b/hack/install-go.sh index c58be995d9..711b025a7b 100755 --- a/hack/install-go.sh +++ b/hack/install-go.sh @@ -4,8 +4,7 @@ set -o errexit set -o nounset set -o pipefail -wget https://go.dev/dl/go1.21.4.linux-amd64.tar.gz +wget -q https://go.dev/dl/go1.21.4.linux-amd64.tar.gz rm -rf /usr/local/go && tar -C /usr/local -xzf go1.21.4.linux-amd64.tar.gz export PATH=$PATH:/usr/local/go/bin go version - diff --git a/metadata.yaml b/metadata.yaml index 217146edb6..dd9c958183 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -30,6 +30,9 @@ releaseSeries: - major: 1 minor: 2 contract: v1beta1 + - major: 1 + minor: 3 + contract: v1beta1 - major: 0 minor: 0 contract: v1beta1 diff --git a/scripts/ccm_nutanix_update.sh b/scripts/ccm_nutanix_update.sh index bb999b714d..bfb18bf297 100755 --- a/scripts/ccm_nutanix_update.sh +++ b/scripts/ccm_nutanix_update.sh @@ -22,4 +22,4 @@ helm template -n kube-system nutanix-cloud-provider nutanix/nutanix-cloud-provid --set prismCentralEndPoint='${NUTANIX_ENDPOINT}',prismCentralPort='${NUTANIX_PORT=9440}',prismCentralInsecure='${NUTANIX_INSECURE=false}' \ --set image.repository="\${CCM_REPO=$NUTANIX_CCM_REPO}",image.tag="\${CCM_TAG=v$NUTANIX_CCM_VERSION}" \ --set createSecret=false \ - > templates/ccm/nutanix-ccm.yaml + > templates/base/nutanix-ccm.yaml diff --git a/templates/ccm/ccm-patch.yaml b/templates/base/ccm-patch.yaml similarity index 100% rename from templates/ccm/ccm-patch.yaml rename to templates/base/ccm-patch.yaml diff --git a/templates/base/kustomization.yaml b/templates/base/kustomization.yaml index 76a87aed01..b4aa3b1508 100644 --- a/templates/base/kustomization.yaml +++ b/templates/base/kustomization.yaml @@ 
-1,6 +1,12 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization +configMapGenerator: +- name: nutanix-ccm + behavior: merge + files: + - nutanix-ccm.yaml + bases: - ./cluster-with-kcp.yaml - ./secret.yaml @@ -8,3 +14,9 @@ bases: - ./nmt.yaml - ./md.yaml - ./mhc.yaml + - ./nutanix-ccm-crs.yaml + - ./nutanix-ccm-secret.yaml + +patchesStrategicMerge: +- ccm-patch.yaml + diff --git a/templates/ccm/nutanix-ccm-crs.yaml b/templates/base/nutanix-ccm-crs.yaml similarity index 100% rename from templates/ccm/nutanix-ccm-crs.yaml rename to templates/base/nutanix-ccm-crs.yaml diff --git a/templates/ccm/nutanix-ccm-secret.yaml b/templates/base/nutanix-ccm-secret.yaml similarity index 100% rename from templates/ccm/nutanix-ccm-secret.yaml rename to templates/base/nutanix-ccm-secret.yaml diff --git a/templates/ccm/nutanix-ccm.yaml b/templates/base/nutanix-ccm.yaml similarity index 100% rename from templates/ccm/nutanix-ccm.yaml rename to templates/base/nutanix-ccm.yaml diff --git a/templates/ccm/kustomization.yaml b/templates/ccm/kustomization.yaml deleted file mode 100644 index 358144c0a2..0000000000 --- a/templates/ccm/kustomization.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -configMapGenerator: -- name: nutanix-ccm - behavior: merge - files: - - nutanix-ccm.yaml - -resources: -- ../base/ -- nutanix-ccm-crs.yaml -- nutanix-ccm-secret.yaml - -patchesStrategicMerge: -- ccm-patch.yaml diff --git a/templates/cluster-template-ccm.yaml b/templates/cluster-template-ccm.yaml deleted file mode 100644 index d40bcafb75..0000000000 --- a/templates/cluster-template-ccm.yaml +++ /dev/null @@ -1,546 +0,0 @@ -apiVersion: v1 -binaryData: - ca.crt: ${NUTANIX_ADDITIONAL_TRUST_BUNDLE=""} -kind: ConfigMap -metadata: - name: user-ca-bundle - namespace: ${NAMESPACE} ---- -apiVersion: v1 -data: - nutanix-ccm.yaml: | - --- - apiVersion: v1 - kind: ConfigMap - metadata: - name: user-ca-bundle - namespace: kube-system - binaryData: - ca.crt: ${NUTANIX_ADDITIONAL_TRUST_BUNDLE=""} - --- - # Source: nutanix-cloud-provider/templates/rbac.yaml - apiVersion: v1 - kind: ServiceAccount - metadata: - name: cloud-controller-manager - namespace: kube-system - --- - # Source: nutanix-cloud-provider/templates/cm.yaml - kind: ConfigMap - apiVersion: v1 - metadata: - name: nutanix-config - namespace: kube-system - data: - nutanix_config.json: |- - { - "prismCentral": { - "address": "${NUTANIX_ENDPOINT}", - "port": ${NUTANIX_PORT=9440}, - "insecure": ${NUTANIX_INSECURE=false}, - "credentialRef": { - "kind": "secret", - "name": "nutanix-creds", - "namespace": "kube-system" - }, - "additionalTrustBundle": { - "kind": "ConfigMap", - "name": "user-ca-bundle", - "namespace": "kube-system" - } - }, - "enableCustomLabeling": ${CCM_CUSTOM_LABEL=false}, - "topologyDiscovery": { - "type": "Prism" - } - } - --- - # Source: nutanix-cloud-provider/templates/rbac.yaml - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - annotations: - rbac.authorization.kubernetes.io/autoupdate: "true" - name: system:cloud-controller-manager - rules: - - apiGroups: - - "" - resources: - - secrets - verbs: - - get - - list - - watch - - apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch - - apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - update - - apiGroups: - - "" - resources: - - nodes - verbs: - - "*" - - apiGroups: - - "" - resources: - - nodes/status - verbs: - - patch - - apiGroups: - - "" - resources: - - 
serviceaccounts - verbs: - - create - - apiGroups: - - "" - resources: - - endpoints - verbs: - - create - - get - - list - - watch - - update - - apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - get - - list - - watch - - create - - update - - patch - - delete - --- - # Source: nutanix-cloud-provider/templates/rbac.yaml - kind: ClusterRoleBinding - apiVersion: rbac.authorization.k8s.io/v1 - metadata: - name: system:cloud-controller-manager - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:cloud-controller-manager - subjects: - - kind: ServiceAccount - name: cloud-controller-manager - namespace: kube-system - --- - # Source: nutanix-cloud-provider/templates/cloud-provider-nutanix-deployment.yaml - apiVersion: apps/v1 - kind: Deployment - metadata: - labels: - k8s-app: nutanix-cloud-controller-manager - name: nutanix-cloud-controller-manager - namespace: kube-system - spec: - replicas: 1 - selector: - matchLabels: - k8s-app: nutanix-cloud-controller-manager - strategy: - type: Recreate - template: - metadata: - labels: - k8s-app: nutanix-cloud-controller-manager - spec: - hostNetwork: true - priorityClassName: system-cluster-critical - nodeSelector: - node-role.kubernetes.io/control-plane: "" - serviceAccountName: cloud-controller-manager - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - k8s-app: nutanix-cloud-controller-manager - topologyKey: kubernetes.io/hostname - dnsPolicy: Default - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/master - operator: Exists - - effect: NoSchedule - key: node-role.kubernetes.io/control-plane - operator: Exists - - effect: NoExecute - key: node.kubernetes.io/unreachable - operator: Exists - tolerationSeconds: 120 - - effect: NoExecute - key: node.kubernetes.io/not-ready - operator: Exists - tolerationSeconds: 120 - - effect: NoSchedule - key: node.cloudprovider.kubernetes.io/uninitialized - operator: Exists - - effect: NoSchedule - key: node.kubernetes.io/not-ready - operator: Exists - containers: - - image: "${CCM_REPO=ghcr.io/nutanix-cloud-native/cloud-provider-nutanix/controller}:${CCM_TAG=v0.3.1}" - imagePullPolicy: IfNotPresent - name: nutanix-cloud-controller-manager - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - args: - - "--leader-elect=true" - - "--cloud-config=/etc/cloud/nutanix_config.json" - resources: - requests: - cpu: 100m - memory: 50Mi - volumeMounts: - - mountPath: /etc/cloud - name: nutanix-config-volume - readOnly: true - volumes: - - name: nutanix-config-volume - configMap: - name: nutanix-config -kind: ConfigMap -metadata: - name: nutanix-ccm ---- -apiVersion: v1 -kind: Secret -metadata: - name: ${CLUSTER_NAME} - namespace: ${NAMESPACE} -stringData: - credentials: "[\n {\n \"type\": \"basic_auth\", \n \"data\": { \n \"prismCentral\":{\n - \ \"username\": \"${NUTANIX_USER}\", \n \"password\": \"${NUTANIX_PASSWORD}\"\n - \ }\n }\n }\n]\n" ---- -apiVersion: v1 -kind: Secret -metadata: - name: nutanix-ccm-secret -stringData: - nutanix-ccm-secret.yaml: "apiVersion: v1\nkind: Secret\nmetadata:\n name: nutanix-creds\n - \ namespace: kube-system\nstringData:\n credentials: |\n [\n {\n \"type\": - \"basic_auth\", \n \"data\": { \n \"prismCentral\":{\n \"username\": - \"${NUTANIX_USER}\",\n \"password\": \"${NUTANIX_PASSWORD}\"\n },\n - \ \"prismElements\": null\n }\n }\n ]\n" -type: addons.cluster.x-k8s.io/resource-set ---- -apiVersion: 
addons.cluster.x-k8s.io/v1beta1 -kind: ClusterResourceSet -metadata: - name: nutanix-ccm-crs -spec: - clusterSelector: - matchLabels: - ccm: nutanix - resources: - - kind: ConfigMap - name: nutanix-ccm - - kind: Secret - name: nutanix-ccm-secret - - kind: ConfigMap - name: user-ca-bundle - strategy: ApplyOnce ---- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 -kind: KubeadmConfigTemplate -metadata: - name: ${CLUSTER_NAME}-kcfg-0 - namespace: ${NAMESPACE} -spec: - template: - spec: - joinConfiguration: - nodeRegistration: - kubeletExtraArgs: - cloud-provider: external - eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10% - tls-cipher-suites: ${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} - postKubeadmCommands: - - echo "after kubeadm call" > /var/log/postkubeadm.log - preKubeadmCommands: - - echo "before kubeadm call" > /var/log/prekubeadm.log - - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" - users: - - lockPassword: false - name: capiuser - sshAuthorizedKeys: - - ${NUTANIX_SSH_AUTHORIZED_KEY} - sudo: ALL=(ALL) NOPASSWD:ALL - verbosity: 10 ---- -apiVersion: cluster.x-k8s.io/v1beta1 -kind: Cluster -metadata: - labels: - ccm: nutanix - cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} - name: ${CLUSTER_NAME} - namespace: ${NAMESPACE} -spec: - clusterNetwork: - pods: - cidrBlocks: - - 172.20.0.0/16 - serviceDomain: cluster.local - services: - cidrBlocks: - - 172.19.0.0/16 - controlPlaneRef: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 - kind: KubeadmControlPlane - name: ${CLUSTER_NAME}-kcp - infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 - kind: NutanixCluster - name: ${CLUSTER_NAME} ---- -apiVersion: cluster.x-k8s.io/v1beta1 -kind: MachineDeployment -metadata: - labels: - cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} - name: ${CLUSTER_NAME}-wmd - namespace: ${NAMESPACE} -spec: - clusterName: ${CLUSTER_NAME} - replicas: ${WORKER_MACHINE_COUNT} - selector: - matchLabels: {} - template: - metadata: - labels: - cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} - spec: - bootstrap: - configRef: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 - kind: KubeadmConfigTemplate - name: ${CLUSTER_NAME}-kcfg-0 - clusterName: ${CLUSTER_NAME} - infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 - kind: NutanixMachineTemplate - name: ${CLUSTER_NAME}-mt-0 - version: ${KUBERNETES_VERSION} ---- -apiVersion: cluster.x-k8s.io/v1beta1 -kind: MachineHealthCheck -metadata: - name: ${CLUSTER_NAME}-mhc - namespace: ${NAMESPACE} -spec: - clusterName: ${CLUSTER_NAME} - maxUnhealthy: 40% - nodeStartupTimeout: 10m0s - selector: - matchLabels: - cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} - unhealthyConditions: - - status: "False" - timeout: 5m0s - type: Ready - - status: Unknown - timeout: 5m0s - type: Ready - - status: "True" - timeout: 5m0s - type: MemoryPressure - - status: "True" - timeout: 5m0s - type: DiskPressure - - status: "True" - timeout: 5m0s - type: PIDPressure - - status: "True" - timeout: 5m0s - type: NetworkUnavailable ---- -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 -kind: KubeadmControlPlane -metadata: - name: ${CLUSTER_NAME}-kcp - namespace: ${NAMESPACE} -spec: - kubeadmConfigSpec: - clusterConfiguration: - apiServer: - certSANs: - - localhost 
- - 127.0.0.1 - - 0.0.0.0 - extraArgs: - cloud-provider: external - tls-cipher-suites: ${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} - controllerManager: - extraArgs: - cloud-provider: external - enable-hostpath-provisioner: "true" - tls-cipher-suites: ${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} - scheduler: - extraArgs: - tls-cipher-suites: ${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} - files: - - content: | - apiVersion: v1 - kind: Pod - metadata: - name: kube-vip - namespace: kube-system - spec: - containers: - - name: kube-vip - image: ghcr.io/kube-vip/kube-vip:v0.6.4 - imagePullPolicy: IfNotPresent - args: - - manager - env: - - name: vip_arp - value: "true" - - name: address - value: "${CONTROL_PLANE_ENDPOINT_IP}" - - name: port - value: "${CONTROL_PLANE_ENDPOINT_PORT=6443}" - - name: vip_cidr - value: "32" - - name: cp_enable - value: "true" - - name: cp_namespace - value: kube-system - - name: vip_ddns - value: "false" - - name: vip_leaderelection - value: "true" - - name: vip_leaseduration - value: "15" - - name: vip_renewdeadline - value: "10" - - name: vip_retryperiod - value: "2" - - name: svc_enable - value: "${KUBEVIP_SVC_ENABLE=false}" - - name: lb_enable - value: "${KUBEVIP_LB_ENABLE=false}" - - name: enableServicesElection - value: "${KUBEVIP_SVC_ELECTION=false}" - securityContext: - capabilities: - add: - - NET_ADMIN - - SYS_TIME - - NET_RAW - volumeMounts: - - mountPath: /etc/kubernetes/admin.conf - name: kubeconfig - resources: {} - hostNetwork: true - hostAliases: - - hostnames: - - kubernetes - ip: 127.0.0.1 - volumes: - - name: kubeconfig - hostPath: - type: FileOrCreate - path: /etc/kubernetes/admin.conf - status: {} - owner: root:root - path: /etc/kubernetes/manifests/kube-vip.yaml - initConfiguration: - nodeRegistration: - kubeletExtraArgs: - cloud-provider: external - eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10% - tls-cipher-suites: ${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} - joinConfiguration: - nodeRegistration: - kubeletExtraArgs: - cloud-provider: external - eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10% - tls-cipher-suites: ${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} - postKubeadmCommands: - - echo export KUBECONFIG=/etc/kubernetes/admin.conf >> /root/.bashrc - - echo "after kubeadm call" > 
/var/log/postkubeadm.log - preKubeadmCommands: - - echo "before kubeadm call" > /var/log/prekubeadm.log - - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" - useExperimentalRetryJoin: true - users: - - lockPassword: false - name: capiuser - sshAuthorizedKeys: - - ${NUTANIX_SSH_AUTHORIZED_KEY} - sudo: ALL=(ALL) NOPASSWD:ALL - verbosity: 10 - machineTemplate: - infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 - kind: NutanixMachineTemplate - name: ${CLUSTER_NAME}-mt-0 - replicas: ${CONTROL_PLANE_MACHINE_COUNT=1} - version: ${KUBERNETES_VERSION} ---- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 -kind: NutanixCluster -metadata: - name: ${CLUSTER_NAME} - namespace: ${NAMESPACE} -spec: - controlPlaneEndpoint: - host: ${CONTROL_PLANE_ENDPOINT_IP} - port: ${CONTROL_PLANE_ENDPOINT_PORT=6443} - prismCentral: - additionalTrustBundle: - kind: ConfigMap - name: user-ca-bundle - address: ${NUTANIX_ENDPOINT} - credentialRef: - kind: Secret - name: ${CLUSTER_NAME} - insecure: ${NUTANIX_INSECURE=false} - port: ${NUTANIX_PORT=9440} ---- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 -kind: NutanixMachineTemplate -metadata: - name: ${CLUSTER_NAME}-mt-0 - namespace: ${NAMESPACE} -spec: - template: - spec: - bootType: ${NUTANIX_MACHINE_BOOT_TYPE=legacy} - cluster: - name: ${NUTANIX_PRISM_ELEMENT_CLUSTER_NAME} - type: name - image: - name: ${NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME} - type: name - memorySize: ${NUTANIX_MACHINE_MEMORY_SIZE=4Gi} - providerID: nutanix://${CLUSTER_NAME}-m1 - subnet: - - name: ${NUTANIX_SUBNET_NAME} - type: name - systemDiskSize: ${NUTANIX_SYSTEMDISK_SIZE=40Gi} - vcpuSockets: ${NUTANIX_MACHINE_VCPU_SOCKET=2} - vcpusPerSocket: ${NUTANIX_MACHINE_VCPU_PER_SOCKET=1} diff --git a/templates/cluster-template-csi.yaml b/templates/cluster-template-csi.yaml index 4b44d61db5..1cd5b6eb12 100644 --- a/templates/cluster-template-csi.yaml +++ b/templates/cluster-template-csi.yaml @@ -7,6 +7,224 @@ metadata: namespace: ${NAMESPACE} --- apiVersion: v1 +data: + nutanix-ccm.yaml: | + --- + apiVersion: v1 + kind: ConfigMap + metadata: + name: user-ca-bundle + namespace: kube-system + binaryData: + ca.crt: ${NUTANIX_ADDITIONAL_TRUST_BUNDLE=""} + --- + # Source: nutanix-cloud-provider/templates/rbac.yaml + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + namespace: kube-system + --- + # Source: nutanix-cloud-provider/templates/cm.yaml + kind: ConfigMap + apiVersion: v1 + metadata: + name: nutanix-config + namespace: kube-system + data: + nutanix_config.json: |- + { + "prismCentral": { + "address": "${NUTANIX_ENDPOINT}", + "port": ${NUTANIX_PORT=9440}, + "insecure": ${NUTANIX_INSECURE=false}, + "credentialRef": { + "kind": "secret", + "name": "nutanix-creds", + "namespace": "kube-system" + }, + "additionalTrustBundle": { + "kind": "ConfigMap", + "name": "user-ca-bundle", + "namespace": "kube-system" + } + }, + "enableCustomLabeling": ${CCM_CUSTOM_LABEL=false}, + "topologyDiscovery": { + "type": "Prism" + } + } + --- + # Source: nutanix-cloud-provider/templates/rbac.yaml + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - 
"" + resources: + - nodes + verbs: + - "*" + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + --- + # Source: nutanix-cloud-provider/templates/rbac.yaml + kind: ClusterRoleBinding + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + --- + # Source: nutanix-cloud-provider/templates/cloud-provider-nutanix-deployment.yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + labels: + k8s-app: nutanix-cloud-controller-manager + name: nutanix-cloud-controller-manager + namespace: kube-system + spec: + replicas: 1 + selector: + matchLabels: + k8s-app: nutanix-cloud-controller-manager + strategy: + type: Recreate + template: + metadata: + labels: + k8s-app: nutanix-cloud-controller-manager + spec: + hostNetwork: true + priorityClassName: system-cluster-critical + nodeSelector: + node-role.kubernetes.io/control-plane: "" + serviceAccountName: cloud-controller-manager + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: nutanix-cloud-controller-manager + topologyKey: kubernetes.io/hostname + dnsPolicy: Default + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 120 + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 120 + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + containers: + - image: "${CCM_REPO=ghcr.io/nutanix-cloud-native/cloud-provider-nutanix/controller}:${CCM_TAG=v0.3.1}" + imagePullPolicy: IfNotPresent + name: nutanix-cloud-controller-manager + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + args: + - "--leader-elect=true" + - "--cloud-config=/etc/cloud/nutanix_config.json" + resources: + requests: + cpu: 100m + memory: 50Mi + volumeMounts: + - mountPath: /etc/cloud + name: nutanix-config-volume + readOnly: true + volumes: + - name: nutanix-config-volume + configMap: + name: nutanix-config +kind: ConfigMap +metadata: + name: nutanix-ccm +--- +apiVersion: v1 data: ns.yaml: |- apiVersion: v1 @@ -1335,6 +1553,35 @@ stringData: \ \"username\": \"${NUTANIX_USER}\", \n \"password\": \"${NUTANIX_PASSWORD}\"\n \ }\n }\n }\n]\n" --- +apiVersion: v1 +kind: Secret +metadata: + name: nutanix-ccm-secret +stringData: + nutanix-ccm-secret.yaml: "apiVersion: v1\nkind: Secret\nmetadata:\n name: nutanix-creds\n + \ namespace: kube-system\nstringData:\n credentials: |\n [\n {\n \"type\": + \"basic_auth\", \n \"data\": { \n \"prismCentral\":{\n \"username\": + \"${NUTANIX_USER}\",\n \"password\": \"${NUTANIX_PASSWORD}\"\n },\n + \ \"prismElements\": null\n }\n }\n ]\n" +type: addons.cluster.x-k8s.io/resource-set 
+--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: nutanix-ccm-crs +spec: + clusterSelector: + matchLabels: + ccm: nutanix + resources: + - kind: ConfigMap + name: nutanix-ccm + - kind: Secret + name: nutanix-ccm-secret + - kind: ConfigMap + name: user-ca-bundle + strategy: ApplyOnce +--- apiVersion: addons.cluster.x-k8s.io/v1beta1 kind: ClusterResourceSet metadata: @@ -1359,6 +1606,7 @@ spec: joinConfiguration: nodeRegistration: kubeletExtraArgs: + cloud-provider: external eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10% tls-cipher-suites: ${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} postKubeadmCommands: @@ -1378,6 +1626,7 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: + ccm: nutanix cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} csi: nutanix name: ${CLUSTER_NAME} @@ -1475,9 +1724,11 @@ spec: - 127.0.0.1 - 0.0.0.0 extraArgs: + cloud-provider: external tls-cipher-suites: ${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} controllerManager: extraArgs: + cloud-provider: external enable-hostpath-provisioner: "true" tls-cipher-suites: ${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} scheduler: @@ -1552,11 +1803,13 @@ spec: initConfiguration: nodeRegistration: kubeletExtraArgs: + cloud-provider: external eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10% tls-cipher-suites: ${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} joinConfiguration: nodeRegistration: kubeletExtraArgs: + cloud-provider: external eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10% tls-cipher-suites: ${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} postKubeadmCommands: diff --git a/templates/cluster-template.yaml b/templates/cluster-template.yaml index 2cf04a9b60..d40bcafb75 100644 --- a/templates/cluster-template.yaml +++ b/templates/cluster-template.yaml @@ -7,6 +7,224 @@ metadata: namespace: ${NAMESPACE} --- apiVersion: v1 +data: + nutanix-ccm.yaml: | + --- + apiVersion: v1 + kind: ConfigMap + metadata: + name: user-ca-bundle + namespace: kube-system + binaryData: + ca.crt: ${NUTANIX_ADDITIONAL_TRUST_BUNDLE=""} + --- + # Source: nutanix-cloud-provider/templates/rbac.yaml + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + 
namespace: kube-system + --- + # Source: nutanix-cloud-provider/templates/cm.yaml + kind: ConfigMap + apiVersion: v1 + metadata: + name: nutanix-config + namespace: kube-system + data: + nutanix_config.json: |- + { + "prismCentral": { + "address": "${NUTANIX_ENDPOINT}", + "port": ${NUTANIX_PORT=9440}, + "insecure": ${NUTANIX_INSECURE=false}, + "credentialRef": { + "kind": "secret", + "name": "nutanix-creds", + "namespace": "kube-system" + }, + "additionalTrustBundle": { + "kind": "ConfigMap", + "name": "user-ca-bundle", + "namespace": "kube-system" + } + }, + "enableCustomLabeling": ${CCM_CUSTOM_LABEL=false}, + "topologyDiscovery": { + "type": "Prism" + } + } + --- + # Source: nutanix-cloud-provider/templates/rbac.yaml + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - "*" + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + --- + # Source: nutanix-cloud-provider/templates/rbac.yaml + kind: ClusterRoleBinding + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + --- + # Source: nutanix-cloud-provider/templates/cloud-provider-nutanix-deployment.yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + labels: + k8s-app: nutanix-cloud-controller-manager + name: nutanix-cloud-controller-manager + namespace: kube-system + spec: + replicas: 1 + selector: + matchLabels: + k8s-app: nutanix-cloud-controller-manager + strategy: + type: Recreate + template: + metadata: + labels: + k8s-app: nutanix-cloud-controller-manager + spec: + hostNetwork: true + priorityClassName: system-cluster-critical + nodeSelector: + node-role.kubernetes.io/control-plane: "" + serviceAccountName: cloud-controller-manager + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: nutanix-cloud-controller-manager + topologyKey: kubernetes.io/hostname + dnsPolicy: Default + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 120 + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 120 + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + containers: + - image: 
"${CCM_REPO=ghcr.io/nutanix-cloud-native/cloud-provider-nutanix/controller}:${CCM_TAG=v0.3.1}" + imagePullPolicy: IfNotPresent + name: nutanix-cloud-controller-manager + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + args: + - "--leader-elect=true" + - "--cloud-config=/etc/cloud/nutanix_config.json" + resources: + requests: + cpu: 100m + memory: 50Mi + volumeMounts: + - mountPath: /etc/cloud + name: nutanix-config-volume + readOnly: true + volumes: + - name: nutanix-config-volume + configMap: + name: nutanix-config +kind: ConfigMap +metadata: + name: nutanix-ccm +--- +apiVersion: v1 kind: Secret metadata: name: ${CLUSTER_NAME} @@ -16,6 +234,35 @@ stringData: \ \"username\": \"${NUTANIX_USER}\", \n \"password\": \"${NUTANIX_PASSWORD}\"\n \ }\n }\n }\n]\n" --- +apiVersion: v1 +kind: Secret +metadata: + name: nutanix-ccm-secret +stringData: + nutanix-ccm-secret.yaml: "apiVersion: v1\nkind: Secret\nmetadata:\n name: nutanix-creds\n + \ namespace: kube-system\nstringData:\n credentials: |\n [\n {\n \"type\": + \"basic_auth\", \n \"data\": { \n \"prismCentral\":{\n \"username\": + \"${NUTANIX_USER}\",\n \"password\": \"${NUTANIX_PASSWORD}\"\n },\n + \ \"prismElements\": null\n }\n }\n ]\n" +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: nutanix-ccm-crs +spec: + clusterSelector: + matchLabels: + ccm: nutanix + resources: + - kind: ConfigMap + name: nutanix-ccm + - kind: Secret + name: nutanix-ccm-secret + - kind: ConfigMap + name: user-ca-bundle + strategy: ApplyOnce +--- apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 kind: KubeadmConfigTemplate metadata: @@ -27,6 +274,7 @@ spec: joinConfiguration: nodeRegistration: kubeletExtraArgs: + cloud-provider: external eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10% tls-cipher-suites: ${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} postKubeadmCommands: @@ -46,6 +294,7 @@ apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: + ccm: nutanix cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} name: ${CLUSTER_NAME} namespace: ${NAMESPACE} @@ -142,9 +391,11 @@ spec: - 127.0.0.1 - 0.0.0.0 extraArgs: + cloud-provider: external tls-cipher-suites: ${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} controllerManager: extraArgs: + cloud-provider: external enable-hostpath-provisioner: "true" tls-cipher-suites: ${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} scheduler: @@ -219,11 +470,13 @@ spec: initConfiguration: nodeRegistration: kubeletExtraArgs: + cloud-provider: external eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10% tls-cipher-suites: 
${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} joinConfiguration: nodeRegistration: kubeletExtraArgs: + cloud-provider: external eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10% tls-cipher-suites: ${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256} postKubeadmCommands: diff --git a/test/e2e/basic_uuid_test.go b/test/e2e/basic_uuid_test.go index d59587a46f..e752fe1b32 100644 --- a/test/e2e/basic_uuid_test.go +++ b/test/e2e/basic_uuid_test.go @@ -23,21 +23,28 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + "github.com/onsi/gomega/gstruct" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/cluster-api/test/framework/clusterctl" ) var _ = Describe("Nutanix Basic Creation with UUID", Label("capx-feature-test", "uuid", "slow", "network"), func() { const ( - specName = "cluster-uuid" + specName = "cluster-uuid" + ccmInstanceTypeKey = "node.kubernetes.io/instance-type" + ccmInstanceType = "ahv-vm" + ccmZoneKey = "topology.kubernetes.io/zone" + ccmRegionKey = "topology.kubernetes.io/region" ) var ( - namespace *corev1.Namespace - clusterName string - clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult - cancelWatches context.CancelFunc - testHelper testHelperInterface + namespace *corev1.Namespace + clusterName string + clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult + cancelWatches context.CancelFunc + testHelper testHelperInterface + expectedCCMLabels []string ) BeforeEach(func() { @@ -46,6 +53,11 @@ var _ = Describe("Nutanix Basic Creation with UUID", Label("capx-feature-test", clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult) Expect(bootstrapClusterProxy).NotTo(BeNil(), "BootstrapClusterProxy can't be nil") namespace, cancelWatches = setupSpecNamespace(ctx, specName, bootstrapClusterProxy, artifactFolder) + expectedCCMLabels = []string{ + ccmZoneKey, + ccmRegionKey, + ccmInstanceTypeKey, + } }) AfterEach(func() { @@ -76,6 +88,24 @@ var _ = Describe("Nutanix Basic Creation with UUID", Label("capx-feature-test", }, clusterResources) }) + By("Fetching workload proxy") + workloadProxy := bootstrapClusterProxy.GetWorkloadCluster(ctx, namespace.Name, clusterResources.Cluster.Name) + + By("Checking if nodes have correct CCM labels") + nodes, err := workloadProxy.GetClientSet().CoreV1().Nodes().List(ctx, metav1.ListOptions{}) + Expect(err).ToNot(HaveOccurred()) + for _, n := range nodes.Items { + nodeLabels := n.Labels + Expect(nodeLabels).To(gstruct.MatchKeys(gstruct.IgnoreExtras, + gstruct.Keys{ + ccmInstanceTypeKey: Equal(ccmInstanceType), + }, + )) + for _, k := range expectedCCMLabels { + Expect(nodeLabels).To(HaveKey(k)) + } + } + By("PASSED!") }) }) diff --git a/test/e2e/ccm_test.go b/test/e2e/ccm_test.go deleted file mode 100644 index 60847e3301..0000000000 --- a/test/e2e/ccm_test.go +++ /dev/null @@ -1,103 +0,0 @@ -//go:build e2e - -/* -Copyright 2022 Nutanix - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the 
License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package e2e - -import ( - "context" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - "github.com/onsi/gomega/gstruct" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/cluster-api/test/framework/clusterctl" -) - -var _ = Describe("Nutanix flavor CCM", Label("capx-feature-test", "ccm", "slow", "network"), func() { - const ( - specName = "cluster-ccm" - ccmInstanceTypeKey = "node.kubernetes.io/instance-type" - ccmInstanceType = "ahv-vm" - ccmZoneKey = "topology.kubernetes.io/zone" - ccmRegionKey = "topology.kubernetes.io/region" - ) - - var ( - namespace *corev1.Namespace - clusterName string - clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult - cancelWatches context.CancelFunc - testHelper testHelperInterface - expectedCCMLabels []string - ) - - BeforeEach(func() { - testHelper = newTestHelper(e2eConfig) - clusterName = testHelper.generateTestClusterName(specName) - clusterResources = &clusterctl.ApplyClusterTemplateAndWaitResult{} - Expect(bootstrapClusterProxy).NotTo(BeNil(), "BootstrapClusterProxy can't be nil") - namespace, cancelWatches = setupSpecNamespace(ctx, specName, bootstrapClusterProxy, artifactFolder) - expectedCCMLabels = []string{ - ccmZoneKey, - ccmRegionKey, - ccmInstanceTypeKey, - } - }) - - AfterEach(func() { - dumpSpecResourcesAndCleanup(ctx, specName, bootstrapClusterProxy, artifactFolder, namespace, cancelWatches, clusterResources.Cluster, e2eConfig.GetIntervals, skipCleanup) - }) - - It("Create a cluster with Nutanix CCM", func() { - const flavor = "ccm" - - Expect(namespace).NotTo(BeNil()) - - By("Creating a workload cluster") - testHelper.deployClusterAndWait( - deployClusterParams{ - clusterName: clusterName, - namespace: namespace, - flavor: flavor, - clusterctlConfigPath: clusterctlConfigPath, - artifactFolder: artifactFolder, - bootstrapClusterProxy: bootstrapClusterProxy, - }, clusterResources) - - By("Fetching workload proxy") - workloadProxy := bootstrapClusterProxy.GetWorkloadCluster(ctx, namespace.Name, clusterResources.Cluster.Name) - - By("Checking if nodes have correct CCM labels") - nodes, err := workloadProxy.GetClientSet().CoreV1().Nodes().List(ctx, metav1.ListOptions{}) - Expect(err).ToNot(HaveOccurred()) - for _, n := range nodes.Items { - nodeLabels := n.Labels - Expect(nodeLabels).To(gstruct.MatchKeys(gstruct.IgnoreExtras, - gstruct.Keys{ - ccmInstanceTypeKey: Equal(ccmInstanceType), - }, - )) - for _, k := range expectedCCMLabels { - Expect(nodeLabels).To(HaveKey(k)) - } - } - - By("PASSED!") - }) -}) diff --git a/test/e2e/clusterctl_upgrade_test.go b/test/e2e/clusterctl_upgrade_test.go index d02c8db526..a63158dfde 100644 --- a/test/e2e/clusterctl_upgrade_test.go +++ b/test/e2e/clusterctl_upgrade_test.go @@ -19,20 +19,261 @@ limitations under the License. package e2e import ( + "bytes" + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "time" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/blang/semver/v4" + "github.com/nutanix-cloud-native/cluster-api-provider-nutanix/test/e2e/log" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + yaml "sigs.k8s.io/cluster-api/cmd/clusterctl/client/yamlprocessor" capi_e2e "sigs.k8s.io/cluster-api/test/e2e" + "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/controller-runtime/pkg/client" ) -// clusterctl upgrade test is being skipped. See 'GINKGO_SKIP' parameter in `Makefile` -var _ = Describe("When testing clusterctl upgrades [clusterctl-Upgrade]", Label("clusterctl-upgrade", "slow", "network"), func() { - capi_e2e.ClusterctlUpgradeSpec(ctx, func() capi_e2e.ClusterctlUpgradeSpecInput { +var kubernetesVersion = getKubernetesVersion() + +func getKubernetesVersion() string { + if e2eConfig != nil { + if result, ok := e2eConfig.Variables["KUBERNETES_VERSION"]; ok { + return result + } + } else { + if result, ok := os.LookupEnv("KUBERNETES_VERSION"); ok { + return result + } + } + + return "undefined" +} + +var _ = Describe("[clusterctl-Upgrade] Upgrade CAPX (v1.2.4 => current) K8S "+kubernetesVersion, Label("clusterctl-upgrade", "slow", "network"), func() { + + preWaitForCluster := createPreWaitForClusterFunc(func() capi_e2e.ClusterctlUpgradeSpecInput { return capi_e2e.ClusterctlUpgradeSpecInput{ E2EConfig: e2eConfig, ClusterctlConfigPath: clusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, + } + }) + + postUpgradeFunc := createPostUpgradeFunc(func() capi_e2e.ClusterctlUpgradeSpecInput { + return capi_e2e.ClusterctlUpgradeSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + } + }) + + capi_e2e.ClusterctlUpgradeSpec(ctx, func() capi_e2e.ClusterctlUpgradeSpecInput { + return capi_e2e.ClusterctlUpgradeSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.3.10/clusterctl-{OS}-{ARCH}", + InitWithKubernetesVersion: e2eConfig.GetVariable("KUBERNETES_VERSION"), + InitWithCoreProvider: "cluster-api:v1.3.10", + InitWithBootstrapProviders: []string{"kubeadm:v1.3.10"}, + InitWithControlPlaneProviders: []string{"kubeadm:v1.3.10"}, + InitWithInfrastructureProviders: []string{"nutanix:v1.2.4"}, + PreWaitForCluster: preWaitForCluster, + PostUpgrade: postUpgradeFunc, } }) }) + +func createPreWaitForClusterFunc(testInputFunc func() capi_e2e.ClusterctlUpgradeSpecInput) func(framework.ClusterProxy, string, string) { + return func(managementClusterProxy framework.ClusterProxy, mgmtClusterNamespace, mgmtClusterName string) { + testInput := testInputFunc() + Expect(testInput.E2EConfig).NotTo(BeNil(), "Invalid argument. testInput.E2EConfig can't be nil when calling createPreWaitForClusterFunc") + Expect(testInput.ArtifactFolder).NotTo(BeEmpty(), "Invalid argument. testInput.ArtifactFolder can't be empty when calling createPreWaitForClusterFunc") + Expect(testInput.E2EConfig.Variables).NotTo(BeNil(), "Invalid argument. 
testInput.E2EConfig.Variables can't be nil when calling createPreWaitForClusterFunc") + + By("Get latest version of CAPX provider") + + latestVersionString := "v1.2.4" + latestVersion, err := semver.ParseTolerant(latestVersionString) + Expect(err).NotTo(HaveOccurred()) + + nutanixProviderRepository := filepath.Join(testInput.ArtifactFolder, "repository", "infrastructure-nutanix") + + // Find the latest version of the CAPX provider defined for test + filepath.WalkDir(nutanixProviderRepository, func(path string, d os.DirEntry, err error) error { + if d.IsDir() { + version, err := semver.ParseTolerant(d.Name()) + if err == nil { + if latestVersion.Compare(version) < 0 { + latestVersion = version + latestVersionString = d.Name() + } + } + } + return nil + }) + + log.Infof("Latest version of CAPX provider found: %s", latestVersionString) + + latestVersionComponentsYamlFile := filepath.Join(nutanixProviderRepository, latestVersionString, "components.yaml") + + Byf("Replacing image in %s", latestVersionComponentsYamlFile) + + //load the components.yaml file + componentsYaml, err := ioutil.ReadFile(latestVersionComponentsYamlFile) + Expect(err).NotTo(HaveOccurred()) + + gitCommitHash := os.Getenv("GIT_COMMIT") + localImageRegistry := os.Getenv("LOCAL_IMAGE_REGISTRY") + currentCommitImage := fmt.Sprintf("image: %s/controller:e2e-%s", localImageRegistry, gitCommitHash) + + //replace the image + componentsYaml = bytes.ReplaceAll(componentsYaml, + []byte("image: ghcr.io/nutanix-cloud-native/cluster-api-provider-nutanix/controller:e2e"), + []byte(currentCommitImage), + ) + + //write the file back + err = ioutil.WriteFile(latestVersionComponentsYamlFile, componentsYaml, 0644) + Expect(err).NotTo(HaveOccurred()) + + Byf("Successfully replaced image in components.yaml with the image from the current commit: %s", currentCommitImage) + + } +} + +func createPostUpgradeFunc(testInputFunc func() capi_e2e.ClusterctlUpgradeSpecInput) func(framework.ClusterProxy) { + return func(managementClusterProxy framework.ClusterProxy) { + testInput := testInputFunc() + Expect(testInput.E2EConfig).NotTo(BeNil(), "Invalid argument. testInput.E2EConfig can't be nil when calling createPostUpgradeFunc") + Expect(testInput.ArtifactFolder).NotTo(BeEmpty(), "Invalid argument. testInput.ArtifactFolder can't be empty when calling createPostUpgradeFunc") + Expect(testInput.E2EConfig.Variables).NotTo(BeNil(), "Invalid argument. 
testInput.E2EConfig.Variables can't be nil when calling createPostUpgradeFunc") + + By("Installing Nutanix CCM") + + yamlProc := yaml.NewSimpleProcessor() + + latestVersionString := "v1.2.4" + latestVersion, err := semver.ParseTolerant(latestVersionString) + Expect(err).NotTo(HaveOccurred()) + + nutanixProviderRepository := filepath.Join(testInput.ArtifactFolder, "repository", "infrastructure-nutanix") + + // Find the latest version of the CAPX provider defined for test + filepath.WalkDir(nutanixProviderRepository, func(path string, d os.DirEntry, err error) error { + if d.IsDir() { + version, err := semver.ParseTolerant(d.Name()) + if err == nil { + if latestVersion.Compare(version) < 0 { + latestVersion = version + latestVersionString = d.Name() + } + } + } + return nil + }) + + // Load the Nutanix CCM manifest + manifestPath := filepath.Join(testInput.ArtifactFolder, "repository", "infrastructure-nutanix", latestVersionString, "ccm-update.yaml") + log.Debugf("Loading Nutanix CCM manifest from %s", manifestPath) + + template, err := os.ReadFile(manifestPath) + Expect(err).NotTo(HaveOccurred()) + + // Process the Nutanix CCM manifest + log.Debugf("Processing Nutanix CCM manifest") + processedTemplate, err := yamlProc.Process(template, func(varName string) (string, error) { + if !testInput.E2EConfig.HasVariable(varName) { + log.Debugf("Nutanix CCM manifest variable %s not found", varName) + return "", nil + } + + log.Debugf("Nutanix CCM manifest variable %s found", varName) + return testInput.E2EConfig.GetVariable(varName), nil + }) + Expect(err).NotTo(HaveOccurred()) + + // Apply the Nutanix CCM manifest + log.Debugf("Applying Nutanix CCM manifest") + err = managementClusterProxy.Apply(context.Background(), processedTemplate) + Expect(err).NotTo(HaveOccurred()) + + // Update Clusters with Nutanix CCM label + log.Debugf("Updating Clusters with Nutanix CCM label") + // List all clusters + clusterList := &clusterv1.ClusterList{} + err = managementClusterProxy.GetClient().List(context.Background(), clusterList) + Expect(err).NotTo(HaveOccurred()) + + clusterNames := []string{} + + // Update all clusters + for _, cluster := range clusterList.Items { + cluster.Labels["ccm"] = "nutanix" + err = managementClusterProxy.GetClient().Update(context.Background(), &cluster) + Expect(err).NotTo(HaveOccurred()) + clusterNames = append(clusterNames, cluster.Name) + log.Debugf("Updated cluster %s with Nutanix CCM label", cluster.Name) + } + + // Wait for Nutanix CCM to be ready + log.Debugf("Waiting for Nutanix CCM to be ready") + timeout := 5 * time.Minute + interval := 10 * time.Second + for _, clusterName := range clusterNames { + Eventually(func() error { + clusterProxy := managementClusterProxy.GetWorkloadCluster(context.Background(), "clusterctl-upgrade", clusterName) + u := &unstructured.Unstructured{} + u.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "Deployment", + Version: "v1", + }) + err := clusterProxy.GetClient().Get(context.Background(), client.ObjectKey{ + Namespace: "kube-system", + Name: "nutanix-cloud-controller-manager", + }, u) + return err + }, timeout, interval).ShouldNot(HaveOccurred()) + } + + By("Update KubeadmConfigTemplate with kubeletExtraArgs cloud-provider: external") + // List all KubeadmConfigTemplates + kubeadmConfigTemplateList := &bootstrapv1.KubeadmConfigTemplateList{} + err = managementClusterProxy.GetClient().List(context.Background(), kubeadmConfigTemplateList) + Expect(err).NotTo(HaveOccurred()) + + // Update all KubeadmConfigTemplates + for 
_, kubeadmConfigTemplate := range kubeadmConfigTemplateList.Items { + if kubeadmConfigTemplate.Spec.Template.Spec.JoinConfiguration == nil { + kubeadmConfigTemplate.Spec.Template.Spec.JoinConfiguration = &bootstrapv1.JoinConfiguration{ + NodeRegistration: bootstrapv1.NodeRegistrationOptions{}, + } + } + if kubeadmConfigTemplate.Spec.Template.Spec.JoinConfiguration.NodeRegistration.KubeletExtraArgs == nil { + kubeadmConfigTemplate.Spec.Template.Spec.JoinConfiguration.NodeRegistration.KubeletExtraArgs = map[string]string{} + } + + kubeadmConfigTemplate.Spec.Template.Spec.JoinConfiguration.NodeRegistration.KubeletExtraArgs["cloud-provider"] = "external" + err = managementClusterProxy.GetClient().Update(context.Background(), &kubeadmConfigTemplate) + Expect(err).NotTo(HaveOccurred()) + log.Debugf("Updated KubeadmConfigTemplate %s/%s with kubeletExtraArgs cloud-provider: external", kubeadmConfigTemplate.Namespace, kubeadmConfigTemplate.Name) + } + + //TODO: KubeadmControlPlane extraArgs and kubeletExtraArgs changes test (maybe in a separate test) + } +} diff --git a/test/e2e/config/nutanix.yaml b/test/e2e/config/nutanix.yaml index d9ebf8dbe4..47f75a73d5 100644 --- a/test/e2e/config/nutanix.yaml +++ b/test/e2e/config/nutanix.yaml @@ -13,12 +13,31 @@ images: loadBehavior: mustLoad # ## PLEASE KEEP THESE UP TO DATE WITH THE COMPONENTS # Cluster API v1beta1 Preloads - - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.4.1 + - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.3.10 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.4.1 + - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.3.10 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.4.1 + - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.3.10 loadBehavior: tryLoad + - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.4.9 + loadBehavior: tryLoad + - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.4.9 + loadBehavior: tryLoad + - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.4.9 + loadBehavior: tryLoad + - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.5.4 + loadBehavior: tryLoad + - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.5.4 + loadBehavior: tryLoad + - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.5.4 + loadBehavior: tryLoad + - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.6.0 + loadBehavior: tryLoad + - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.6.0 + loadBehavior: tryLoad + - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.6.0 + loadBehavior: tryLoad + providers: - name: cluster-api @@ -42,9 +61,9 @@ providers: replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" - - name: v1.4.1 + - name: v1.3.10 # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.1/core-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.3.10/core-components.yaml" type: "url" contract: v1beta1 files: @@ -52,6 +71,16 @@ providers: replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" + - name: v1.4.9 + # Use manifest from source files + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.9/core-components.yaml" + type: "url" + contract: v1beta1 + files: + - sourcePath: 
"../data/shared/metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" - name: kubeadm type: BootstrapProvider versions: @@ -73,9 +102,19 @@ providers: replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" - - name: v1.4.1 + - name: v1.3.10 # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.1/bootstrap-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.3.10/bootstrap-components.yaml" + type: "url" + contract: "v1beta1" + files: + - sourcePath: "../data/shared/metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" + - name: v1.4.9 + # Use manifest from source files + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.9/bootstrap-components.yaml" type: "url" contract: "v1beta1" files: @@ -104,9 +143,19 @@ providers: replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" - - name: v1.4.1 + - name: v1.3.10 + # Use manifest from source files + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.3.10/control-plane-components.yaml" + type: "url" + contract: v1beta1 + files: + - sourcePath: "../data/shared/metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" + - name: v1.4.9 # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.1/control-plane-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.9/control-plane-components.yaml" type: "url" contract: v1beta1 files: @@ -187,7 +236,15 @@ providers: files: - sourcePath: "../../../metadata.yaml" - sourcePath: "../data/infrastructure-nutanix/v1beta1/cluster-template.yaml" - - name: v1.2.0 # next; use manifest from source files + - name: v1.2.4 + type: url + value: https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/releases/download/v1.2.4/infrastructure-components.yaml + contract: v1beta1 + files: + - sourcePath: "../../../metadata.yaml" + - sourcePath: "../data/infrastructure-nutanix/v1.2.4/cluster-template.yaml" + - sourcePath: "../data/infrastructure-nutanix/ccm-update.yaml" + - name: v1.3.99 # next; use manifest from source files type: kustomize value: "../../../config/default" contract: v1beta1 @@ -204,13 +261,13 @@ providers: - sourcePath: "../data/infrastructure-nutanix/v1beta1/cluster-template-additional-categories.yaml" - sourcePath: "../data/infrastructure-nutanix/v1beta1/cluster-template-no-nmt.yaml" - sourcePath: "../data/infrastructure-nutanix/v1beta1/cluster-template-project.yaml" - - sourcePath: "../data/infrastructure-nutanix/v1beta1/cluster-template-ccm.yaml" - sourcePath: "../data/infrastructure-nutanix/v1beta1/cluster-template-upgrades.yaml" - sourcePath: "../data/infrastructure-nutanix/v1beta1/cluster-template-md-remediation.yaml" - sourcePath: "../data/infrastructure-nutanix/v1beta1/cluster-template-kcp-remediation.yaml" - sourcePath: "../data/infrastructure-nutanix/v1beta1/cluster-template-kcp-scale-in.yaml" - sourcePath: "../data/infrastructure-nutanix/v1beta1/cluster-template-csi.yaml" - sourcePath: "../data/infrastructure-nutanix/v1beta1/cluster-template-failure-domains.yaml" + - sourcePath: "../data/infrastructure-nutanix/ccm-update.yaml" variables: # Default variables for the e2e test; those values could be overridden via env variables, thus @@ 
-230,6 +287,7 @@ variables: KUBERNETES_VERSION: "v1.23.6" NUTANIX_SSH_AUTHORIZED_KEY: "" CONTROL_PLANE_ENDPOINT_IP: "" + CONTROL_PLANE_ENDPOINT_IP_V124: "" CONTROL_PLANE_MACHINE_COUNT: 3 WORKER_MACHINE_COUNT: 3 NUTANIX_PRISM_ELEMENT_CLUSTER_NAME: "" diff --git a/test/e2e/data/infrastructure-nutanix/ccm-update.yaml b/test/e2e/data/infrastructure-nutanix/ccm-update.yaml new file mode 100644 index 0000000000..598e5557f4 --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/ccm-update.yaml @@ -0,0 +1,249 @@ +apiVersion: v1 +data: + nutanix-ccm.yaml: | + --- + apiVersion: v1 + kind: ConfigMap + metadata: + name: user-ca-bundle + namespace: kube-system + binaryData: + ca.crt: ${NUTANIX_ADDITIONAL_TRUST_BUNDLE=""} + --- + # Source: nutanix-cloud-provider/templates/rbac.yaml + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + namespace: kube-system + --- + # Source: nutanix-cloud-provider/templates/cm.yaml + kind: ConfigMap + apiVersion: v1 + metadata: + name: nutanix-config + namespace: kube-system + data: + nutanix_config.json: |- + { + "prismCentral": { + "address": "${NUTANIX_ENDPOINT}", + "port": ${NUTANIX_PORT=9440}, + "insecure": ${NUTANIX_INSECURE=false}, + "credentialRef": { + "kind": "secret", + "name": "nutanix-creds", + "namespace": "kube-system" + }, + "additionalTrustBundle": { + "kind": "ConfigMap", + "name": "user-ca-bundle", + "namespace": "kube-system" + } + }, + "enableCustomLabeling": ${CCM_CUSTOM_LABEL=false}, + "topologyDiscovery": { + "type": "Prism" + } + } + --- + # Source: nutanix-cloud-provider/templates/rbac.yaml + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - "*" + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + --- + # Source: nutanix-cloud-provider/templates/rbac.yaml + kind: ClusterRoleBinding + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + --- + # Source: nutanix-cloud-provider/templates/cloud-provider-nutanix-deployment.yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + labels: + k8s-app: nutanix-cloud-controller-manager + name: nutanix-cloud-controller-manager + namespace: kube-system + spec: + replicas: 1 + selector: + matchLabels: + k8s-app: nutanix-cloud-controller-manager + strategy: + type: Recreate + template: + metadata: + labels: + k8s-app: nutanix-cloud-controller-manager + spec: + hostNetwork: true + priorityClassName: system-cluster-critical + nodeSelector: + node-role.kubernetes.io/control-plane: "" + serviceAccountName: cloud-controller-manager + affinity: + 
podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: nutanix-cloud-controller-manager + topologyKey: kubernetes.io/hostname + dnsPolicy: Default + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 120 + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 120 + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + containers: + - image: "${CCM_REPO=ghcr.io/nutanix-cloud-native/cloud-provider-nutanix/controller}:${CCM_TAG=v0.3.1}" + imagePullPolicy: IfNotPresent + name: nutanix-cloud-controller-manager + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + args: + - "--leader-elect=true" + - "--cloud-config=/etc/cloud/nutanix_config.json" + resources: + requests: + cpu: 100m + memory: 50Mi + volumeMounts: + - mountPath: /etc/cloud + name: nutanix-config-volume + readOnly: true + volumes: + - name: nutanix-config-volume + configMap: + name: nutanix-config +kind: ConfigMap +metadata: + name: nutanix-ccm + namespace: ${PATCH_NAMESPACE=clusterctl-upgrade} +--- +apiVersion: v1 +kind: Secret +metadata: + name: nutanix-ccm-secret + namespace: ${PATCH_NAMESPACE=clusterctl-upgrade} +stringData: + nutanix-ccm-secret.yaml: "apiVersion: v1\nkind: Secret\nmetadata:\n name: nutanix-creds\n + \ namespace: kube-system\nstringData:\n credentials: |\n [\n {\n \"type\": + \"basic_auth\", \n \"data\": { \n \"prismCentral\":{\n \"username\": + \"${NUTANIX_USER}\",\n \"password\": \"${NUTANIX_PASSWORD}\"\n },\n + \ \"prismElements\": null\n }\n }\n ]\n" +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: nutanix-ccm-crs + namespace: ${PATCH_NAMESPACE=clusterctl-upgrade} +spec: + clusterSelector: + matchLabels: + ccm: nutanix + resources: + - kind: ConfigMap + name: nutanix-ccm + - kind: Secret + name: nutanix-ccm-secret + - kind: ConfigMap + name: user-ca-bundle + strategy: ApplyOnce diff --git a/test/e2e/data/infrastructure-nutanix/v1.2.4/base/cluster-with-kcp.yaml b/test/e2e/data/infrastructure-nutanix/v1.2.4/base/cluster-with-kcp.yaml new file mode 100644 index 0000000000..b87787daee --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1.2.4/base/cluster-with-kcp.yaml @@ -0,0 +1,200 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixCluster +metadata: + name: "${CLUSTER_NAME}" + namespace: "${NAMESPACE}" +spec: + prismCentral: + address: "${NUTANIX_ENDPOINT}" + port: ${NUTANIX_PORT=9440} + insecure: ${NUTANIX_INSECURE=false} + credentialRef: + name: "${CLUSTER_NAME}" + kind: Secret + additionalTrustBundle: + name: user-ca-bundle + kind: ConfigMap + controlPlaneEndpoint: + host: "${CONTROL_PLANE_ENDPOINT_IP_V124}" + port: ${CONTROL_PLANE_ENDPOINT_PORT_V124=6443} +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}" + name: "${CLUSTER_NAME}" + namespace: "${NAMESPACE}" +spec: + clusterNetwork: + services: + cidrBlocks: ["172.19.0.0/16"] + pods: + cidrBlocks: ["172.20.0.0/16"] + serviceDomain: "cluster.local" + controlPlaneRef: + apiVersion: 
controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: "${CLUSTER_NAME}-kcp" + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixCluster + name: "${CLUSTER_NAME}" + +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: "${CLUSTER_NAME}-kcp" + namespace: "${NAMESPACE}" +spec: + replicas: ${CONTROL_PLANE_MACHINE_COUNT=1} + version: ${KUBERNETES_VERSION} + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + name: "${CLUSTER_NAME}-mt-0" + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + certSANs: + - localhost + - 127.0.0.1 + - 0.0.0.0 + extraArgs: + tls-cipher-suites: "${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256}" + controllerManager: + extraArgs: + enable-hostpath-provisioner: "true" + tls-cipher-suites: "${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256}" + scheduler: + extraArgs: + tls-cipher-suites: "${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256}" + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + name: kube-vip + namespace: kube-system + spec: + containers: + - name: kube-vip + image: ghcr.io/kube-vip/kube-vip:v0.6.4 + imagePullPolicy: IfNotPresent + args: + - manager + env: + - name: vip_arp + value: "true" + - name: address + value: "${CONTROL_PLANE_ENDPOINT_IP_V124}" + - name: port + value: "${CONTROL_PLANE_ENDPOINT_PORT_V124=6443}" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: svc_enable + value: "${KUBEVIP_SVC_ENABLE=false}" + - name: lb_enable + value: "${KUBEVIP_LB_ENABLE=false}" + - name: enableServicesElection + value: "${KUBEVIP_SVC_ELECTION=false}" + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_TIME + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + resources: {} + hostNetwork: true + hostAliases: + - hostnames: + - kubernetes + ip: 127.0.0.1 + volumes: + - name: kubeconfig + hostPath: + type: FileOrCreate + path: /etc/kubernetes/admin.conf + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd + # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 + #cgroup-driver: cgroupfs + eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10% + tls-cipher-suites: 
"${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256}" + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd + # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 + #cgroup-driver: cgroupfs + eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10% + tls-cipher-suites: "${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256}" + users: + - name: capiuser + lockPassword: false + sudo: ALL=(ALL) NOPASSWD:ALL + sshAuthorizedKeys: + - ${NUTANIX_SSH_AUTHORIZED_KEY} + preKubeadmCommands: + - echo "before kubeadm call" > /var/log/prekubeadm.log + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + postKubeadmCommands: + - echo export KUBECONFIG=/etc/kubernetes/admin.conf >> /root/.bashrc + - echo "after kubeadm call" > /var/log/postkubeadm.log + useExperimentalRetryJoin: true + verbosity: 10 + +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: "${CLUSTER_NAME}-kcfg-0" + namespace: "${NAMESPACE}" +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd + # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 + #cgroup-driver: cgroupfs + eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10% + tls-cipher-suites: "${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256}" + users: + - name: capiuser + lockPassword: false + sudo: ALL=(ALL) NOPASSWD:ALL + sshAuthorizedKeys: + - ${NUTANIX_SSH_AUTHORIZED_KEY} + preKubeadmCommands: + - echo "before kubeadm call" > /var/log/prekubeadm.log + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + postKubeadmCommands: + - echo "after kubeadm call" > /var/log/postkubeadm.log + verbosity: 10 + #useExperimentalRetryJoin: true diff --git a/test/e2e/data/infrastructure-nutanix/v1.2.4/base/cm.yaml b/test/e2e/data/infrastructure-nutanix/v1.2.4/base/cm.yaml new file mode 100644 index 0000000000..ff4d33af76 --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1.2.4/base/cm.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: user-ca-bundle + namespace: "${NAMESPACE}" +binaryData: + ca.crt: ${NUTANIX_ADDITIONAL_TRUST_BUNDLE=""} diff --git a/test/e2e/data/infrastructure-nutanix/v1.2.4/base/cni-patch.yaml b/test/e2e/data/infrastructure-nutanix/v1.2.4/base/cni-patch.yaml new file mode 100644 index 0000000000..48fb5e97b5 --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1.2.4/base/cni-patch.yaml @@ -0,0 +1,7 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cni: ${CLUSTER_NAME}-crs-cni + name: 
"${CLUSTER_NAME}" + namespace: "${NAMESPACE}" \ No newline at end of file diff --git a/test/e2e/data/infrastructure-nutanix/v1.2.4/base/crs.yaml b/test/e2e/data/infrastructure-nutanix/v1.2.4/base/crs.yaml new file mode 100644 index 0000000000..608f696def --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1.2.4/base/crs.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: "cni-${CLUSTER_NAME}-crs-cni" +data: ${CNI_RESOURCES} +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: "${CLUSTER_NAME}-crs-cni" +spec: + strategy: ApplyOnce + clusterSelector: + matchLabels: + cni: "${CLUSTER_NAME}-crs-cni" + resources: + - name: "cni-${CLUSTER_NAME}-crs-cni" + kind: ConfigMap diff --git a/test/e2e/data/infrastructure-nutanix/v1.2.4/base/md.yaml b/test/e2e/data/infrastructure-nutanix/v1.2.4/base/md.yaml new file mode 100644 index 0000000000..b5efd60dab --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1.2.4/base/md.yaml @@ -0,0 +1,28 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + name: "${CLUSTER_NAME}-wmd" + namespace: "${NAMESPACE}" +spec: + clusterName: "${CLUSTER_NAME}" + replicas: ${WORKER_MACHINE_COUNT} + selector: + matchLabels: {} + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}" + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: "${CLUSTER_NAME}-kcfg-0" + clusterName: "${CLUSTER_NAME}" + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + name: "${CLUSTER_NAME}-mt-0" + version: "${KUBERNETES_VERSION}" diff --git a/test/e2e/data/infrastructure-nutanix/v1.2.4/base/mhc.yaml b/test/e2e/data/infrastructure-nutanix/v1.2.4/base/mhc.yaml new file mode 100644 index 0000000000..7c6077e84a --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1.2.4/base/mhc.yaml @@ -0,0 +1,31 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineHealthCheck +metadata: + name: "${CLUSTER_NAME}-mhc" + namespace: "${NAMESPACE}" +spec: + clusterName: "${CLUSTER_NAME}" + maxUnhealthy: 40% + nodeStartupTimeout: 10m0s + selector: + matchLabels: + cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}" + unhealthyConditions: + - type: Ready + status: "False" + timeout: 5m0s + - type: Ready + status: Unknown + timeout: 5m0s + - type: MemoryPressure + status: "True" + timeout: 5m0s + - type: DiskPressure + status: "True" + timeout: 5m0s + - type: PIDPressure + status: "True" + timeout: 5m0s + - type: NetworkUnavailable + status: "True" + timeout: 5m0s diff --git a/test/e2e/data/infrastructure-nutanix/v1.2.4/base/nmt.yaml b/test/e2e/data/infrastructure-nutanix/v1.2.4/base/nmt.yaml new file mode 100644 index 0000000000..90829f2a76 --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1.2.4/base/nmt.yaml @@ -0,0 +1,40 @@ +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixMachineTemplate +metadata: + name: "${CLUSTER_NAME}-mt-0" + namespace: "${NAMESPACE}" +spec: + template: + spec: + providerID: "nutanix://${CLUSTER_NAME}-m1" + # Supported options for boot type: legacy and uefi + # Defaults to legacy if not set + bootType: ${NUTANIX_MACHINE_BOOT_TYPE=legacy} + vcpusPerSocket: ${NUTANIX_MACHINE_VCPU_PER_SOCKET=1} + vcpuSockets: ${NUTANIX_MACHINE_VCPU_SOCKET=2} + memorySize: "${NUTANIX_MACHINE_MEMORY_SIZE=4Gi}" + systemDiskSize: "${NUTANIX_SYSTEMDISK_SIZE=40Gi}" + image: + type: name + 
name: "${NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME}" + cluster: + type: name + name: "${NUTANIX_PRISM_ELEMENT_CLUSTER_NAME}" + subnet: + - type: name + name: "${NUTANIX_SUBNET_NAME}" + # Adds additional categories to the virtual machines. + # Note: Categories must already be present in Prism Central + # additionalCategories: + # - key: AppType + # value: Kubernetes + # Adds the cluster virtual machines to a project defined in Prism Central. + # Replace NUTANIX_PROJECT_NAME with the correct project defined in Prism Central + # Note: Project must already be present in Prism Central. + # project: + # type: name + # name: "NUTANIX_PROJECT_NAME" + # gpus: + # - type: name + # name: "GPU NAME" diff --git a/test/e2e/data/infrastructure-nutanix/v1.2.4/base/secret.yaml b/test/e2e/data/infrastructure-nutanix/v1.2.4/base/secret.yaml new file mode 100644 index 0000000000..89771a709d --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1.2.4/base/secret.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: "${CLUSTER_NAME}" + namespace: "${NAMESPACE}" +stringData: + credentials: | + [ + { + "type": "basic_auth", + "data": { + "prismCentral":{ + "username": "${NUTANIX_USER}", + "password": "${NUTANIX_PASSWORD}" + } + } + } + ] diff --git a/test/e2e/data/infrastructure-nutanix/v1.2.4/cluster-template/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1.2.4/cluster-template/kustomization.yaml new file mode 100644 index 0000000000..e82ac639d1 --- /dev/null +++ b/test/e2e/data/infrastructure-nutanix/v1.2.4/cluster-template/kustomization.yaml @@ -0,0 +1,14 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +bases: + - ../base/cluster-with-kcp.yaml + - ../base/secret.yaml + - ../base/cm.yaml + - ../base/nmt.yaml + - ../base/md.yaml + - ../base/mhc.yaml + - ../base/crs.yaml + +patchesStrategicMerge: + - ../base/cni-patch.yaml \ No newline at end of file diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-ccm/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-ccm/kustomization.yaml deleted file mode 100644 index 475061f673..0000000000 --- a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-ccm/kustomization.yaml +++ /dev/null @@ -1,6 +0,0 @@ -bases: - - ../../../../../../templates/ccm/ - - ../base/crs.yaml - -patchesStrategicMerge: - - ../base/cni-patch.yaml diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-failure-domains/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-failure-domains/kustomization.yaml index 2c20306fc0..459dd0533d 100644 --- a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-failure-domains/kustomization.yaml +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-failure-domains/kustomization.yaml @@ -4,9 +4,19 @@ bases: - ../../../../../../templates/base/cm.yaml - ../../../../../../templates/base/md.yaml - ../../../../../../templates/base/mhc.yaml + - ../../../../../../templates/base/nutanix-ccm-crs.yaml + - ../../../../../../templates/base/nutanix-ccm-secret.yaml - ../base/crs.yaml - failure-domain-nmt.yaml +configMapGenerator: +- name: nutanix-ccm + behavior: merge + files: + - ../../../../../../templates/base/nutanix-ccm.yaml + patchesStrategicMerge: - ../base/cni-patch.yaml - failure-domain-patch.yaml + - ../../../../../../templates/base/ccm-patch.yaml + diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-kcp-remediation/kustomization.yaml 
b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-kcp-remediation/kustomization.yaml index 492b2829b2..689951fdca 100644 --- a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-kcp-remediation/kustomization.yaml +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-kcp-remediation/kustomization.yaml @@ -4,8 +4,17 @@ bases: - ../../../../../../templates/base/cm.yaml - ../../../../../../templates/base/nmt.yaml - ../../../../../../templates/base/md.yaml + - ../../../../../../templates/base/nutanix-ccm-crs.yaml + - ../../../../../../templates/base/nutanix-ccm-secret.yaml - ../base/crs.yaml - ./mhc.yaml +configMapGenerator: +- name: nutanix-ccm + behavior: merge + files: + - ../../../../../../templates/base/nutanix-ccm.yaml + patchesStrategicMerge: - ../base/cni-patch.yaml + - ../../../../../../templates/base/ccm-patch.yaml diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-md-remediation/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-md-remediation/kustomization.yaml index 08d37a2e41..ef15312586 100644 --- a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-md-remediation/kustomization.yaml +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-md-remediation/kustomization.yaml @@ -4,9 +4,18 @@ bases: - ../../../../../../templates/base/cm.yaml - ../../../../../../templates/base/nmt.yaml - ../../../../../../templates/base/md.yaml + - ../../../../../../templates/base/nutanix-ccm-crs.yaml + - ../../../../../../templates/base/nutanix-ccm-secret.yaml - ../base/crs.yaml - ./mhc.yaml +configMapGenerator: +- name: nutanix-ccm + behavior: merge + files: + - ../../../../../../templates/base/nutanix-ccm.yaml + patchesStrategicMerge: - ../base/cni-patch.yaml - ./md.yaml + - ../../../../../../templates/base/ccm-patch.yaml diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-no-nmt/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-no-nmt/kustomization.yaml index 231fee7b61..7e15027194 100644 --- a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-no-nmt/kustomization.yaml +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-no-nmt/kustomization.yaml @@ -4,7 +4,16 @@ bases: - ../../../../../../templates/base/cm.yaml - ../../../../../../templates/base/md.yaml - ../../../../../../templates/base/mhc.yaml + - ../../../../../../templates/base/nutanix-ccm-crs.yaml + - ../../../../../../templates/base/nutanix-ccm-secret.yaml - ../base/crs.yaml +configMapGenerator: +- name: nutanix-ccm + behavior: merge + files: + - ../../../../../../templates/base/nutanix-ccm.yaml + patchesStrategicMerge: - ../base/cni-patch.yaml + - ../../../../../../templates/base/ccm-patch.yaml diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-no-nutanix-cluster/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-no-nutanix-cluster/kustomization.yaml index df272d1926..ce1814e967 100644 --- a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-no-nutanix-cluster/kustomization.yaml +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-no-nutanix-cluster/kustomization.yaml @@ -4,8 +4,17 @@ bases: - ../../../../../../templates/base/cm.yaml - ../../../../../../templates/base/md.yaml - ../../../../../../templates/base/mhc.yaml + - ../../../../../../templates/base/nutanix-ccm-crs.yaml + - ../../../../../../templates/base/nutanix-ccm-secret.yaml - ../base/crs.yaml 
+configMapGenerator: +- name: nutanix-ccm + behavior: merge + files: + - ../../../../../../templates/base/nutanix-ccm.yaml + patchesStrategicMerge: - ../base/cni-patch.yaml - ./nc.yaml + - ../../../../../../templates/base/ccm-patch.yaml diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-no-secret/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-no-secret/kustomization.yaml index 2eeda63f56..34b8b0ccf7 100644 --- a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-no-secret/kustomization.yaml +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-no-secret/kustomization.yaml @@ -4,7 +4,16 @@ bases: - ../../../../../../templates/base/nmt.yaml - ../../../../../../templates/base/md.yaml - ../../../../../../templates/base/mhc.yaml + - ../../../../../../templates/base/nutanix-ccm-crs.yaml + - ../../../../../../templates/base/nutanix-ccm-secret.yaml - ../base/crs.yaml +configMapGenerator: +- name: nutanix-ccm + behavior: merge + files: + - ../../../../../../templates/base/nutanix-ccm.yaml + patchesStrategicMerge: - ../base/cni-patch.yaml + - ../../../../../../templates/base/ccm-patch.yaml diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-project/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-project/kustomization.yaml index 0e766f824a..90391ea7c6 100644 --- a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-project/kustomization.yaml +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-project/kustomization.yaml @@ -5,8 +5,17 @@ bases: - ../../../../../../templates/base/nmt.yaml - ../../../../../../templates/base/md.yaml - ../../../../../../templates/base/mhc.yaml + - ../../../../../../templates/base/nutanix-ccm-crs.yaml + - ../../../../../../templates/base/nutanix-ccm-secret.yaml - ../base/crs.yaml +configMapGenerator: +- name: nutanix-ccm + behavior: merge + files: + - ../../../../../../templates/base/nutanix-ccm.yaml + patchesStrategicMerge: - ../base/cni-patch.yaml - ./nmt.yaml + - ../../../../../../templates/base/ccm-patch.yaml diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-upgrades/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-upgrades/kustomization.yaml index 507108495d..0a4f1b8f08 100644 --- a/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-upgrades/kustomization.yaml +++ b/test/e2e/data/infrastructure-nutanix/v1beta1/cluster-template-upgrades/kustomization.yaml @@ -4,8 +4,17 @@ bases: - ../../../../../../templates/base/cm.yaml - ../../../../../../templates/base/md.yaml - ../../../../../../templates/base/mhc.yaml + - ../../../../../../templates/base/nutanix-ccm-crs.yaml + - ../../../../../../templates/base/nutanix-ccm-secret.yaml - ../base/crs.yaml - ./nmt.yaml +configMapGenerator: +- name: nutanix-ccm + behavior: merge + files: + - ../../../../../../templates/base/nutanix-ccm.yaml + patchesStrategicMerge: - ../base/cni-patch.yaml + - ../../../../../../templates/base/ccm-patch.yaml diff --git a/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-ccm/kustomization.yaml b/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-ccm/kustomization.yaml deleted file mode 100644 index 2f3f12d1c1..0000000000 --- a/test/e2e/data/infrastructure-nutanix/v1beta1/no-kubeproxy/cluster-template-ccm/kustomization.yaml +++ /dev/null @@ -1,5 +0,0 @@ -bases: - - ../../cluster-template-ccm/ - -patchesStrategicMerge: - - 
../no-kubeproxy.yaml diff --git a/test/e2e/log/log.go b/test/e2e/log/log.go new file mode 100644 index 0000000000..2a7107c985 --- /dev/null +++ b/test/e2e/log/log.go @@ -0,0 +1,66 @@ +//go:build e2e +// +build e2e + +/* +Copyright 2022 Nutanix + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "fmt" + + . "github.com/onsi/ginkgo/v2" +) + +const ( + // Debug log level + LogDebug = "DEBUG" + + // Info log level + LogInfo = "INFO" + + // Warn log level + LogWarn = "WARN" + + // Error log level + LogError = "ERROR" +) + +// Debugf logs a debug message +func Debugf(format string, a ...interface{}) { + Logf(LogDebug, format, a...) +} + +// Infof logs an info message +func Infof(format string, a ...interface{}) { + Logf(LogInfo, format, a...) +} + +// Warnf logs a warning message +func Warnf(format string, a ...interface{}) { + Logf(LogWarn, format, a...) +} + +// Errorf logs an error message +func Errorf(format string, a ...interface{}) { + Logf(LogError, format, a...) +} + +// Logf logs a message with the given level +func Logf(level string, format string, a ...interface{}) { + msg := level + ": " + format + "\n" + fmt.Fprintf(GinkgoWriter, msg, a...) +}
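Note on test/e2e/clusterctl_upgrade_test.go: the new PreWaitForCluster hook reads and rewrites components.yaml with the deprecated io/ioutil helpers and discards the error returned by filepath.WalkDir. A minimal sketch of the same version scan and image rewrite with os.ReadFile/os.WriteFile and checked errors; the helper names findLatestProviderVersion and replaceControllerImage are illustrative, not part of the patch.

package e2e

import (
	"bytes"
	"os"
	"path/filepath"

	"github.com/blang/semver/v4"
)

// findLatestProviderVersion scans the local provider repository for the
// highest semver-named directory, starting from a known floor (v1.2.4),
// and propagates any walk error instead of silently dropping it.
func findLatestProviderVersion(repoDir string) (string, error) {
	latest, err := semver.ParseTolerant("v1.2.4")
	if err != nil {
		return "", err
	}
	latestStr := "v1.2.4"
	err = filepath.WalkDir(repoDir, func(path string, d os.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() {
			if v, perr := semver.ParseTolerant(d.Name()); perr == nil && latest.Compare(v) < 0 {
				latest, latestStr = v, d.Name()
			}
		}
		return nil
	})
	return latestStr, err
}

// replaceControllerImage swaps the e2e image reference in components.yaml,
// using os.ReadFile/os.WriteFile rather than the deprecated io/ioutil calls.
func replaceControllerImage(componentsPath, newImage string) error {
	componentsYaml, err := os.ReadFile(componentsPath)
	if err != nil {
		return err
	}
	componentsYaml = bytes.ReplaceAll(componentsYaml,
		[]byte("image: ghcr.io/nutanix-cloud-native/cluster-api-provider-nutanix/controller:e2e"),
		[]byte("image: "+newImage),
	)
	return os.WriteFile(componentsPath, componentsYaml, 0o644)
}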
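Note on the PostUpgrade hook: it assigns cluster.Labels["ccm"] = "nutanix" without checking that the map is non-nil, so a Cluster created without labels would panic the test. A defensive variant, sketched with the same types the patch already imports; labelClustersForCCM is an illustrative helper name.

package e2e

import (
	"context"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// labelClustersForCCM adds the ccm=nutanix label that the nutanix-ccm-crs
// ClusterResourceSet selects on, guarding against a nil Labels map and
// updating items by index rather than through the loop copy.
func labelClustersForCCM(ctx context.Context, c client.Client) ([]string, error) {
	clusterList := &clusterv1.ClusterList{}
	if err := c.List(ctx, clusterList); err != nil {
		return nil, err
	}
	names := make([]string, 0, len(clusterList.Items))
	for i := range clusterList.Items {
		cluster := &clusterList.Items[i]
		if cluster.Labels == nil {
			cluster.Labels = map[string]string{}
		}
		cluster.Labels["ccm"] = "nutanix"
		if err := c.Update(ctx, cluster); err != nil {
			return nil, err
		}
		names = append(names, cluster.Name)
	}
	return names, nil
}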
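Note on the CCM readiness wait: the patch polls for the Deployment with an unstructured Get, which only proves the object exists. A stricter check, sketched under the assumption that the workload cluster client's scheme registers apps/v1 (the CAPI test framework's default scheme does), compares ready replicas to the desired count; ccmDeploymentReady is an illustrative helper name.

package e2e

import (
	"context"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// ccmDeploymentReady returns nil once the Nutanix CCM Deployment in
// kube-system reports all desired replicas ready; suitable as the body
// of a Gomega Eventually instead of a bare existence check.
func ccmDeploymentReady(ctx context.Context, c client.Client) error {
	d := &appsv1.Deployment{}
	key := client.ObjectKey{Namespace: "kube-system", Name: "nutanix-cloud-controller-manager"}
	if err := c.Get(ctx, key, d); err != nil {
		return err
	}
	want := int32(1)
	if d.Spec.Replicas != nil {
		want = *d.Spec.Replicas
	}
	if d.Status.ReadyReplicas < want {
		return fmt.Errorf("nutanix CCM not ready: %d/%d replicas ready", d.Status.ReadyReplicas, want)
	}
	return nil
}

It would drop into the existing loop as: Eventually(func() error { return ccmDeploymentReady(ctx, clusterProxy.GetClient()) }, timeout, interval).Should(Succeed()).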
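For completeness, the new test/e2e/log package is a thin leveled wrapper over GinkgoWriter, so its output is streamed under ginkgo -v and otherwise replayed only for failed specs. A trivial usage sketch; logUsageExample is illustrative, and the package builds only under the e2e tag.

package e2e

import "github.com/nutanix-cloud-native/cluster-api-provider-nutanix/test/e2e/log"

// logUsageExample mirrors how the upgrade test above consumes the package.
func logUsageExample() {
	log.Infof("Latest version of CAPX provider found: %s", "v1.2.4")
	log.Debugf("Applying Nutanix CCM manifest")
}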