Make CCM for CAPX by default
 - remove node reconcile
 - manifest changes
 - Makefile changes
 - tests
adiantum committed Dec 13, 2023
1 parent 0e12aba commit 9490283
Showing 43 changed files with 1,834 additions and 786 deletions.
46 changes: 34 additions & 12 deletions Makefile
@@ -16,7 +16,8 @@ IMG_TAG := latest
endif

ifeq (${LOCAL_PROVIDER_VERSION},latest)
LOCAL_PROVIDER_VERSION := v0.0.0
# Change these versions after a release when required, here and in the e2e config (test/e2e/config/nutanix.yaml)
LOCAL_PROVIDER_VERSION := v1.3.99
endif

# PLATFORMS is a list of platforms to build for.
@@ -132,6 +133,21 @@ else
GOBIN=$(shell go env GOBIN)
endif

# Get latest git hash
GIT_COMMIT_HASH=$(shell git rev-parse HEAD)

# Get the local image registry required for clusterctl upgrade tests
LOCAL_IMAGE_REGISTRY ?= localhost:5000

ifeq (${MAKECMDGOALS},test-e2e-clusterctl-upgrade)
IMG_TAG=e2e-${GIT_COMMIT_HASH}
IMG_REPO=${LOCAL_IMAGE_REGISTRY}/controller
endif

ifeq (${MAKECMDGOALS},docker-build-e2e)
IMG_TAG=e2e-${GIT_COMMIT_HASH}
endif

# Setting SHELL to bash allows bash commands to be executed by recipes.
# This is a requirement for 'setup-envtest.sh' in the test target.
# Options are set to exit when a recipe line exits non-zero or a piped command fails.
@@ -234,17 +250,17 @@ kind-delete: ## Delete the kind cluster

.PHONY: build
build: generate fmt ## Build manager binary.
GIT_COMMIT_HASH=`git rev-parse HEAD` && \
go build -ldflags "-X main.gitCommitHash=$${GIT_COMMIT_HASH}" -o bin/manager main.go
echo "Git commit hash: ${GIT_COMMIT_HASH}"
go build -ldflags "-X main.gitCommitHash=${GIT_COMMIT_HASH}" -o bin/manager main.go

.PHONY: run
run: manifests generate fmt vet ## Run a controller from your host.
go run ./main.go

.PHONY: docker-build
docker-build: $(KO) ## Build docker image with the manager.
GIT_COMMIT_HASH=`git rev-parse HEAD` && \
KO_DOCKER_REPO=ko.local GOFLAGS="-ldflags=-X=main.gitCommitHash=$${GIT_COMMIT_HASH}" $(KO) build -B --platform=${PLATFORMS} -t ${IMG_TAG} -L .
echo "Git commit hash: ${GIT_COMMIT_HASH}"
KO_DOCKER_REPO=ko.local GOFLAGS="-ldflags=-X=main.gitCommitHash=${GIT_COMMIT_HASH}" $(KO) build -B --platform=${PLATFORMS} -t ${IMG_TAG} -L .

.PHONY: docker-push
docker-push: $(KO) ## Push docker image with the manager.
@@ -285,7 +301,10 @@ undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/confi
##@ Templates

.PHONY: cluster-e2e-templates
cluster-e2e-templates: $(KUSTOMIZE) cluster-e2e-templates-v1beta1 cluster-e2e-templates-v1alpha4 ## Generate cluster templates for all versions
cluster-e2e-templates: $(KUSTOMIZE) cluster-e2e-templates-v1beta1 cluster-e2e-templates-v1alpha4 cluster-e2e-templates-v124 ## Generate cluster templates for all versions

cluster-e2e-templates-v124: $(KUSTOMIZE) ## Generate cluster templates for CAPX v1.2.4
$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1.2.4/cluster-template --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1.2.4/cluster-template.yaml

cluster-e2e-templates-v1alpha4: $(KUSTOMIZE) ## Generate cluster templates for v1alpha4
$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1alpha4/cluster-template --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1alpha4/cluster-template.yaml
@@ -297,7 +316,6 @@ cluster-e2e-templates-v1beta1: $(KUSTOMIZE) ## Generate cluster templates for v1
$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-additional-categories --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-additional-categories.yaml
$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-no-nmt --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-no-nmt.yaml
$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-project --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-project.yaml
$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-ccm --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-ccm.yaml
$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-upgrades --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-upgrades.yaml
$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-md-remediation --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-md-remediation.yaml
$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-kcp-remediation --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-kcp-remediation.yaml
@@ -316,7 +334,6 @@ cluster-e2e-templates-no-kubeproxy: $(KUSTOMIZE) ##Generate cluster templates wi
$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-additional-categories --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-additional-categories.yaml
$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-no-nmt --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-no-nmt.yaml
$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-project --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-project.yaml
$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-ccm --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-ccm.yaml
$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-upgrades --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-upgrades.yaml
$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-md-remediation --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-md-remediation.yaml
$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-kcp-remediation --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-kcp-remediation.yaml
@@ -327,15 +344,14 @@ cluster-e2e-templates-no-kubeproxy: $(KUSTOMIZE) ##Generate cluster templates wi
cluster-templates: $(KUSTOMIZE) ## Generate cluster templates for all flavors
$(KUSTOMIZE) build $(TEMPLATES_DIR)/base > $(TEMPLATES_DIR)/cluster-template.yaml
$(KUSTOMIZE) build $(TEMPLATES_DIR)/csi > $(TEMPLATES_DIR)/cluster-template-csi.yaml
$(KUSTOMIZE) build $(TEMPLATES_DIR)/ccm > $(TEMPLATES_DIR)/cluster-template-ccm.yaml

##@ Testing

.PHONY: docker-build-e2e
docker-build-e2e: $(KO) ## Build docker image with the manager with e2e tag.
GIT_COMMIT_HASH=`git rev-parse HEAD` && \
KO_DOCKER_REPO=ko.local GOFLAGS="-ldflags=-X=main.gitCommitHash=$${GIT_COMMIT_HASH}" $(KO) build -B --platform=${PLATFORMS_E2E} -t e2e -L .
docker tag ko.local/cluster-api-provider-nutanix:e2e ${IMG_REPO}:e2e
echo "Git commit hash: ${GIT_COMMIT_HASH}"
KO_DOCKER_REPO=ko.local GOFLAGS="-ldflags=-X=main.gitCommitHash=${GIT_COMMIT_HASH}" $(KO) build -B --platform=${PLATFORMS_E2E} -t ${IMG_TAG} -L .
docker tag ko.local/cluster-api-provider-nutanix:${IMG_TAG} ${IMG_REPO}:e2e

.PHONY: prepare-local-clusterctl
prepare-local-clusterctl: manifests kustomize cluster-templates ## Prepare override file for local clusterctl.
@@ -468,6 +484,12 @@ test-e2e-cilium-no-kubeproxy:
.PHONY: test-e2e-all-cni
test-e2e-all-cni: test-e2e test-e2e-calico test-e2e-flannel test-e2e-cilium test-e2e-cilium-no-kubeproxy

.PHONY: test-e2e-clusterctl-upgrade
test-e2e-clusterctl-upgrade: docker-build-e2e $(GINKGO_BIN) cluster-e2e-templates cluster-templates ## Run the end-to-end tests
echo "Image tag for E2E test is ${IMG_TAG}"
docker tag ko.local/cluster-api-provider-nutanix:${IMG_TAG} ${IMG_REPO}:${IMG_TAG}
docker push ${IMG_REPO}:${IMG_TAG}
GINKGO_SKIP="" GIT_COMMIT="${GIT_COMMIT_HASH}" $(MAKE) test-e2e-calico

## --------------------------------------
## Hack / Tools
1 change: 1 addition & 0 deletions api/v1beta1/nutanixmachine_types.go
@@ -115,6 +115,7 @@ type NutanixMachineStatus struct {
VmUUID string `json:"vmUUID,omitempty"`

// NodeRef is a reference to the corresponding workload cluster Node if it exists.
// Deprecated: Do not use. Will be removed in a future release.
// +optional
NodeRef *corev1.ObjectReference `json:"nodeRef,omitempty"`

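With node reconciliation removed and the Nutanix CCM deployed by default, the provider no longer maintains this field, so consumers should not rely on status.nodeRef. A minimal sketch of the alternative, looking the Node up in the workload cluster by provider ID (the helper name and wiring are illustrative, not part of this commit):

```go
package example

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// findNodeByProviderID resolves the workload-cluster Node whose spec.providerID
// matches the machine's provider ID, instead of reading the deprecated
// status.nodeRef field.
func findNodeByProviderID(ctx context.Context, workloadClient client.Client, providerID string) (*corev1.Node, error) {
	nodes := &corev1.NodeList{}
	if err := workloadClient.List(ctx, nodes); err != nil {
		return nil, err
	}
	for i := range nodes.Items {
		if nodes.Items[i].Spec.ProviderID == providerID {
			return &nodes.Items[i], nil
		}
	}
	return nil, fmt.Errorf("no node found with providerID %q", providerID)
}
```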
4 changes: 3 additions & 1 deletion controllers/nutanixcluster_controller.go
@@ -333,9 +333,11 @@ func (r *NutanixClusterReconciler) reconcileCredentialRefDelete(ctx context.Cont
log := ctrl.LoggerFrom(ctx)
credentialRef, err := nutanixClient.GetCredentialRefForCluster(nutanixCluster)
if err != nil {
log.Error(err, fmt.Sprintf("error occurred while getting credential ref for cluster %s", nutanixCluster.Name))
return err
}
if credentialRef == nil {
log.V(1).Info(fmt.Sprintf("Credential ref is nil for cluster %s. Ignoring since object must be deleted", nutanixCluster.Name))
return nil
}
log.V(1).Info(fmt.Sprintf("Credential ref is kind Secret for cluster %s. Continue with deletion of secret", nutanixCluster.Name))
@@ -360,7 +362,7 @@ func (r *NutanixClusterReconciler) reconcileCredentialRefDelete(ctx context.Cont

if secret.DeletionTimestamp.IsZero() {
log.Info(fmt.Sprintf("removing secret %s in namespace %s for cluster %s", secret.Name, secret.Namespace, nutanixCluster.Name))
if err := r.Client.Delete(ctx, secret); err != nil {
if err := r.Client.Delete(ctx, secret); err != nil && !errors.IsNotFound(err) {
return err
}
}
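Tolerating NotFound here makes the secret cleanup idempotent: if a previous reconcile already removed the secret, or it never existed, cluster deletion can still proceed. A standalone sketch of the pattern (helper name assumed, not taken from this commit):

```go
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// deleteSecretIfExists deletes the secret but treats "already gone" as success,
// so repeated reconcile attempts do not fail once the object has been removed.
func deleteSecretIfExists(ctx context.Context, c client.Client, secret *corev1.Secret) error {
	if err := c.Delete(ctx, secret); err != nil && !apierrors.IsNotFound(err) {
		return err
	}
	return nil
}
```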
168 changes: 168 additions & 0 deletions controllers/nutanixcluster_controller_test.go
@@ -393,4 +393,172 @@ func TestNutanixClusterReconciler(t *testing.T) {
})
})
})

_ = Describe("NutanixCluster reconcileCredentialRefDelete", func() {
Context("Delete credentials ref reconcile succeed", func() {
It("Should not return error", func() {
ctx := context.Background()
reconciler := &NutanixClusterReconciler{
Client: k8sClient,
Scheme: runtime.NewScheme(),
}

ntnxCluster := &infrav1.NutanixCluster{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
},
Spec: infrav1.NutanixClusterSpec{
PrismCentral: &credentialTypes.NutanixPrismEndpoint{
// Adding port info to override default value (0)
Port: 9440,
CredentialRef: &credentialTypes.NutanixCredentialReference{
Name: "test",
Namespace: "default",
Kind: "Secret",
},
},
},
}

ntnxSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
},
StringData: map[string]string{
"credentials": "[{\"type\": \"basic_auth\", \"data\": { \"prismCentral\":{\"username\": \"nutanix_user\", \"password\": \"nutanix_pass\"}}}]",
},
}

// Create the NutanixSecret object
g.Expect(k8sClient.Create(ctx, ntnxSecret)).To(Succeed())

// Create the NutanixCluster object
g.Expect(k8sClient.Create(ctx, ntnxCluster)).To(Succeed())
defer func() {
err := k8sClient.Delete(ctx, ntnxCluster)
Expect(err).NotTo(HaveOccurred())
}()

// Add finalizer to Nutanix Secret
g.Expect(ctrlutil.AddFinalizer(ntnxSecret, infrav1.NutanixClusterCredentialFinalizer)).To(BeTrue())
g.Expect(k8sClient.Update(ctx, ntnxSecret)).To(Succeed())

// Reconcile deletion of the credential ref
err := reconciler.reconcileCredentialRefDelete(ctx, ntnxCluster)
g.Expect(err).NotTo(HaveOccurred())

// Check that Nutanix Secret is deleted
g.Expect(k8sClient.Get(ctx, client.ObjectKey{
Namespace: ntnxSecret.Namespace,
Name: ntnxSecret.Name,
}, ntnxSecret)).ToNot(Succeed())
})
})

Context("Delete credentials ref reconcile failed: no credential ref", func() {
It("Should return error", func() {
ctx := context.Background()
reconciler := &NutanixClusterReconciler{
Client: k8sClient,
Scheme: runtime.NewScheme(),
}

ntnxCluster := &infrav1.NutanixCluster{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
},
Spec: infrav1.NutanixClusterSpec{
PrismCentral: &credentialTypes.NutanixPrismEndpoint{
// Adding port info to override default value (0)
Port: 9440,
},
},
}

// Create the NutanixCluster object
g.Expect(k8sClient.Create(ctx, ntnxCluster)).To(Succeed())
defer func() {
err := k8sClient.Delete(ctx, ntnxCluster)
Expect(err).NotTo(HaveOccurred())
}()

// Reconcile deletion of the credential ref
err := reconciler.reconcileCredentialRefDelete(ctx, ntnxCluster)
g.Expect(err).To(HaveOccurred())
})
})

Context("Delete credentials ref reconcile failed: there is no secret", func() {
It("Should not return error", func() {
ctx := context.Background()
reconciler := &NutanixClusterReconciler{
Client: k8sClient,
Scheme: runtime.NewScheme(),
}

ntnxCluster := &infrav1.NutanixCluster{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
},
Spec: infrav1.NutanixClusterSpec{
PrismCentral: &credentialTypes.NutanixPrismEndpoint{
// Adding port info to override default value (0)
Port: 9440,
CredentialRef: &credentialTypes.NutanixCredentialReference{
Name: "test",
Namespace: "default",
Kind: "Secret",
},
},
},
}

// Create the NutanixCluster object
g.Expect(k8sClient.Create(ctx, ntnxCluster)).To(Succeed())
defer func() {
err := k8sClient.Delete(ctx, ntnxCluster)
Expect(err).NotTo(HaveOccurred())
}()

// Reconcile deletion of the credential ref
err := reconciler.reconcileCredentialRefDelete(ctx, ntnxCluster)
g.Expect(err).NotTo(HaveOccurred())
})
})

Context("Delete credentials ref reconcile failed: PrismCentral Info is null", func() {
It("Should not return error", func() {
ctx := context.Background()
reconciler := &NutanixClusterReconciler{
Client: k8sClient,
Scheme: runtime.NewScheme(),
}

ntnxCluster := &infrav1.NutanixCluster{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
},
Spec: infrav1.NutanixClusterSpec{
PrismCentral: nil,
},
}

// Create the NutanixCluster object
g.Expect(k8sClient.Create(ctx, ntnxCluster)).To(Succeed())
defer func() {
err := k8sClient.Delete(ctx, ntnxCluster)
Expect(err).NotTo(HaveOccurred())
}()

// Reconcile deletion of the credential ref
err := reconciler.reconcileCredentialRefDelete(ctx, ntnxCluster)
g.Expect(err).NotTo(HaveOccurred())
})
})
})
}
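The specs above rely on a shared k8sClient and a Gomega wrapper g that are defined elsewhere in the existing test suite; this commit only adds the Describe blocks. For readers unfamiliar with that setup, a minimal envtest-style bootstrap along these lines is typical (names, CRD path, and scheme registration are assumptions, not part of this commit):

```go
package controllers

import (
	"testing"

	. "github.com/onsi/gomega"
	"k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/envtest"
)

// setupTestEnv starts a local API server via envtest and returns a client
// plus a teardown func. The real suite also registers the infrav1 types in
// the scheme before creating NutanixCluster objects.
func setupTestEnv(t *testing.T) (client.Client, func()) {
	g := NewWithT(t)

	testEnv := &envtest.Environment{
		CRDDirectoryPaths: []string{"../config/crd/bases"}, // assumed CRD location
	}
	cfg, err := testEnv.Start()
	g.Expect(err).NotTo(HaveOccurred())

	k8sClient, err := client.New(cfg, client.Options{Scheme: scheme.Scheme})
	g.Expect(err).NotTo(HaveOccurred())

	return k8sClient, func() { g.Expect(testEnv.Stop()).To(Succeed()) }
}
```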