Make CCM the default for CAPX
 - remove the node reconcile logic
 - update manifests
 - update the Makefile
 - update tests
adiantum committed Dec 4, 2023
1 parent 266c300 commit 596ee79
Showing 40 changed files with 1,581 additions and 777 deletions.
32 changes: 20 additions & 12 deletions Makefile
@@ -16,7 +16,7 @@ IMG_TAG := latest
 endif
 
 ifeq (${LOCAL_PROVIDER_VERSION},latest)
-LOCAL_PROVIDER_VERSION := v0.0.0
+LOCAL_PROVIDER_VERSION := v1.3.99
 endif
 
 # PLATFORMS is a list of platforms to build for.
@@ -132,6 +132,9 @@ else
 GOBIN=$(shell go env GOBIN)
 endif
 
+# Get latest git hash
+GIT_COMMIT_HASH=$(shell git rev-parse HEAD)
+
 # Setting SHELL to bash allows bash commands to be executed by recipes.
 # This is a requirement for 'setup-envtest.sh' in the test target.
 # Options are set to exit when a recipe line exits non-zero or a piped command fails.
@@ -234,17 +237,17 @@ kind-delete: ## Delete the kind cluster
 
 .PHONY: build
 build: generate fmt ## Build manager binary.
-	GIT_COMMIT_HASH=`git rev-parse HEAD` && \
-	go build -ldflags "-X main.gitCommitHash=$${GIT_COMMIT_HASH}" -o bin/manager main.go
+	echo "Git commit hash: ${GIT_COMMIT_HASH}"
+	go build -ldflags "-X main.gitCommitHash=${GIT_COMMIT_HASH}" -o bin/manager main.go
 
 .PHONY: run
 run: manifests generate fmt vet ## Run a controller from your host.
 	go run ./main.go
 
 .PHONY: docker-build
 docker-build: $(KO) ## Build docker image with the manager.
-	GIT_COMMIT_HASH=`git rev-parse HEAD` && \
-	KO_DOCKER_REPO=ko.local GOFLAGS="-ldflags=-X=main.gitCommitHash=$${GIT_COMMIT_HASH}" $(KO) build -B --platform=${PLATFORMS} -t ${IMG_TAG} -L .
+	echo "Git commit hash: ${GIT_COMMIT_HASH}"
+	KO_DOCKER_REPO=ko.local GOFLAGS="-ldflags=-X=main.gitCommitHash=${GIT_COMMIT_HASH}" $(KO) build -B --platform=${PLATFORMS} -t ${IMG_TAG} -L .
 
 .PHONY: docker-push
 docker-push: $(KO) ## Push docker image with the manager.
@@ -285,7 +288,10 @@ undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/confi
 ##@ Templates
 
 .PHONY: cluster-e2e-templates
-cluster-e2e-templates: $(KUSTOMIZE) cluster-e2e-templates-v1beta1 cluster-e2e-templates-v1alpha4 ## Generate cluster templates for all versions
+cluster-e2e-templates: $(KUSTOMIZE) cluster-e2e-templates-v1beta1 cluster-e2e-templates-v1alpha4 cluster-e2e-templates-v124 ## Generate cluster templates for all versions
+
+cluster-e2e-templates-v124: $(KUSTOMIZE) ## Generate cluster templates for CAPX v1.2.4
+	$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1.2.4/cluster-template --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1.2.4/cluster-template.yaml
 
 cluster-e2e-templates-v1alpha4: $(KUSTOMIZE) ## Generate cluster templates for v1alpha4
 	$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1alpha4/cluster-template --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1alpha4/cluster-template.yaml
@@ -297,7 +303,6 @@ cluster-e2e-templates-v1beta1: $(KUSTOMIZE) ## Generate cluster templates for v1
 	$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-additional-categories --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-additional-categories.yaml
 	$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-no-nmt --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-no-nmt.yaml
 	$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-project --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-project.yaml
-	$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-ccm --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-ccm.yaml
 	$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-upgrades --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-upgrades.yaml
 	$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-md-remediation --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-md-remediation.yaml
 	$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-kcp-remediation --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-kcp-remediation.yaml
@@ -315,7 +320,6 @@ cluster-e2e-templates-no-kubeproxy: $(KUSTOMIZE) ##Generate cluster templates wi
 	$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-additional-categories --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-additional-categories.yaml
 	$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-no-nmt --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-no-nmt.yaml
 	$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-project --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-project.yaml
-	$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-ccm --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-ccm.yaml
 	$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-upgrades --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-upgrades.yaml
 	$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-md-remediation --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-md-remediation.yaml
 	$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1beta1/no-kubeproxy/cluster-template-kcp-remediation --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1beta1/cluster-template-kcp-remediation.yaml
@@ -325,15 +329,14 @@ cluster-e2e-templates-no-kubeproxy: $(KUSTOMIZE) ##Generate cluster templates wi
 cluster-templates: $(KUSTOMIZE) ## Generate cluster templates for all flavors
 	$(KUSTOMIZE) build $(TEMPLATES_DIR)/base > $(TEMPLATES_DIR)/cluster-template.yaml
 	$(KUSTOMIZE) build $(TEMPLATES_DIR)/csi > $(TEMPLATES_DIR)/cluster-template-csi.yaml
-	$(KUSTOMIZE) build $(TEMPLATES_DIR)/ccm > $(TEMPLATES_DIR)/cluster-template-ccm.yaml
 
 ##@ Testing
 
 .PHONY: docker-build-e2e
 docker-build-e2e: $(KO) ## Build docker image with the manager with e2e tag.
-	GIT_COMMIT_HASH=`git rev-parse HEAD` && \
-	KO_DOCKER_REPO=ko.local GOFLAGS="-ldflags=-X=main.gitCommitHash=$${GIT_COMMIT_HASH}" $(KO) build -B --platform=${PLATFORMS_E2E} -t e2e -L .
-	docker tag ko.local/cluster-api-provider-nutanix:e2e ${IMG_REPO}:e2e
+	echo "Git commit hash: ${GIT_COMMIT_HASH}"
+	KO_DOCKER_REPO=ko.local GOFLAGS="-ldflags=-X=main.gitCommitHash=${GIT_COMMIT_HASH}" $(KO) build -B --platform=${PLATFORMS_E2E} -t e2e-${GIT_COMMIT_HASH} -L .
+	docker tag ko.local/cluster-api-provider-nutanix:e2e-${GIT_COMMIT_HASH} ${IMG_REPO}:e2e
 
 .PHONY: prepare-local-clusterctl
 prepare-local-clusterctl: manifests kustomize cluster-templates ## Prepare overide file for local clusterctl.
@@ -466,6 +469,11 @@ test-e2e-cilium-no-kubeproxy:
 .PHONY: test-e2e-all-cni
 test-e2e-all-cni: test-e2e test-e2e-calico test-e2e-flannel test-e2e-cilium test-e2e-cilium-no-kubeproxy
 
+.PHONY: test-e2e-clusterctl-upgrade
+test-e2e-clusterctl-upgrade: docker-build-e2e $(GINKGO_BIN) cluster-e2e-templates cluster-templates ## Run the end-to-end tests
+	docker tag ko.local/cluster-api-provider-nutanix:${IMG_TAG} harbor.eng.nutanix.com/prow-tests/capx/cluster-api-provider-nutanix/controller:${IMG_TAG}
+	docker push harbor.eng.nutanix.com/prow-tests/capx/cluster-api-provider-nutanix/controller:${IMG_TAG}
+	GINKGO_SKIP="" GIT_COMMIT="${GIT_COMMIT_HASH}" $(MAKE) test-e2e-calico
+
 ## --------------------------------------
 ## Hack / Tools
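The `-X main.gitCommitHash=<hash>` ldflag used by `build`, `docker-build`, and `docker-build-e2e` stamps the binary with the commit hash at link time; hoisting the hash into the make variable `GIT_COMMIT_HASH` lets every target, including the new `test-e2e-clusterctl-upgrade`, reference the same value. A minimal sketch of the Go side of that contract, assuming `main.go` follows the usual pattern of a package-level variable whose name matches the flag:

```go
package main

import "fmt"

// gitCommitHash is populated at link time via:
//   go build -ldflags "-X main.gitCommitHash=$(git rev-parse HEAD)"
// It stays empty when the binary is built without the flag.
var gitCommitHash string

func main() {
	fmt.Println("Git commit hash:", gitCommitHash)
}
```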
1 change: 1 addition & 0 deletions api/v1beta1/nutanixmachine_types.go
@@ -113,6 +113,7 @@ type NutanixMachineStatus struct {
 	VmUUID string `json:"vmUUID,omitempty"`
 
 	// NodeRef is a reference to the corresponding workload cluster Node if it exists.
+	// Deprecated: Do not use. Will be removed in a future release.
 	// +optional
 	NodeRef *corev1.ObjectReference `json:"nodeRef,omitempty"`
 
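With `status.nodeRef` on NutanixMachine deprecated, callers that need the workload-cluster Node can read the `nodeRef` that Cluster API itself maintains on the owning Machine instead. A hedged migration sketch (the helper below is illustrative, not code from this commit):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

// nodeRefFor returns the workload-cluster Node reference that the CAPI
// controllers record on the Machine, replacing reads of the deprecated
// NutanixMachineStatus.NodeRef field.
func nodeRefFor(machine *clusterv1.Machine) (*corev1.ObjectReference, error) {
	if machine.Status.NodeRef == nil {
		return nil, fmt.Errorf("machine %s has no nodeRef yet", machine.Name)
	}
	return machine.Status.NodeRef, nil
}

func main() {
	if _, err := nodeRefFor(&clusterv1.Machine{}); err != nil {
		fmt.Println(err) // a fresh Machine has no nodeRef yet
	}
}
```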
7 changes: 5 additions & 2 deletions controllers/nutanixcluster_controller.go
@@ -231,9 +231,11 @@ func (r *NutanixClusterReconciler) reconcileDelete(rctx *nctx.ClusterContext) (r
 	}
 
 	// Remove the finalizer from the NutanixCluster object
+	log.V(1).Info("Removing finalizer from NutanixCluster")
 	ctrlutil.RemoveFinalizer(rctx.NutanixCluster, infrav1.NutanixClusterFinalizer)
 
 	// Remove the workload cluster client from cache
+	log.V(1).Info("Removing workload cluster client from cache")
 	clusterKey := apitypes.NamespacedName{
 		Namespace: rctx.Cluster.Namespace,
 		Name:      rctx.Cluster.Name,
@@ -334,9 +336,10 @@ func (r *NutanixClusterReconciler) reconcileCredentialRefDelete(ctx context.Cont
 	}
 
 	if secret.DeletionTimestamp.IsZero() {
-		log.Info(fmt.Sprintf("removing secret %s in namespace %s for cluster %s", secret.Name, secret.Namespace, nutanixCluster.Name))
+		log.V(1).Info(fmt.Sprintf("removing secret %s in namespace %s for cluster %s", secret.Name, secret.Namespace, nutanixCluster.Name))
 		if err := r.Client.Delete(ctx, secret); err != nil {
-			return err
+			log.V(1).Info(fmt.Sprintf("failed to delete secret %s in namespace %s for cluster %s. Ignore the error: %s", secret.Name, secret.Namespace, nutanixCluster.Name, err))
+			return nil
 		}
 	}
 
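Note the behavioral change in `reconcileCredentialRefDelete`: a failed secret delete is now logged at V(1) and swallowed rather than returned, so credential cleanup can no longer block cluster deletion. The same best-effort pattern, extracted as a standalone sketch (helper name and signature are assumptions, not code from this commit):

```go
package main

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// deleteBestEffort removes obj but never fails the caller: NotFound means
// the work is already done, and any other error is only reported.
func deleteBestEffort(ctx context.Context, c client.Client, obj client.Object) {
	if err := c.Delete(ctx, obj); err != nil && !apierrors.IsNotFound(err) {
		fmt.Printf("ignoring delete failure for %s: %v\n", obj.GetName(), err)
	}
}

func main() {} // deleteBestEffort above is illustrative only
```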
71 changes: 0 additions & 71 deletions controllers/nutanixmachine_controller.go
@@ -373,10 +373,6 @@ func (r *NutanixMachineReconciler) reconcileNormal(rctx *nctx.MachineContext) (r
 	}
 	log.Info(fmt.Sprintf("The NutanixMachine is ready, providerID: %s", rctx.NutanixMachine.Spec.ProviderID))
 
-	if rctx.NutanixMachine.Status.NodeRef == nil {
-		return r.reconcileNode(rctx)
-	}
-
 	return reconcile.Result{}, nil
 }
 
@@ -445,73 +441,6 @@ func (r *NutanixMachineReconciler) reconcileNormal(rctx *nctx.MachineContext) (r
 	return reconcile.Result{}, nil
 }
 
-// reconcileNode makes sure the NutanixMachine corresponding workload cluster node
-// is ready and set its spec.providerID
-func (r *NutanixMachineReconciler) reconcileNode(rctx *nctx.MachineContext) (reconcile.Result, error) {
-	log := ctrl.LoggerFrom(rctx.Context)
-	log.V(1).Info("Reconcile the workload cluster node to set its spec.providerID")
-
-	clusterKey := apitypes.NamespacedName{
-		Namespace: rctx.Cluster.Namespace,
-		Name:      rctx.Cluster.Name,
-	}
-	remoteClient, err := nctx.GetRemoteClient(rctx.Context, r.Client, clusterKey)
-	if err != nil {
-		if r.isGetRemoteClientConnectionError(err) {
-			log.Info(fmt.Sprintf("Controlplane endpoint not yet responding. Requeuing: %v", err))
-			return reconcile.Result{Requeue: true}, nil
-		}
-		log.Info(fmt.Sprintf("Failed to get the client to access remote workload cluster %s. %v", rctx.Cluster.Name, err))
-		return reconcile.Result{}, err
-	}
-
-	// Retrieve the remote node
-	nodeName := rctx.Machine.Name
-	node := &corev1.Node{}
-	nodeKey := apitypes.NamespacedName{
-		Namespace: "",
-		Name:      nodeName,
-	}
-
-	if err := remoteClient.Get(rctx.Context, nodeKey, node); err != nil {
-		if apierrors.IsNotFound(err) {
-			log.Info(fmt.Sprintf("workload node %s not yet ready. Requeuing", nodeName))
-			return reconcile.Result{Requeue: true}, nil
-		} else {
-			log.Error(err, fmt.Sprintf("failed to retrieve the remote workload cluster node %s", nodeName))
-			return reconcile.Result{}, err
-		}
-	}
-
-	// Set the NutanixMachine Status.NodeRef
-	if rctx.NutanixMachine.Status.NodeRef == nil {
-		rctx.NutanixMachine.Status.NodeRef = &corev1.ObjectReference{
-			Kind:       node.Kind,
-			APIVersion: node.APIVersion,
-			Name:       node.Name,
-			UID:        node.UID,
-		}
-		log.V(1).Info(fmt.Sprintf("Set NutanixMachine's status.nodeRef: %v", *rctx.NutanixMachine.Status.NodeRef))
-	}
-
-	// Update the node's Spec.ProviderID
-	patchHelper, err := patch.NewHelper(node, remoteClient)
-	if err != nil {
-		log.Error(err, fmt.Sprintf("failed to create patchHelper for the workload cluster node %s", nodeName))
-		return reconcile.Result{}, err
-	}
-
-	node.Spec.ProviderID = rctx.NutanixMachine.Spec.ProviderID
-	err = patchHelper.Patch(rctx.Context, node)
-	if err != nil {
-		log.Error(err, fmt.Sprintf("failed to patch the remote workload cluster node %s's spec.providerID", nodeName))
-		return reconcile.Result{}, err
-	}
-	log.Info(fmt.Sprintf("Patched the workload node %s spec.providerID: %s", nodeName, node.Spec.ProviderID))
-
-	return reconcile.Result{}, nil
-}
-
 func (r *NutanixMachineReconciler) validateMachineConfig(rctx *nctx.MachineContext) error {
 	if len(rctx.NutanixMachine.Spec.Subnets) == 0 {
 		return fmt.Errorf("atleast one subnet is needed to create the VM %s", rctx.NutanixMachine.Name)
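Dropping `reconcileNode` moves ownership of `node.spec.providerID` from CAPX to the Nutanix CCM, which the templates now deploy by default. One way to spot-check that hand-off against a workload cluster, sketched with client-go (the kubeconfig path is a placeholder, not part of this commit):

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Path to the workload cluster kubeconfig (hypothetical).
	cfg, err := clientcmd.BuildConfigFromFlags("", "workload.kubeconfig")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	nodes, err := cs.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, n := range nodes.Items {
		// With the CCM enabled, each Node's spec.providerID should be
		// populated by the cloud-node controller rather than by CAPX.
		fmt.Printf("%s providerID=%q\n", n.Name, n.Spec.ProviderID)
	}
}
```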
3 changes: 1 addition & 2 deletions hack/install-go.sh
@@ -4,8 +4,7 @@ set -o errexit
 set -o nounset
 set -o pipefail
 
-wget https://go.dev/dl/go1.21.4.linux-amd64.tar.gz
+wget -q https://go.dev/dl/go1.21.4.linux-amd64.tar.gz
 rm -rf /usr/local/go && tar -C /usr/local -xzf go1.21.4.linux-amd64.tar.gz
 export PATH=$PATH:/usr/local/go/bin
 go version
-
3 changes: 3 additions & 0 deletions metadata.yaml
@@ -30,6 +30,9 @@ releaseSeries:
 - major: 1
   minor: 2
   contract: v1beta1
+- major: 1
+  minor: 3
+  contract: v1beta1
 - major: 0
   minor: 0
   contract: v1beta1
2 changes: 1 addition & 1 deletion scripts/ccm_nutanix_update.sh
@@ -22,4 +22,4 @@ helm template -n kube-system nutanix-cloud-provider nutanix/nutanix-cloud-provid
   --set prismCentralEndPoint='${NUTANIX_ENDPOINT}',prismCentralPort='${NUTANIX_PORT=9440}',prismCentralInsecure='${NUTANIX_INSECURE=false}' \
   --set image.repository="\${CCM_REPO=$NUTANIX_CCM_REPO}",image.tag="\${CCM_TAG=v$NUTANIX_CCM_VERSION}" \
   --set createSecret=false \
-  > templates/ccm/nutanix-ccm.yaml
+  > templates/base/nutanix-ccm.yaml
File renamed without changes.
12 changes: 12 additions & 0 deletions templates/base/kustomization.yaml
@@ -1,10 +1,22 @@
 apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
 
+configMapGenerator:
+- name: nutanix-ccm
+  behavior: merge
+  files:
+  - nutanix-ccm.yaml
+
 bases:
 - ./cluster-with-kcp.yaml
 - ./secret.yaml
 - ./cm.yaml
 - ./nmt.yaml
 - ./md.yaml
 - ./mhc.yaml
+- ./nutanix-ccm-crs.yaml
+- ./nutanix-ccm-secret.yaml
+
+patchesStrategicMerge:
+- ccm-patch.yaml
+
File renamed without changes.
File renamed without changes.
File renamed without changes.
16 changes: 0 additions & 16 deletions templates/ccm/kustomization.yaml

This file was deleted.
