From d174f11f7d4afb834d825c6792a51206cdb1e50f Mon Sep 17 00:00:00 2001
From: Yannick Struyf
Date: Thu, 30 Nov 2023 09:37:46 +0100
Subject: [PATCH] Add unit tests for failure domains

---
 controllers/helpers.go                        |   5 +-
 controllers/helpers_test.go                   | 123 ++++++++++++
 controllers/nutanixcluster_controller_test.go | 182 ++++++++++++++++--
 controllers/nutanixmachine_controller_test.go | 135 ++++++++++++-
 4 files changed, 416 insertions(+), 29 deletions(-)
 create mode 100644 controllers/helpers_test.go

diff --git a/controllers/helpers.go b/controllers/helpers.go
index f4a1cfaa70..7f335ce82b 100644
--- a/controllers/helpers.go
+++ b/controllers/helpers.go
@@ -185,8 +185,11 @@ func FindVMByName(ctx context.Context, client *nutanixClientV3.Client, vmName st
 
 // GetPEUUID returns the UUID of the Prism Element cluster with the given name
 func GetPEUUID(ctx context.Context, client *nutanixClientV3.Client, peName, peUUID *string) (string, error) {
+	if client == nil {
+		return "", fmt.Errorf("cannot retrieve Prism Element UUID if nutanix client is nil")
+	}
 	if peUUID == nil && peName == nil {
-		return "", fmt.Errorf("cluster name or uuid must be passed in order to retrieve the pe")
+		return "", fmt.Errorf("cluster name or uuid must be passed in order to retrieve the Prism Element UUID")
 	}
 	if peUUID != nil && *peUUID != "" {
 		peIntentResponse, err := client.V3.GetCluster(ctx, *peUUID)
diff --git a/controllers/helpers_test.go b/controllers/helpers_test.go
new file mode 100644
index 0000000000..c4a12bf4d0
--- /dev/null
+++ b/controllers/helpers_test.go
@@ -0,0 +1,123 @@
+/*
+Copyright 2023 Nutanix
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+	"context"
+	"testing"
+
+	credentialTypes "github.com/nutanix-cloud-native/prism-go-client/environment/credentials"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"sigs.k8s.io/cluster-api/util"
+
+	infrav1 "github.com/nutanix-cloud-native/cluster-api-provider-nutanix/api/v1beta1"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+)
+
+func TestControllerHelpers(t *testing.T) {
+	g := NewWithT(t)
+
+	_ = Describe("ControllerHelpers", func() {
+		const (
+			fd1Name = "fd-1"
+			fd2Name = "fd-2"
+		)
+
+		var (
+			ntnxCluster *infrav1.NutanixCluster
+			ctx         context.Context
+		)
+
+		BeforeEach(func() {
+			ctx = context.Background()
+			ntnxCluster = &infrav1.NutanixCluster{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "test",
+					Namespace: "default",
+				},
+				Spec: infrav1.NutanixClusterSpec{
+					PrismCentral: &credentialTypes.NutanixPrismEndpoint{
+						// Adding port info to override default value (0)
+						Port: 9440,
+					},
+				},
+			}
+		})
+
+		AfterEach(func() {
+			err := k8sClient.Delete(ctx, ntnxCluster)
+			Expect(err).NotTo(HaveOccurred())
+		})
+
+		Context("Get failure domains", func() {
+			It("should error when passing empty failure domain name", func() {
+				g.Expect(k8sClient.Create(ctx, ntnxCluster)).To(Succeed())
+				_, err := GetFailureDomain("", ntnxCluster)
+				Expect(err).To(HaveOccurred())
+			})
+			It("should error when passing nil cluster", func() {
+				g.Expect(k8sClient.Create(ctx, ntnxCluster)).To(Succeed())
+				_, err := GetFailureDomain(fd1Name, nil)
+				Expect(err).To(HaveOccurred())
+			})
+			It("should error when no failure domain has been found", func() {
+				g.Expect(k8sClient.Create(ctx, ntnxCluster)).To(Succeed())
+				_, err := GetFailureDomain(fd1Name, ntnxCluster)
+				Expect(err).To(HaveOccurred())
+			})
+			It("should return the correct failure domain", func() {
+				r := util.RandomString(10)
+				fd1 := infrav1.NutanixFailureDomain{
+					Name: fd1Name,
+					Cluster: infrav1.NutanixResourceIdentifier{
+						Type: infrav1.NutanixIdentifierName,
+						Name: &r,
+					},
+					Subnets: []infrav1.NutanixResourceIdentifier{
+						{
+							Type: infrav1.NutanixIdentifierName,
+							Name: &r,
+						},
+					},
+				}
+				fd2 := infrav1.NutanixFailureDomain{
+					Name: fd2Name,
+					Cluster: infrav1.NutanixResourceIdentifier{
+						Type: infrav1.NutanixIdentifierName,
+						Name: &r,
+					},
+					Subnets: []infrav1.NutanixResourceIdentifier{
+						{
+							Type: infrav1.NutanixIdentifierName,
+							Name: &r,
+						},
+					},
+				}
+				ntnxCluster.Spec.FailureDomains = []infrav1.NutanixFailureDomain{
+					fd1,
+					fd2,
+				}
+				g.Expect(k8sClient.Create(ctx, ntnxCluster)).To(Succeed())
+				fd, err := GetFailureDomain(fd2Name, ntnxCluster)
+				Expect(err).ToNot(HaveOccurred())
+				Expect(*fd).To(Equal(fd2))
+			})
+		})
+	})
+}
diff --git a/controllers/nutanixcluster_controller_test.go b/controllers/nutanixcluster_controller_test.go
index dbfecd397c..266095d970 100644
--- a/controllers/nutanixcluster_controller_test.go
+++ b/controllers/nutanixcluster_controller_test.go
@@ -21,48 +21,81 @@ import (
 	"testing"
 
 	credentialTypes "github.com/nutanix-cloud-native/prism-go-client/environment/credentials"
+	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/cluster-api/util"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	infrav1 "github.com/nutanix-cloud-native/cluster-api-provider-nutanix/api/v1beta1"
+	nctx "github.com/nutanix-cloud-native/cluster-api-provider-nutanix/pkg/context"
 
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
+	"github.com/onsi/gomega/gstruct"
 )
 
 func TestNutanixClusterReconciler(t *testing.T) {
 	g := NewWithT(t)
 
 	_ = Describe("NutanixClusterReconciler", func() {
-		Context("Reconcile an NutanixCluster", func() {
-			It("should not error and not requeue the request", func() {
-				ctx := context.Background()
-				reconciler := &NutanixClusterReconciler{
-					Client: k8sClient,
-					Scheme: runtime.NewScheme(),
-				}
+		const (
+			fd1Name = "fd-1"
+			fd2Name = "fd-2"
+		)
+
+		var (
+			ntnxCluster *infrav1.NutanixCluster
+			ctx         context.Context
+			fd1         infrav1.NutanixFailureDomain
+			reconciler  *NutanixClusterReconciler
+			r           string
+		)
 
-				ntnxCluster := &infrav1.NutanixCluster{
-					ObjectMeta: metav1.ObjectMeta{
-						Name:      "test",
-						Namespace: "default",
+		BeforeEach(func() {
+			ctx = context.Background()
+			r = util.RandomString(10)
+			ntnxCluster = &infrav1.NutanixCluster{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "test",
+					Namespace: "default",
+				},
+				Spec: infrav1.NutanixClusterSpec{
+					PrismCentral: &credentialTypes.NutanixPrismEndpoint{
+						// Adding port info to override default value (0)
+						Port: 9440,
 					},
-					Spec: infrav1.NutanixClusterSpec{
-						PrismCentral: &credentialTypes.NutanixPrismEndpoint{
-							// Adding port info to override default value (0)
-							Port: 9440,
-						},
+				},
+			}
+			fd1 = infrav1.NutanixFailureDomain{
+				Name: fd1Name,
+				Cluster: infrav1.NutanixResourceIdentifier{
+					Type: infrav1.NutanixIdentifierName,
+					Name: &r,
+				},
+				Subnets: []infrav1.NutanixResourceIdentifier{
+					{
+						Type: infrav1.NutanixIdentifierName,
+						Name: &r,
 					},
-				}
+				},
+			}
+			reconciler = &NutanixClusterReconciler{
+				Client: k8sClient,
+				Scheme: runtime.NewScheme(),
+			}
+		})
+
+		AfterEach(func() {
+			err := k8sClient.Delete(ctx, ntnxCluster)
+			Expect(err).NotTo(HaveOccurred())
+		})
+
+		Context("Reconcile a NutanixCluster", func() {
+			It("should not error and not requeue the request", func() {
 				// Create the NutanixCluster object and expect the Reconcile to be created
 				g.Expect(k8sClient.Create(ctx, ntnxCluster)).To(Succeed())
-				defer func() {
-					err := k8sClient.Delete(ctx, ntnxCluster)
-					Expect(err).NotTo(HaveOccurred())
-				}()
 
 				result, err := reconciler.Reconcile(ctx, ctrl.Request{
 					NamespacedName: client.ObjectKey{
@@ -75,5 +108,112 @@ func TestNutanixClusterReconciler(t *testing.T) {
 				g.Expect(result.Requeue).To(BeFalse())
 			})
 		})
+
+		Context("ReconcileNormal for a NutanixCluster", func() {
+			It("should not requeue if failure message is set on nutanixCluster", func() {
+				g.Expect(k8sClient.Create(ctx, ntnxCluster)).To(Succeed())
+				ntnxCluster.Status.FailureMessage = &r
+				result, err := reconciler.reconcileNormal(&nctx.ClusterContext{
+					Context:        ctx,
+					NutanixCluster: ntnxCluster,
+				})
+				g.Expect(err).NotTo(HaveOccurred())
+				g.Expect(result.RequeueAfter).To(BeZero())
+				g.Expect(result.Requeue).To(BeFalse())
+			})
+			It("should not error and not requeue if no failure domains are configured and cluster is Ready", func() {
+				g.Expect(k8sClient.Create(ctx, ntnxCluster)).To(Succeed())
+				ntnxCluster.Status.Ready = true
+				result, err := reconciler.reconcileNormal(&nctx.ClusterContext{
+					Context:        ctx,
+					NutanixCluster: ntnxCluster,
+				})
+				g.Expect(err).NotTo(HaveOccurred())
+				g.Expect(result.RequeueAfter).To(BeZero())
+				g.Expect(result.Requeue).To(BeFalse())
+			})
+			It("should not error and not requeue if failure domains are configured and cluster is Ready", func() {
+				ntnxCluster.Spec.FailureDomains = []infrav1.NutanixFailureDomain{
+					fd1,
+				}
+				g.Expect(k8sClient.Create(ctx, ntnxCluster)).To(Succeed())
+				ntnxCluster.Status.Ready = true
+				result, err := reconciler.reconcileNormal(&nctx.ClusterContext{
+					Context:        ctx,
+					NutanixCluster: ntnxCluster,
+				})
+				g.Expect(err).NotTo(HaveOccurred())
+				g.Expect(result.RequeueAfter).To(BeZero())
+				g.Expect(result.Requeue).To(BeFalse())
+			})
+		})
+
+		Context("Reconcile failure domains", func() {
+			It("sets the failure domains in the nutanixcluster status and failure domain reconciled condition", func() {
+				ntnxCluster.Spec.FailureDomains = []infrav1.NutanixFailureDomain{
+					fd1,
+				}
+
+				// Create the NutanixCluster object and expect the Reconcile to be created
+				g.Expect(k8sClient.Create(ctx, ntnxCluster)).To(Succeed())
+				// Retrieve the applied nutanix cluster object
+				appliedNtnxCluster := &infrav1.NutanixCluster{}
+				k8sClient.Get(ctx, client.ObjectKey{
+					Namespace: ntnxCluster.Namespace,
+					Name:      ntnxCluster.Name,
+				}, appliedNtnxCluster)
+
+				err := reconciler.reconcileFailureDomains(&nctx.ClusterContext{
+					Context:        ctx,
+					NutanixCluster: appliedNtnxCluster,
+				})
+				g.Expect(err).NotTo(HaveOccurred())
+
+				g.Expect(appliedNtnxCluster.Status.Conditions).To(ContainElement(
+					gstruct.MatchFields(
+						gstruct.IgnoreExtras,
+						gstruct.Fields{
+							"Type":   Equal(infrav1.FailureDomainsReconciled),
+							"Status": Equal(corev1.ConditionTrue),
+						},
+					),
+				))
+				g.Expect(appliedNtnxCluster.Status.FailureDomains).To(HaveKey(fd1Name))
+				g.Expect(appliedNtnxCluster.Status.FailureDomains[fd1Name]).To(gstruct.MatchFields(
+					gstruct.IgnoreExtras,
+					gstruct.Fields{
+						"ControlPlane": Equal(fd1.ControlPlane),
+					},
+				))
+			})
+
+			It("sets the NoFailureDomainsReconciled condition when no failure domains are set", func() {
+				// Create the NutanixCluster object and expect the Reconcile to be created
+				g.Expect(k8sClient.Create(ctx, ntnxCluster)).To(Succeed())
+				// Retrieve the applied nutanix cluster object
+				appliedNtnxCluster := &infrav1.NutanixCluster{}
+				k8sClient.Get(ctx, client.ObjectKey{
+					Namespace: ntnxCluster.Namespace,
+					Name:      ntnxCluster.Name,
+				}, appliedNtnxCluster)
+
+				err := reconciler.reconcileFailureDomains(&nctx.ClusterContext{
+					Context:        ctx,
+					NutanixCluster: appliedNtnxCluster,
+				})
+				g.Expect(err).NotTo(HaveOccurred())
+
+				g.Expect(appliedNtnxCluster.Status.Conditions).To(ContainElement(
+					gstruct.MatchFields(
+						gstruct.IgnoreExtras,
+						gstruct.Fields{
+							"Type":   Equal(infrav1.NoFailureDomainsReconciled),
+							"Status": Equal(corev1.ConditionTrue),
+						},
+					),
+				))
+				g.Expect(appliedNtnxCluster.Status.FailureDomains).To(BeEmpty())
+			})
+		})
 	})
 }
diff --git a/controllers/nutanixmachine_controller_test.go b/controllers/nutanixmachine_controller_test.go
index ed01a7b1dc..b63d3e753e 100644
--- a/controllers/nutanixmachine_controller_test.go
+++ b/controllers/nutanixmachine_controller_test.go
@@ -22,10 +22,14 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	capiv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	"sigs.k8s.io/cluster-api/util"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	infrav1 "github.com/nutanix-cloud-native/cluster-api-provider-nutanix/api/v1beta1"
+	nctx "github.com/nutanix-cloud-native/cluster-api-provider-nutanix/pkg/context"
+	credentialTypes "github.com/nutanix-cloud-native/prism-go-client/environment/credentials"
 
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 )
@@ -34,16 +38,49 @@ func TestNutanixMachineReconciler(t *testing.T) {
 	g := NewWithT(t)
 
 	_ = Describe("NutanixMachineReconciler", func() {
+		var (
+			reconciler  *NutanixMachineReconciler
+			ctx         context.Context
+			ntnxMachine *infrav1.NutanixMachine
+			machine     *capiv1.Machine
+			ntnxCluster *infrav1.NutanixCluster
+			r           string
+		)
+
+		BeforeEach(func() {
+			ctx = context.Background()
+			r = util.RandomString(10)
+			reconciler = &NutanixMachineReconciler{
+				Client: k8sClient,
+				Scheme: runtime.NewScheme(),
+			}
+
+			ntnxMachine = &infrav1.NutanixMachine{ObjectMeta: metav1.ObjectMeta{
+				Name:      "test",
+				Namespace: "default",
+			}}
+			machine = &capiv1.Machine{ObjectMeta: metav1.ObjectMeta{
+				Name:      "test",
+				Namespace: "default",
+			}}
+
+			ntnxCluster = &infrav1.NutanixCluster{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "test",
+					Namespace: "default",
+				},
+				Spec: infrav1.NutanixClusterSpec{
+					PrismCentral: &credentialTypes.NutanixPrismEndpoint{
+						// Adding port info to override default value (0)
+						Port: 9440,
+					},
+				},
+			}
+		})
+
 		Context("Reconcile an NutanixMachine", func() {
 			It("should not error or requeue the request", func() {
 				By("Calling reconcile")
-				reconciler := &NutanixMachineReconciler{
-					Client: k8sClient,
-					Scheme: runtime.NewScheme(),
-				}
-
-				ctx := context.Background()
-				ntnxMachine := &infrav1.NutanixMachine{ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}}
 				result, err := reconciler.Reconcile(ctx, ctrl.Request{
 					NamespacedName: client.ObjectKey{
 						Namespace: ntnxMachine.Namespace,
@@ -55,5 +92,89 @@ func TestNutanixMachineReconciler(t *testing.T) {
 				g.Expect(result.Requeue).To(BeFalse())
 			})
 		})
+		Context("Validates machine config", func() {
+			It("should error if no failure domain is present on machine and no subnets are passed", func() {
+				err := reconciler.validateMachineConfig(&nctx.MachineContext{
+					Context:        ctx,
+					NutanixMachine: ntnxMachine,
+					Machine:        machine,
+				})
+				g.Expect(err).To(HaveOccurred())
+			})
+		})
+
+		Context("Gets the subnet and PE UUIDs", func() {
+			It("should error if nil machine context is passed", func() {
+				_, _, err := reconciler.GetSubnetAndPEUUIDs(nil)
+				g.Expect(err).To(HaveOccurred())
+			})
+		})
+		It("should error if machine has no failure domain and Prism Element info is missing on nutanix machine", func() {
+			_, _, err := reconciler.GetSubnetAndPEUUIDs(&nctx.MachineContext{
+				Context:        ctx,
+				NutanixMachine: ntnxMachine,
+				Machine:        machine,
+				NutanixCluster: ntnxCluster,
+			})
+			g.Expect(err).To(HaveOccurred())
+		})
+		It("should error if machine has no failure domain and subnet info is missing on nutanix machine", func() {
+			ntnxMachine.Spec.Cluster = infrav1.NutanixResourceIdentifier{
+				Type: infrav1.NutanixIdentifierName,
+				Name: &r,
+			}
+			_, _, err := reconciler.GetSubnetAndPEUUIDs(&nctx.MachineContext{
+				Context:        ctx,
+				NutanixMachine: ntnxMachine,
+				Machine:        machine,
+				NutanixCluster: ntnxCluster,
+			})
+			g.Expect(err).To(HaveOccurred())
+		})
+		It("should error if machine has no failure domain and nutanixClient is nil", func() {
+			ntnxMachine.Spec.Cluster = infrav1.NutanixResourceIdentifier{
+				Type: infrav1.NutanixIdentifierName,
+				Name: &r,
+			}
+			ntnxMachine.Spec.Subnets = []infrav1.NutanixResourceIdentifier{
+				{
+					Type: infrav1.NutanixIdentifierName,
+					Name: &r,
+				},
+			}
+			_, _, err := reconciler.GetSubnetAndPEUUIDs(&nctx.MachineContext{
+				Context:        ctx,
+				NutanixMachine: ntnxMachine,
+				Machine:        machine,
+				NutanixCluster: ntnxCluster,
+			})
+			g.Expect(err).To(HaveOccurred())
+		})
+		It("should error if machine has a failure domain but it is missing on the nutanixCluster object", func() {
+			machine.Spec.FailureDomain = &r
+
+			_, _, err := reconciler.GetSubnetAndPEUUIDs(&nctx.MachineContext{
+				Context:        ctx,
+				NutanixMachine: ntnxMachine,
+				Machine:        machine,
+				NutanixCluster: ntnxCluster,
+			})
+			g.Expect(err).To(HaveOccurred())
+		})
+		It("should error if machine and nutanixCluster have a failure domain but nutanixClient is nil", func() {
+			machine.Spec.FailureDomain = &r
+			ntnxCluster.Spec.FailureDomains = []infrav1.NutanixFailureDomain{
+				{
+					Name: r,
+				},
+			}
+			_, _, err := reconciler.GetSubnetAndPEUUIDs(&nctx.MachineContext{
+				Context:        ctx,
+				NutanixMachine: ntnxMachine,
+				Machine:        machine,
+				NutanixCluster: ntnxCluster,
+			})
+			g.Expect(err).To(HaveOccurred())
+		})
 	})
 }