diff --git a/controllers/helpers.go b/controllers/helpers.go
index 6fa6ff6932..e8b76e81bd 100644
--- a/controllers/helpers.go
+++ b/controllers/helpers.go
@@ -504,9 +504,11 @@ func deleteCategoryKeyValues(ctx context.Context, client *prismclientv3.Client,
 		err = client.V3.DeleteCategoryValue(ctx, key, value)
 		if err != nil {
-			errorMsg := fmt.Errorf("failed to delete category with key %s. error: %v", key, err)
-			log.Error(errorMsg, "failed to delete category")
-			return errorMsg
+			errorMsg := fmt.Errorf("failed to delete category value with key:value %s:%s. error: %v", key, value, err)
+			log.Error(errorMsg, "failed to delete category value")
+			// NCN-101935: the category value may still have VMs assigned, in which case deletion fails on the Prism side; log the failure but do not block reconciliation on it.
+			// TODO(deepakmntnx): add a check for the specific error mentioned in NCN-101935 and only swallow that one.
+			return nil
 		}
 	}
diff --git a/test/e2e/categories_test.go b/test/e2e/categories_test.go
index 08989c8f8a..d419120e4f 100644
--- a/test/e2e/categories_test.go
+++ b/test/e2e/categories_test.go
@@ -20,6 +20,7 @@ package e2e
 import (
 	"context"
+	"os"
 
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
@@ -174,4 +175,111 @@ var _ = Describe("Nutanix categories", Label("capx-feature-test", "categories"),
 
 		By("PASSED!")
 	})
+
+	It("Create and delete 2 clusters with the same name with default cluster categories and should succeed", Label("same-name-clusters"), func() {
+		Expect(namespace).NotTo(BeNil())
+		flavor := clusterctl.DefaultFlavor
+		expectedClusterNameCategoryKey := infrav1.DefaultCAPICategoryKeyForName
+		By("Creating workload cluster 1", func() {
+			testHelper.deployClusterAndWait(
+				deployClusterParams{
+					clusterName:           clusterName,
+					namespace:             namespace,
+					flavor:                flavor,
+					clusterctlConfigPath:  clusterctlConfigPath,
+					artifactFolder:        artifactFolder,
+					bootstrapClusterProxy: bootstrapClusterProxy,
+				}, clusterResources)
+		})
+
+		By("Checking cluster category condition is true", func() {
+			testHelper.verifyConditionOnNutanixCluster(verifyConditionParams{
+				clusterName:           clusterName,
+				namespace:             namespace,
+				bootstrapClusterProxy: bootstrapClusterProxy,
+				expectedCondition: clusterv1.Condition{
+					Type:   infrav1.ClusterCategoryCreatedCondition,
+					Status: corev1.ConditionTrue,
+				},
+			})
+		})
+
+		By("Checking if a category was created", func() {
+			testHelper.verifyCategoryExists(ctx, expectedClusterNameCategoryKey, clusterName)
+		})
+
+		By("Checking if there are VMs assigned to this category", func() {
+			expectedCategories := map[string]string{
+				expectedClusterNameCategoryKey: clusterName,
+			}
+			testHelper.verifyCategoriesNutanixMachines(ctx, clusterName, namespace.Name, expectedCategories)
+		})
+		var cp1EndpointIP string
+		By("Setting a different control plane endpoint IP for the 2nd cluster", func() {
+			cp1EndpointIP = testHelper.getVariableFromE2eConfig("CONTROL_PLANE_ENDPOINT_IP")
+			cp2EndpointIP := testHelper.getVariableFromE2eConfig("CONTROL_PLANE_ENDPOINT_IP_WORKLOAD_CLUSTER")
+			if cp2EndpointIP == "" {
+				cp2EndpointIP = os.Getenv("CONTROL_PLANE_ENDPOINT_IP_WORKLOAD_CLUSTER")
+				if cp2EndpointIP == "" {
+					Fail("CONTROL_PLANE_ENDPOINT_IP_WORKLOAD_CLUSTER not set")
+				}
+			}
+			testHelper.updateVariableInE2eConfig("CONTROL_PLANE_ENDPOINT_IP", cp2EndpointIP)
+		})
+
+		var (
+			namespace2        *corev1.Namespace
+			clusterResources2 *clusterctl.ApplyClusterTemplateAndWaitResult
+			cancelWatches2    context.CancelFunc
+		)
+		By("Configuring the environment for the 2nd cluster", func() {
+			clusterResources2 = new(clusterctl.ApplyClusterTemplateAndWaitResult)
+			namespace2, cancelWatches2 = setupSpecNamespace(ctx, specName+"-2", bootstrapClusterProxy, artifactFolder)
+		})
+
+		By("Creating workload cluster 2 with the same name", func() {
+			testHelper.deployClusterAndWait(
+				deployClusterParams{
+					clusterName:           clusterName,
+					namespace:             namespace2,
+					flavor:                flavor,
+					clusterctlConfigPath:  clusterctlConfigPath,
+					artifactFolder:        artifactFolder,
+					bootstrapClusterProxy: bootstrapClusterProxy,
+				}, clusterResources2)
+		})
+
+		By("Checking cluster category condition is true", func() {
+			testHelper.verifyConditionOnNutanixCluster(verifyConditionParams{
+				clusterName:           clusterName,
+				namespace:             namespace2,
+				bootstrapClusterProxy: bootstrapClusterProxy,
+				expectedCondition: clusterv1.Condition{
+					Type:   infrav1.ClusterCategoryCreatedCondition,
+					Status: corev1.ConditionTrue,
+				},
+			})
+		})
+
+		By("Checking if a category was created", func() {
+			testHelper.verifyCategoryExists(ctx, expectedClusterNameCategoryKey, clusterName)
+		})
+
+		By("Checking if there are VMs assigned to this category", func() {
+			expectedCategories := map[string]string{
+				expectedClusterNameCategoryKey: clusterName,
+			}
+			testHelper.verifyCategoriesNutanixMachines(ctx, clusterName, namespace2.Name, expectedCategories)
+		})
+
+		By("Deleting the 2nd cluster and its namespace", func() {
+			dumpSpecResourcesAndCleanup(ctx, specName, bootstrapClusterProxy, artifactFolder, namespace2, cancelWatches2, clusterResources2.Cluster, e2eConfig.GetIntervals, skipCleanup)
+		})
+
+		By("Setting the original control plane endpoint IP back", func() {
+			testHelper.updateVariableInE2eConfig("CONTROL_PLANE_ENDPOINT_IP", cp1EndpointIP)
+		})
+
+		// The AfterEach block takes care of deleting the first cluster.
+	})
 })
diff --git a/test/e2e/common.go b/test/e2e/common.go
index 7aaefd828b..0dddd5647b 100644
--- a/test/e2e/common.go
+++ b/test/e2e/common.go
@@ -64,6 +64,14 @@ func setupSpecNamespace(ctx context.Context, specName string, clusterProxy frame
 	return namespace, cancelWatches
 }
 
+func unsetupNamespace(ctx context.Context, specName string, clusterProxy framework.ClusterProxy, namespace *corev1.Namespace) {
+	Byf("Deleting namespace used for hosting the %q test spec", specName)
+	framework.DeleteNamespace(ctx, framework.DeleteNamespaceInput{
+		Deleter: clusterProxy.GetClient(),
+		Name:    namespace.Name,
+	})
+}
+
 func dumpSpecResourcesAndCleanup(ctx context.Context, specName string, clusterProxy framework.ClusterProxy, artifactFolder string, namespace *corev1.Namespace, cancelWatches context.CancelFunc, cluster *clusterv1.Cluster, intervalsGetter func(spec, key string) []interface{}, skipCleanup bool) {
 	Byf("Dumping logs from the %q workload cluster", cluster.Name)
@@ -89,11 +97,7 @@ func dumpSpecResourcesAndCleanup(ctx context.Context, specName string, clusterPr
 			Namespace: namespace.Name,
 		}, intervalsGetter(specName, "wait-delete-cluster")...)
 
- Byf("Deleting namespace used for hosting the %q test spec", specName) - framework.DeleteNamespace(ctx, framework.DeleteNamespaceInput{ - Deleter: clusterProxy.GetClient(), - Name: namespace.Name, - }) + unsetupNamespace(ctx, specName, clusterProxy, namespace) } cancelWatches() } diff --git a/test/e2e/test_helpers.go b/test/e2e/test_helpers.go index 35802f99b0..b80fa3972e 100644 --- a/test/e2e/test_helpers.go +++ b/test/e2e/test_helpers.go @@ -132,6 +132,8 @@ type testHelperInterface interface { deployCluster(params deployClusterParams, clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult) deployClusterAndWait(params deployClusterParams, clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult) deleteSecret(params deleteSecretParams) + deleteAllClustersAndWait(ctx context.Context, specName string, bootstrapClusterProxy framework.ClusterProxy, namespace *corev1.Namespace, intervalsGetter func(spec, key string) []interface{}) + deleteClusterAndWait(ctx context.Context, specName string, bootstrapClusterProxy framework.ClusterProxy, cluster *clusterv1.Cluster, intervalsGetter func(spec, key string) []interface{}) findGPU(ctx context.Context, gpuName string) *prismGoClientV3.GPU generateNMTName(clusterName string) string generateNMTProviderID(clusterName string) string @@ -144,6 +146,7 @@ type testHelperInterface interface { getNutanixResourceIdentifierFromEnv(envVarKey string) infrav1.NutanixResourceIdentifier getNutanixResourceIdentifierFromE2eConfig(variableKey string) infrav1.NutanixResourceIdentifier getVariableFromE2eConfig(variableKey string) string + updateVariableInE2eConfig(variableKey string, variableValue string) stripNutanixIDFromProviderID(providerID string) string verifyCategoryExists(ctx context.Context, categoryKey, categoyValue string) verifyCategoriesNutanixMachines(ctx context.Context, clusterName, namespace string, expectedCategories map[string]string) @@ -465,6 +468,20 @@ func (t testHelper) deployClusterAndWait(params deployClusterParams, clusterReso }, clusterResources) } +func (t testHelper) deleteAllClustersAndWait(ctx context.Context, specName string, bootstrapClusterProxy framework.ClusterProxy, namespace *corev1.Namespace, intervalsGetter func(spec, key string) []interface{}) { + framework.DeleteAllClustersAndWait(ctx, framework.DeleteAllClustersAndWaitInput{ + Client: bootstrapClusterProxy.GetClient(), + Namespace: namespace.Name, + }, intervalsGetter(specName, "wait-delete-cluster")...) +} + +func (t testHelper) deleteClusterAndWait(ctx context.Context, specName string, bootstrapClusterProxy framework.ClusterProxy, cluster *clusterv1.Cluster, intervalsGetter func(spec, key string) []interface{}) { + framework.DeleteClusterAndWait(ctx, framework.DeleteClusterAndWaitInput{ + Client: bootstrapClusterProxy.GetClient(), + Cluster: cluster, + }, intervalsGetter(specName, "wait-delete-cluster")...) +} + func (t testHelper) generateNMTName(clusterName string) string { return fmt.Sprintf("%s-mt-0", clusterName) } @@ -559,6 +576,11 @@ func (t testHelper) getVariableFromE2eConfig(variableKey string) string { return variableValue } +func (t testHelper) updateVariableInE2eConfig(variableKey string, variableValue string) { + t.e2eConfig.Variables[variableKey] = variableValue + os.Setenv(variableKey, variableValue) +} + func (t testHelper) stripNutanixIDFromProviderID(providerID string) string { return strings.TrimPrefix(providerID, nutanixProviderIDPrefix) }