fix(controller): ignore category value delete error #470

Merged · 1 commit · Aug 15, 2024
8 changes: 5 additions & 3 deletions controllers/helpers.go
@@ -503,9 +503,11 @@

err = client.V3.DeleteCategoryValue(ctx, key, value)
if err != nil {
errorMsg := fmt.Errorf("failed to delete category with key %s. error: %v", key, err)
log.Error(errorMsg, "failed to delete category")
return errorMsg
errorMsg := fmt.Errorf("failed to delete category value with key:value %s:%s. error: %v", key, value, err)
log.Error(errorMsg, "failed to delete category value")
// NCN-101935: If the category value still has VMs assigned, do not delete the category key:value
// TODO:deepakmntnx Add a check for specific error mentioned in NCN-101935
return nil
}
}
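Review note: the hunk above now returns `nil` for every `DeleteCategoryValue` failure. A narrower follow-up for the TODO — sketched here under assumptions, since the exact error from NCN-101935 isn't shown in this PR — would be to ignore only the "value still in use" error and keep surfacing everything else. The helper name and the matched substring below are hypothetical.

```go
package controllers

import "strings"

// isCategoryValueInUseError reports whether err looks like the "category value
// still has VMs assigned" failure referenced by NCN-101935. The substring match
// is a placeholder; the real check should target the specific API error code or text.
func isCategoryValueInUseError(err error) bool {
	if err == nil {
		return false
	}
	return strings.Contains(err.Error(), "still has entities")
}
```

With such a check, the branch above could `return nil` only when `isCategoryValueInUseError(err)` is true and fall back to `return errorMsg` otherwise.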

@@ -773,7 +775,7 @@
managementEndpoint, err := clientHelper.BuildManagementEndpoint(ctx, cluster)
if err != nil {
log.Error(err, fmt.Sprintf("error occurred while getting management endpoint for cluster %q", cluster.GetNamespacedName()))
conditions.MarkFalse(cluster, infrav1.PrismCentralClientCondition, infrav1.PrismCentralClientInitializationFailed, capiv1.ConditionSeverityError, err.Error())

Check failure on line 778 in controllers/helpers.go — GitHub Actions / build-container: printf: non-constant format string in call to sigs.k8s.io/cluster-api/util/conditions.MarkFalse (govet)
return nil, err
}
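Review note on the three govet annotations in this file: `conditions.MarkFalse` takes a printf-style `messageFormat` plus arguments, and passing `err.Error()` directly as that format is what trips `printf: non-constant format string`. One common way to satisfy the check — shown for the first call site only, as a sketch rather than part of this PR, reusing the identifiers already present in the diff — is to use a constant format and pass the error as an argument:

```go
conditions.MarkFalse(cluster, infrav1.PrismCentralClientCondition,
	infrav1.PrismCentralClientInitializationFailed,
	capiv1.ConditionSeverityError, "%v", err)
```

The same change would apply to the calls flagged at lines 788 and 803.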

@@ -783,7 +785,7 @@
})
if err != nil {
log.Error(err, "error occurred while getting nutanix prism v3 Client from cache")
conditions.MarkFalse(cluster, infrav1.PrismCentralClientCondition, infrav1.PrismCentralClientInitializationFailed, capiv1.ConditionSeverityError, err.Error())

Check failure on line 788 in controllers/helpers.go — GitHub Actions / build-container: printf: non-constant format string in call to sigs.k8s.io/cluster-api/util/conditions.MarkFalse (govet)
return nil, fmt.Errorf("nutanix prism v3 Client error: %w", err)
}

@@ -798,7 +800,7 @@
managementEndpoint, err := clientHelper.BuildManagementEndpoint(ctx, cluster)
if err != nil {
log.Error(err, fmt.Sprintf("error occurred while getting management endpoint for cluster %q", cluster.GetNamespacedName()))
conditions.MarkFalse(cluster, infrav1.PrismCentralV4ClientCondition, infrav1.PrismCentralV4ClientInitializationFailed, capiv1.ConditionSeverityError, err.Error())

Check failure on line 803 in controllers/helpers.go — GitHub Actions / build-container: printf: non-constant format string in call to sigs.k8s.io/cluster-api/util/conditions.MarkFalse (govet)
return nil, err
}

108 changes: 108 additions & 0 deletions test/e2e/categories_test.go
@@ -20,6 +20,7 @@ package e2e

import (
"context"
"os"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -174,4 +175,111 @@ var _ = Describe("Nutanix categories", Label("capx-feature-test", "categories"),

By("PASSED!")
})

It("Create and delete 2 clusters with same name with default cluster categories and should succeed", Label("same-name-clusters"), func() {
Expect(namespace).NotTo(BeNil())
flavor := clusterctl.DefaultFlavor
expectedClusterNameCategoryKey := infrav1.DefaultCAPICategoryKeyForName
By("Creating a workload cluster 1", func() {
testHelper.deployClusterAndWait(
deployClusterParams{
clusterName: clusterName,
namespace: namespace,
flavor: flavor,
clusterctlConfigPath: clusterctlConfigPath,
artifactFolder: artifactFolder,
bootstrapClusterProxy: bootstrapClusterProxy,
}, clusterResources)
})

By("Checking cluster category condition is true", func() {
testHelper.verifyConditionOnNutanixCluster(verifyConditionParams{
clusterName: clusterName,
namespace: namespace,
bootstrapClusterProxy: bootstrapClusterProxy,
expectedCondition: clusterv1.Condition{
Type: infrav1.ClusterCategoryCreatedCondition,
Status: corev1.ConditionTrue,
},
})
})

By("Checking if a category was created", func() {
testHelper.verifyCategoryExists(ctx, expectedClusterNameCategoryKey, clusterName)
})

By("Checking if there are VMs assigned to this category", func() {
expectedCategories := map[string]string{
expectedClusterNameCategoryKey: clusterName,
}
testHelper.verifyCategoriesNutanixMachines(ctx, clusterName, namespace.Name, expectedCategories)
})
var cp1EndpointIP string
By("Setting different Control plane endpoint IP for 2nd cluster", func() {
cp1EndpointIP = testHelper.getVariableFromE2eConfig("CONTROL_PLANE_ENDPOINT_IP")
cp2EndpointIP := testHelper.getVariableFromE2eConfig("CONTROL_PLANE_ENDPOINT_IP_WORKLOAD_CLUSTER")
if cp2EndpointIP == "" {
cp2EndpointIP = os.Getenv("CONTROL_PLANE_ENDPOINT_IP_WORKLOAD_CLUSTER")
if cp2EndpointIP == "" {
Fail("CONTROL_PLANE_ENDPOINT_IP_WORKLOAD_CLUSTER not set")
}
}
testHelper.updateVariableInE2eConfig("CONTROL_PLANE_ENDPOINT_IP", cp2EndpointIP)
})

var (
namespace2 *corev1.Namespace
clusterResources2 *clusterctl.ApplyClusterTemplateAndWaitResult
cancelWatches2 context.CancelFunc
)
By("configure env for 2nd cluster", func() {
clusterResources2 = new(clusterctl.ApplyClusterTemplateAndWaitResult)
namespace2, cancelWatches2 = setupSpecNamespace(ctx, specName+"-2", bootstrapClusterProxy, artifactFolder)
})

By("Creating a workload cluster 2 with same name", func() {
testHelper.deployClusterAndWait(
deployClusterParams{
clusterName: clusterName,
namespace: namespace2,
flavor: flavor,
clusterctlConfigPath: clusterctlConfigPath,
artifactFolder: artifactFolder,
bootstrapClusterProxy: bootstrapClusterProxy,
}, clusterResources2)
})

By("Checking cluster category condition is true", func() {
testHelper.verifyConditionOnNutanixCluster(verifyConditionParams{
clusterName: clusterName,
namespace: namespace2,
bootstrapClusterProxy: bootstrapClusterProxy,
expectedCondition: clusterv1.Condition{
Type: infrav1.ClusterCategoryCreatedCondition,
Status: corev1.ConditionTrue,
},
})
})

By("Checking if a category was created", func() {
testHelper.verifyCategoryExists(ctx, expectedClusterNameCategoryKey, clusterName)
})

By("Checking if there are VMs assigned to this category", func() {
expectedCategories := map[string]string{
expectedClusterNameCategoryKey: clusterName,
}
testHelper.verifyCategoriesNutanixMachines(ctx, clusterName, namespace2.Name, expectedCategories)
})

By("Delete 2nd cluster and its namespace", func() {
dumpSpecResourcesAndCleanup(ctx, specName, bootstrapClusterProxy, artifactFolder, namespace2, cancelWatches2, clusterResources2.Cluster, e2eConfig.GetIntervals, skipCleanup)
})

By("Set original Control plane endpoint IP", func() {
testHelper.updateVariableInE2eConfig("CONTROL_PLANE_ENDPOINT_IP", cp1EndpointIP)
})

// AfterEach section will take care of deleting the first cluster
})
})
14 changes: 9 additions & 5 deletions test/e2e/common.go
@@ -64,6 +64,14 @@ func setupSpecNamespace(ctx context.Context, specName string, clusterProxy frame
return namespace, cancelWatches
}

func unsetupNamespace(ctx context.Context, specName string, clusterProxy framework.ClusterProxy, namespace *corev1.Namespace) {
Byf("Deleting namespace used for hosting the %q test spec", specName)
framework.DeleteNamespace(ctx, framework.DeleteNamespaceInput{
Deleter: clusterProxy.GetClient(),
Name: namespace.Name,
})
}

func dumpSpecResourcesAndCleanup(ctx context.Context, specName string, clusterProxy framework.ClusterProxy, artifactFolder string, namespace *corev1.Namespace, cancelWatches context.CancelFunc, cluster *clusterv1.Cluster, intervalsGetter func(spec, key string) []interface{}, skipCleanup bool) {
Byf("Dumping logs from the %q workload cluster", cluster.Name)

@@ -89,11 +97,7 @@ func dumpSpecResourcesAndCleanup(ctx context.Context, specName string, clusterPr
Namespace: namespace.Name,
}, intervalsGetter(specName, "wait-delete-cluster")...)

Byf("Deleting namespace used for hosting the %q test spec", specName)
framework.DeleteNamespace(ctx, framework.DeleteNamespaceInput{
Deleter: clusterProxy.GetClient(),
Name: namespace.Name,
})
unsetupNamespace(ctx, specName, clusterProxy, namespace)
}
cancelWatches()
}
22 changes: 22 additions & 0 deletions test/e2e/test_helpers.go
@@ -132,6 +132,8 @@ type testHelperInterface interface {
deployCluster(params deployClusterParams, clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult)
deployClusterAndWait(params deployClusterParams, clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult)
deleteSecret(params deleteSecretParams)
deleteAllClustersAndWait(ctx context.Context, specName string, bootstrapClusterProxy framework.ClusterProxy, namespace *corev1.Namespace, intervalsGetter func(spec, key string) []interface{})
deleteClusterAndWait(ctx context.Context, specName string, bootstrapClusterProxy framework.ClusterProxy, cluster *clusterv1.Cluster, intervalsGetter func(spec, key string) []interface{})
findGPU(ctx context.Context, gpuName string) *prismGoClientV3.GPU
generateNMTName(clusterName string) string
generateNMTProviderID(clusterName string) string
@@ -144,6 +146,7 @@ type testHelperInterface interface {
getNutanixResourceIdentifierFromEnv(envVarKey string) infrav1.NutanixResourceIdentifier
getNutanixResourceIdentifierFromE2eConfig(variableKey string) infrav1.NutanixResourceIdentifier
getVariableFromE2eConfig(variableKey string) string
updateVariableInE2eConfig(variableKey string, variableValue string)
stripNutanixIDFromProviderID(providerID string) string
verifyCategoryExists(ctx context.Context, categoryKey, categoyValue string)
verifyCategoriesNutanixMachines(ctx context.Context, clusterName, namespace string, expectedCategories map[string]string)
@@ -465,6 +468,20 @@ func (t testHelper) deployClusterAndWait(params deployClusterParams, clusterReso
}, clusterResources)
}

func (t testHelper) deleteAllClustersAndWait(ctx context.Context, specName string, bootstrapClusterProxy framework.ClusterProxy, namespace *corev1.Namespace, intervalsGetter func(spec, key string) []interface{}) {
framework.DeleteAllClustersAndWait(ctx, framework.DeleteAllClustersAndWaitInput{
Client: bootstrapClusterProxy.GetClient(),
Namespace: namespace.Name,
}, intervalsGetter(specName, "wait-delete-cluster")...)
}

func (t testHelper) deleteClusterAndWait(ctx context.Context, specName string, bootstrapClusterProxy framework.ClusterProxy, cluster *clusterv1.Cluster, intervalsGetter func(spec, key string) []interface{}) {
framework.DeleteClusterAndWait(ctx, framework.DeleteClusterAndWaitInput{
Client: bootstrapClusterProxy.GetClient(),
Cluster: cluster,
}, intervalsGetter(specName, "wait-delete-cluster")...)
}

func (t testHelper) generateNMTName(clusterName string) string {
return fmt.Sprintf("%s-mt-0", clusterName)
}
@@ -559,6 +576,11 @@ func (t testHelper) getVariableFromE2eConfig(variableKey string) string {
return variableValue
}

func (t testHelper) updateVariableInE2eConfig(variableKey string, variableValue string) {
t.e2eConfig.Variables[variableKey] = variableValue
os.Setenv(variableKey, variableValue)
}

func (t testHelper) stripNutanixIDFromProviderID(providerID string) string {
return strings.TrimPrefix(providerID, nutanixProviderIDPrefix)
}