Skip to content

Commit

Permalink
Fix e2e tests creating old bundle workload clusters (#8325)
Browse files Browse the repository at this point in the history
  • Loading branch information
g-gaston authored Jun 14, 2024
1 parent bd7a7cc commit f2629b8
Show file tree
Hide file tree
Showing 4 changed files with 48 additions and 6 deletions.
1 change: 1 addition & 0 deletions test/e2e/docker_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1050,6 +1050,7 @@ func TestDockerKubernetes129to130UpgradeFromLatestMinorReleaseAPI(t *testing.T)
runMulticlusterUpgradeFromReleaseFlowAPI(
test,
release,
wc.ClusterConfig.Cluster.Spec.KubernetesVersion,
v1alpha1.Kube130,
"",
)
Expand Down
18 changes: 18 additions & 0 deletions test/e2e/suite.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
//go:build e2e
// +build e2e

package e2e

import (
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
)

// init installs a logger into controller-runtime before any e2e test runs.
func init() {
	// This is necessary because the test framework builds controller-runtime
	// k8s client, and the library requires SetLogger to be called before
	// it's used. Otherwise it prints a confusing warning and it hides any
	// other client log.
	// There might be a better way for this, but this will do for now.
	ctrl.SetLogger(klog.Background())
}
34 changes: 28 additions & 6 deletions test/e2e/upgrade_from_latest.go
Original file line number Diff line number Diff line change
Expand Up @@ -66,10 +66,23 @@ func runInPlaceUpgradeFromReleaseFlow(test *framework.ClusterE2ETest, latestRele
test.DeleteCluster()
}

func runMulticlusterUpgradeFromReleaseFlowAPI(test *framework.MulticlusterE2ETest, release *releasev1.EksARelease, kubeVersion anywherev1.KubernetesVersion, os framework.OS) {
// runMulticlusterUpgradeFromReleaseFlowAPI tests the ability to create workload clusters with an old Bundle in a management cluster
// that has been updated to a new Bundle. It follows the following steps:
// 1. Create a management cluster with the old Bundle.
// 2. Create workload clusters with the old Bundle.
// 3. Upgrade the management cluster to the new Bundle and new Kubernetes version (newVersion).
// 4. Upgrade the workload clusters to the new Bundle and new Kubernetes version (newVersion).
// 5. Delete the workload clusters.
// 6. Re-create the workload clusters with the old Bundle and previous Kubernetes version (oldVersion). It's necessary to sometimes
// use a different kube version because the old Bundle might not support the new Kubernetes version.
// 7. Delete the workload clusters.
// 8. Delete the management cluster.
func runMulticlusterUpgradeFromReleaseFlowAPI(test *framework.MulticlusterE2ETest, release *releasev1.EksARelease, oldVersion, newVersion anywherev1.KubernetesVersion, os framework.OS) {
provider := test.ManagementCluster.Provider
// 1. Create management cluster
test.CreateManagementCluster(framework.ExecuteWithEksaRelease(release))

// 2. Create workload clusters with the old Bundle
test.RunConcurrentlyInWorkloadClusters(func(wc *framework.WorkloadCluster) {
wc.CreateCluster(framework.ExecuteWithEksaRelease(release))
wc.ValidateCluster(wc.ClusterConfig.Cluster.Spec.KubernetesVersion)
Expand All @@ -79,47 +92,56 @@ func runMulticlusterUpgradeFromReleaseFlowAPI(test *framework.MulticlusterE2ETes
oldCluster := test.ManagementCluster.GetEKSACluster()

test.ManagementCluster.UpdateClusterConfig(
provider.WithKubeVersionAndOS(kubeVersion, os, nil),
provider.WithKubeVersionAndOS(newVersion, os, nil),
)
// 3. Upgrade management cluster to new Bundle and new Kubernetes version
test.ManagementCluster.UpgradeCluster()
test.ManagementCluster.ValidateCluster(test.ManagementCluster.ClusterConfig.Cluster.Spec.KubernetesVersion)
test.ManagementCluster.StopIfFailed()

cluster := test.ManagementCluster.GetEKSACluster()

// Upgrade bundle workload clusters now because they still have the old versions of the bundle.
test.RunConcurrentlyInWorkloadClusters(func(wc *framework.WorkloadCluster) {
// 4. Upgrade the workload clusters to the new Bundle and new Kubernetes version (newVersion).
wc.UpdateClusterConfig(
provider.WithKubeVersionAndOS(kubeVersion, os, nil),
provider.WithKubeVersionAndOS(newVersion, os, nil),
api.ClusterToConfigFiller(
api.WithEksaVersion(cluster.Spec.EksaVersion),
),
)
wc.ApplyClusterManifest()
wc.ValidateClusterState()
wc.StopIfFailed()
// 5. Delete the workload clusters.
wc.DeleteClusterWithKubectl()
wc.ValidateClusterDelete()
wc.StopIfFailed()
})

// Create workload cluster with old bundle
test.RunConcurrentlyInWorkloadClusters(func(wc *framework.WorkloadCluster) {
wc.UpdateClusterConfig(
provider.WithKubeVersionAndOS(kubeVersion, os, release),
provider.WithKubeVersionAndOS(oldVersion, os, release),
api.ClusterToConfigFiller(
api.WithEksaVersion(oldCluster.Spec.EksaVersion),
),
)
// 6. Re-create the workload clusters with the old Bundle and previous Kubernetes version (oldVersion).
wc.ApplyClusterManifest()
wc.WaitForKubeconfig()
wc.ValidateClusterState()
wc.StopIfFailed()
// 7. Delete the workload clusters.
wc.DeleteClusterWithKubectl()
wc.ValidateClusterDelete()
})

// It's necessary to call stop here because if any of the workload clusters failed,
// their panic was thrown in a go routine, which doesn't stop the main test routine.
test.RunInWorkloadClusters(func(wc *framework.WorkloadCluster) {
wc.StopIfFailed()
})

// 8. Delete the management cluster.
test.DeleteManagementCluster()
}

Expand Down
1 change: 1 addition & 0 deletions test/e2e/vsphere_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -4329,6 +4329,7 @@ func TestVSphereKubernetes127to128UpgradeFromLatestMinorReleaseBottleRocketAPI(t
runMulticlusterUpgradeFromReleaseFlowAPI(
test,
release,
wc.ClusterConfig.Cluster.Spec.KubernetesVersion,
v1alpha1.Kube128,
framework.Bottlerocket1,
)
Expand Down

0 comments on commit f2629b8

Please sign in to comment.