diff --git a/.github/workflows/go-coverage.yml b/.github/workflows/go-coverage.yml index 8d94b44112c4..4dbae47db862 100644 --- a/.github/workflows/go-coverage.yml +++ b/.github/workflows/go-coverage.yml @@ -22,7 +22,7 @@ jobs: - name: Run go test with coverage run: COVER_PROFILE=coverage.txt make coverage-unit-test - name: Codecov upload - uses: codecov/codecov-action@v4.1.0 + uses: codecov/codecov-action@v4.4.1 with: files: ./coverage.txt token: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index a52e5288faad..348f2c838c0a 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -23,7 +23,7 @@ jobs: check-latest: true cache: true - name: golangci-lint - uses: golangci/golangci-lint-action@v4 + uses: golangci/golangci-lint-action@v6 with: version: v1.56.2 only-new-issues: true diff --git a/.github/workflows/vulnerability.yml b/.github/workflows/vulnerability.yml index 9f8522e0ce47..1900fdad88ec 100644 --- a/.github/workflows/vulnerability.yml +++ b/.github/workflows/vulnerability.yml @@ -6,6 +6,9 @@ on: branches: - main pull_request: + paths-ignore: + - '**.yaml' + - '**.yml' workflow_dispatch: schedule: # every day at 7am UTC diff --git a/Makefile b/Makefile index 747f5ba5d233..054ee497cb29 100644 --- a/Makefile +++ b/Makefile @@ -46,15 +46,14 @@ endif ifeq (,$(findstring $(BRANCH_NAME),main)) ## use the branch-specific bundle manifest if the branch is not 'main' -BUNDLE_MANIFEST_URL?=https://dev-release-assets.eks-anywhere.model-rocket.aws.dev/${BRANCH_NAME}/bundle-release.yaml RELEASE_MANIFEST_URL?=$(RELEASE_MANIFEST_HOST)/${BRANCH_NAME}/eks-a-release.yaml LATEST=$(BRANCH_NAME) else ## use the standard bundle manifest if the branch is 'main' -BUNDLE_MANIFEST_URL?=https://dev-release-assets.eks-anywhere.model-rocket.aws.dev/bundle-release.yaml RELEASE_MANIFEST_URL?=$(RELEASE_MANIFEST_HOST)/eks-a-release.yaml LATEST=latest endif +BUNDLE_MANIFEST_URL?=$(shell curl $(RELEASE_MANIFEST_URL) | yq ".spec.releases[-1].bundleManifestUrl") # DEV_GIT_VERSION should be something like v0.19.0-dev+latest, depending on the base branch # and if this is a local build or a CI build. 
@@ -160,7 +159,7 @@ EKS_A_CROSS_PLATFORMS := $(foreach platform,$(EKS_A_PLATFORMS),eks-a-cross-platf E2E_CROSS_PLATFORMS := $(foreach platform,$(EKS_A_PLATFORMS),e2e-cross-platform-$(platform)) EKS_A_RELEASE_CROSS_PLATFORMS := $(foreach platform,$(EKS_A_PLATFORMS),eks-a-release-cross-platform-$(platform)) -DOCKER_E2E_TEST := TestDockerKubernetes125SimpleFlow +DOCKER_E2E_TEST := TestDockerKubernetes130SimpleFlow LOCAL_E2E_TESTS ?= $(DOCKER_E2E_TEST) EMBED_CONFIG_FOLDER = pkg/files/config @@ -568,7 +567,7 @@ mocks: ## Generate mocks ${MOCKGEN} -destination=pkg/bootstrapper/mocks/bootstrapper.go -package=mocks "github.com/aws/eks-anywhere/pkg/bootstrapper" ClusterClient ${MOCKGEN} -destination=pkg/git/providers/github/mocks/github.go -package=mocks "github.com/aws/eks-anywhere/pkg/git/providers/github" GithubClient ${MOCKGEN} -destination=pkg/git/mocks/git.go -package=mocks "github.com/aws/eks-anywhere/pkg/git" Client,ProviderClient - ${MOCKGEN} -destination=pkg/workflows/interfaces/mocks/clients.go -package=mocks "github.com/aws/eks-anywhere/pkg/workflows/interfaces" Bootstrapper,ClusterManager,GitOpsManager,Validator,CAPIManager,EksdInstaller,EksdUpgrader,PackageInstaller,ClusterUpgrader,ClusterCreator,ClientFactory,EksaInstaller,ClusterDeleter + ${MOCKGEN} -destination=pkg/workflows/interfaces/mocks/clients.go -package=mocks "github.com/aws/eks-anywhere/pkg/workflows/interfaces" Bootstrapper,ClusterManager,GitOpsManager,Validator,CAPIManager,EksdInstaller,EksdUpgrader,PackageManager,ClusterUpgrader,ClusterCreator,ClientFactory,EksaInstaller,ClusterDeleter,ClusterMover ${MOCKGEN} -destination=pkg/git/gogithub/mocks/client.go -package=mocks "github.com/aws/eks-anywhere/pkg/git/gogithub" Client ${MOCKGEN} -destination=pkg/git/gitclient/mocks/client.go -package=mocks "github.com/aws/eks-anywhere/pkg/git/gitclient" GoGit ${MOCKGEN} -destination=pkg/validations/mocks/docker.go -package=mocks "github.com/aws/eks-anywhere/pkg/validations" DockerExecutable diff --git a/OWNERS b/OWNERS index c025af06240f..e180957661db 100644 --- a/OWNERS +++ b/OWNERS @@ -2,7 +2,6 @@ approvers: - abhay-krishna - abhinavmpandey08 - ahreehong -- chrisdoherty4 - cxbrowne1207 - d8660091 - drewvanstone @@ -18,4 +17,4 @@ approvers: - taneyland - tatlat - vignesh-goutham -- vivek-koppuru \ No newline at end of file +- vivek-koppuru diff --git a/cmd/eks-a-tool/cmd/cloudstackrmvms.go b/cmd/eks-a-tool/cmd/cloudstackrmvms.go index ea2ee6d8a9d7..212f9b34837c 100644 --- a/cmd/eks-a-tool/cmd/cloudstackrmvms.go +++ b/cmd/eks-a-tool/cmd/cloudstackrmvms.go @@ -25,7 +25,7 @@ var cloudstackRmVmsCmd = &cobra.Command{ if err != nil { return err } - err = cleanup.CleanUpCloudstackTestResources(cmd.Context(), clusterName, viper.GetBool(dryRunFlag)) + err = cleanup.CloudstackTestResources(cmd.Context(), clusterName, viper.GetBool(dryRunFlag), false) if err != nil { log.Fatalf("Error removing vms: %v", err) } diff --git a/cmd/eks-a-tool/cmd/nutanixrmvms.go b/cmd/eks-a-tool/cmd/nutanixrmvms.go index 87ddf517739c..3878960954a0 100644 --- a/cmd/eks-a-tool/cmd/nutanixrmvms.go +++ b/cmd/eks-a-tool/cmd/nutanixrmvms.go @@ -31,7 +31,7 @@ var nutanixRmVmsCmd = &cobra.Command{ if viper.IsSet(insecureFlag) { insecure = true } - err = cleanup.NutanixTestResourcesCleanup(cmd.Context(), clusterName, viper.GetString(endpointFlag), viper.GetString(portFlag), insecure, viper.GetBool(ignoreErrorsFlag)) + err = cleanup.NutanixTestResources(clusterName, viper.GetString(endpointFlag), viper.GetString(portFlag), insecure, viper.GetBool(ignoreErrorsFlag)) if err 
!= nil { log.Fatalf("Error removing vms: %v", err) } diff --git a/cmd/eksctl-anywhere/cmd/common.go b/cmd/eksctl-anywhere/cmd/common.go index f24cb8a41fd9..9fc05856dc43 100644 --- a/cmd/eksctl-anywhere/cmd/common.go +++ b/cmd/eksctl-anywhere/cmd/common.go @@ -3,12 +3,14 @@ package cmd import ( "context" + anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/dependencies" "github.com/aws/eks-anywhere/pkg/files" "github.com/aws/eks-anywhere/pkg/helm" "github.com/aws/eks-anywhere/pkg/kubeconfig" "github.com/aws/eks-anywhere/pkg/manifests/bundles" + "github.com/aws/eks-anywhere/pkg/registrymirror" "github.com/aws/eks-anywhere/pkg/version" "github.com/aws/eks-anywhere/release/api/v1alpha1" ) @@ -50,7 +52,7 @@ func getKubeconfigPath(clusterName, override string) string { func NewDependenciesForPackages(ctx context.Context, opts ...PackageOpt) (*dependencies.Dependencies, error) { config := New(opts...) - return dependencies.NewFactory(). + f := dependencies.NewFactory(). WithExecutableMountDirs(config.mountPaths...). WithCustomBundles(config.bundlesOverride). WithExecutableBuilder(). @@ -59,8 +61,13 @@ func NewDependenciesForPackages(ctx context.Context, opts ...PackageOpt) (*depen WithHelm(helm.WithInsecure()). WithCuratedPackagesRegistry(config.registryName, config.kubeVersion, version.Get()). WithPackageControllerClient(config.spec, config.kubeConfig). - WithLogger(). - Build(ctx) + WithLogger() + + if config.cluster != nil && config.cluster.Spec.RegistryMirrorConfiguration != nil { + f.WithRegistryMirror(registrymirror.FromCluster(config.cluster)) + } + + return f.Build(ctx) } type PackageOpt func(*PackageConfig) @@ -72,6 +79,7 @@ type PackageConfig struct { mountPaths []string spec *cluster.Spec bundlesOverride string + cluster *anywherev1.Cluster } func New(options ...PackageOpt) *PackageConfig { @@ -118,3 +126,10 @@ func WithBundlesOverride(bundlesOverride string) func(*PackageConfig) { config.bundlesOverride = bundlesOverride } } + +// WithCluster sets cluster in the config with incoming value. +func WithCluster(cluster *anywherev1.Cluster) func(config *PackageConfig) { + return func(config *PackageConfig) { + config.cluster = cluster + } +} diff --git a/cmd/eksctl-anywhere/cmd/createcluster.go b/cmd/eksctl-anywhere/cmd/createcluster.go index 57d3e4e76b57..dc5b576d74b1 100644 --- a/cmd/eksctl-anywhere/cmd/createcluster.go +++ b/cmd/eksctl-anywhere/cmd/createcluster.go @@ -185,12 +185,13 @@ func (cc *createClusterOptions) createCluster(cmd *cobra.Command, _ []string) er WithGitOpsFlux(clusterSpec.Cluster, clusterSpec.FluxConfig, cliConfig). WithWriter(). WithEksdInstaller(). - WithPackageInstaller(clusterSpec, cc.installPackages, cc.managementKubeconfig). + WithPackageManager(clusterSpec, cc.installPackages, cc.managementKubeconfig). WithValidatorClients(). WithCreateClusterDefaulter(createCLIConfig). WithClusterApplier(). WithKubeconfigWriter(clusterSpec.Cluster). - WithClusterCreator(clusterSpec.Cluster) + WithClusterCreator(clusterSpec.Cluster). 
+ WithClusterMover() if cc.timeoutOptions.noTimeouts { factory.WithNoTimeouts() @@ -254,7 +255,7 @@ func (cc *createClusterOptions) createCluster(cmd *cobra.Command, _ []string) er deps.GitOpsFlux, deps.Writer, deps.EksdInstaller, - deps.PackageInstaller, + deps.PackageManager, deps.ClusterCreator, deps.UnAuthKubectlClient, ) @@ -271,9 +272,10 @@ func (cc *createClusterOptions) createCluster(cmd *cobra.Command, _ []string) er deps.GitOpsFlux, deps.Writer, deps.EksdInstaller, - deps.PackageInstaller, + deps.PackageManager, deps.ClusterCreator, deps.EksaInstaller, + deps.ClusterMover, ) err = createMgmtCluster.Run(ctx, clusterSpec, createValidations) diff --git a/cmd/eksctl-anywhere/cmd/deletecluster.go b/cmd/eksctl-anywhere/cmd/deletecluster.go index 00e0465b3225..0893eb995d14 100644 --- a/cmd/eksctl-anywhere/cmd/deletecluster.go +++ b/cmd/eksctl-anywhere/cmd/deletecluster.go @@ -126,6 +126,7 @@ func (dc *deleteClusterOptions) deleteCluster(ctx context.Context) error { WithEksdInstaller(). WithEKSAInstaller(). WithUnAuthKubeClient(). + WithClusterMover(). Build(ctx) if err != nil { return err @@ -154,7 +155,7 @@ func (dc *deleteClusterOptions) deleteCluster(ctx context.Context) error { deleteWorkload := workload.NewDelete(deps.Provider, deps.Writer, deps.ClusterManager, deps.ClusterDeleter, deps.GitOpsFlux) err = deleteWorkload.Run(ctx, cluster, clusterSpec) } else { - deleteManagement := management.NewDelete(deps.Bootstrapper, deps.Provider, deps.Writer, deps.ClusterManager, deps.GitOpsFlux, deps.ClusterDeleter, deps.EksdInstaller, deps.EksaInstaller, deps.UnAuthKubeClient) + deleteManagement := management.NewDelete(deps.Bootstrapper, deps.Provider, deps.Writer, deps.ClusterManager, deps.GitOpsFlux, deps.ClusterDeleter, deps.EksdInstaller, deps.EksaInstaller, deps.UnAuthKubeClient, deps.ClusterMover) err = deleteManagement.Run(ctx, cluster, clusterSpec) } cleanup(deps, &err) diff --git a/cmd/eksctl-anywhere/cmd/generateclusterconfig.go b/cmd/eksctl-anywhere/cmd/generateclusterconfig.go index 0d1ac32269b3..78ce9302dd47 100644 --- a/cmd/eksctl-anywhere/cmd/generateclusterconfig.go +++ b/cmd/eksctl-anywhere/cmd/generateclusterconfig.go @@ -49,7 +49,7 @@ func preRunGenerateClusterConfig(cmd *cobra.Command, args []string) { func init() { generateCmd.AddCommand(generateClusterConfigCmd) - generateClusterConfigCmd.Flags().StringP("provider", "p", "", "Provider to use (vsphere or tinkerbell or docker)") + generateClusterConfigCmd.Flags().StringP("provider", "p", "", fmt.Sprintf("Provider to use (%s)", strings.Join(constants.SupportedProviders, " or "))) err := generateClusterConfigCmd.MarkFlagRequired("provider") if err != nil { log.Fatalf("marking flag as required: %v", err) diff --git a/cmd/eksctl-anywhere/cmd/generatepackage.go b/cmd/eksctl-anywhere/cmd/generatepackage.go index e75896f3f3eb..37a5501e3fc7 100644 --- a/cmd/eksctl-anywhere/cmd/generatepackage.go +++ b/cmd/eksctl-anywhere/cmd/generatepackage.go @@ -6,7 +6,11 @@ import ( "log" "github.com/spf13/cobra" + "k8s.io/apimachinery/pkg/types" + anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" + "github.com/aws/eks-anywhere/pkg/clients/kubernetes" + "github.com/aws/eks-anywhere/pkg/constants" "github.com/aws/eks-anywhere/pkg/curatedpackages" "github.com/aws/eks-anywhere/pkg/kubeconfig" ) @@ -70,7 +74,22 @@ func generatePackages(ctx context.Context, args []string) error { return err } - deps, err := NewDependenciesForPackages(ctx, WithRegistryName(gpOptions.registry), WithKubeVersion(gpOptions.kubeVersion), 
WithMountPaths(kubeConfig), WithBundlesOverride(gpOptions.bundlesOverride)) + k8sClient, err := kubernetes.NewRuntimeClientFromFileName(kubeConfig) + if err != nil { + return fmt.Errorf("unable to initalize k8s client: %v", err) + } + + cluster := &anywherev1.Cluster{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: gpOptions.clusterName, Namespace: constants.DefaultNamespace}, cluster); err != nil { + return fmt.Errorf("unable to get cluster %s: %v", gpOptions.clusterName, err) + } + + deps, err := NewDependenciesForPackages(ctx, + WithRegistryName(gpOptions.registry), + WithKubeVersion(gpOptions.kubeVersion), + WithMountPaths(kubeConfig), + WithBundlesOverride(gpOptions.bundlesOverride), + WithCluster(cluster)) if err != nil { return fmt.Errorf("unable to initialize executables: %v", err) } diff --git a/cmd/eksctl-anywhere/cmd/installpackagecontroller.go b/cmd/eksctl-anywhere/cmd/installpackagecontroller.go index 28cb2dd421c2..17e9196f491a 100644 --- a/cmd/eksctl-anywhere/cmd/installpackagecontroller.go +++ b/cmd/eksctl-anywhere/cmd/installpackagecontroller.go @@ -59,7 +59,12 @@ func installPackageController(ctx context.Context) error { return fmt.Errorf("the cluster config file provided is invalid: %v", err) } - deps, err := NewDependenciesForPackages(ctx, WithMountPaths(kubeConfig), WithClusterSpec(clusterSpec), WithKubeConfig(ico.kubeConfig), WithBundlesOverride(ico.bundlesOverride)) + deps, err := NewDependenciesForPackages(ctx, + WithMountPaths(kubeConfig), + WithClusterSpec(clusterSpec), + WithKubeConfig(ico.kubeConfig), + WithBundlesOverride(ico.bundlesOverride), + WithCluster(clusterSpec.Cluster)) if err != nil { return fmt.Errorf("unable to initialize executables: %v", err) } diff --git a/cmd/eksctl-anywhere/cmd/upgradecluster.go b/cmd/eksctl-anywhere/cmd/upgradecluster.go index cb199030269f..27866be23897 100644 --- a/cmd/eksctl-anywhere/cmd/upgradecluster.go +++ b/cmd/eksctl-anywhere/cmd/upgradecluster.go @@ -159,6 +159,7 @@ func (uc *upgradeClusterOptions) upgradeCluster(cmd *cobra.Command, args []strin WithEksdInstaller(). WithKubectl(). WithValidatorClients(). + WithPackageManagerWithoutWait(clusterSpec, "", uc.managementKubeconfig). 
WithUpgradeClusterDefaulter(upgradeCLIConfig) if uc.timeoutOptions.noTimeouts { @@ -212,6 +213,7 @@ func (uc *upgradeClusterOptions) upgradeCluster(cmd *cobra.Command, args []strin deps.EksdUpgrader, deps.EksdInstaller, deps.ClusterApplier, + deps.PackageManager, ) err = upgrade.Run(ctx, clusterSpec, managementCluster, upgradeValidations) @@ -225,7 +227,7 @@ func (uc *upgradeClusterOptions) upgradeCluster(cmd *cobra.Command, args []strin deps.Writer, deps.ClusterApplier, deps.EksdInstaller, - deps.PackageInstaller, + deps.PackageManager, ) err = upgradeWorkloadCluster.Run(ctx, workloadCluster, clusterSpec, upgradeValidations) } diff --git a/cmd/integration_test/build/buildspecs/cloudstack-test-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/cloudstack-test-eks-a-cli.yml index 399c743881e3..50584ea748c2 100644 --- a/cmd/integration_test/build/buildspecs/cloudstack-test-eks-a-cli.yml +++ b/cmd/integration_test/build/buildspecs/cloudstack-test-eks-a-cli.yml @@ -63,6 +63,7 @@ env: T_IRSA_S3_BUCKET: "etcd-encryption:irsa_s3_bucket" T_KMS_IAM_ROLE: "etcd-encryption:kms_iam_role_arn" T_KMS_IMAGE: "etcd-encryption:kms_image" + T_POD_IDENTITY_WEBHOOK_IMAGE: "etcd-encryption:pod_identity_webhook_image" T_KMS_KEY_ARN: "etcd-encryption:kms_key_arn" T_KMS_KEY_REGION: "etcd-encryption:region" T_KMS_SOCKET: "etcd-encryption:socket" @@ -82,7 +83,8 @@ phases: - > ./bin/test e2e cleanup cloudstack -n ${CLUSTER_NAME_PREFIX} - -v 4 + --delete-duplicate-networks + -v 6 build: commands: - export JOB_ID=$CODEBUILD_BUILD_ID @@ -105,7 +107,7 @@ phases: -v 4 --skip ${SKIPPED_TESTS} --bundles-override=${BUNDLES_OVERRIDE} - --cleanup-vms=true + --cleanup-resources=true --test-report-folder=reports --branch-name=${BRANCH_NAME} --baremetal-branch=${BAREMETAL_BRANCH} diff --git a/cmd/integration_test/build/buildspecs/conformance-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/conformance-eks-a-cli.yml index e31e007051e5..d9bb3ea7bbd5 100644 --- a/cmd/integration_test/build/buildspecs/conformance-eks-a-cli.yml +++ b/cmd/integration_test/build/buildspecs/conformance-eks-a-cli.yml @@ -4,7 +4,7 @@ env: variables: INTEGRATION_TEST_MAX_EC2_COUNT: 25 INTEGRATION_TEST_MAX_CONCURRENT_TEST_COUNT: 25 - T_TINKERBELL_MAX_HARDWARE_PER_TEST: 5 + T_TINKERBELL_MAX_HARDWARE_PER_TEST: 2 T_CLOUDSTACK_CIDR: "10.80.191.0/24" CLOUDSTACK_PROVIDER: true T_TINKERBELL_INVENTORY_CSV: "hardware-manifests/inventory.csv" @@ -13,27 +13,26 @@ env: TEST_RUNNER_GOVC_TEMPLATE: "eks-a-admin-ci" INTEGRATION_TEST_INFRA_CONFIG: "/tmp/test-infra.yml" T_VSPHERE_TEMPLATE_FOLDER: "/SDDC-Datacenter/vm/Templates" - T_VSPHERE_TEMPLATE_UBUNTU_1_22: "/SDDC-Datacenter/vm/Templates/ubuntu-kube-v1-22" - T_VSPHERE_TEMPLATE_UBUNTU_1_23: "/SDDC-Datacenter/vm/Templates/ubuntu-kube-v1-23" - T_VSPHERE_TEMPLATE_UBUNTU_1_24: "/SDDC-Datacenter/vm/Templates/ubuntu-kube-v1-24" T_VSPHERE_TEMPLATE_UBUNTU_1_25: "/SDDC-Datacenter/vm/Templates/ubuntu-kube-v1-25" T_VSPHERE_TEMPLATE_UBUNTU_1_26: "/SDDC-Datacenter/vm/Templates/ubuntu-kube-v1-26" T_VSPHERE_TEMPLATE_UBUNTU_1_27: "/SDDC-Datacenter/vm/Templates/ubuntu-kube-v1-27" T_VSPHERE_TEMPLATE_UBUNTU_1_28: "/SDDC-Datacenter/vm/Templates/ubuntu-kube-v1-28" - T_VSPHERE_TEMPLATE_BR_1_22: "/SDDC-Datacenter/vm/Templates/bottlerocket-kube-v1-22" - T_VSPHERE_TEMPLATE_BR_1_23: "/SDDC-Datacenter/vm/Templates/bottlerocket-kube-v1-23" - T_VSPHERE_TEMPLATE_BR_1_24: "/SDDC-Datacenter/vm/Templates/bottlerocket-kube-v1-24" + T_VSPHERE_TEMPLATE_UBUNTU_1_29: "/SDDC-Datacenter/vm/Templates/ubuntu-kube-v1-29" + 
T_VSPHERE_TEMPLATE_UBUNTU_2204_1_25: "/SDDC-Datacenter/vm/Templates/ubuntu-2204-kube-v1-25" + T_VSPHERE_TEMPLATE_UBUNTU_2204_1_26: "/SDDC-Datacenter/vm/Templates/ubuntu-2204-kube-v1-26" + T_VSPHERE_TEMPLATE_UBUNTU_2204_1_27: "/SDDC-Datacenter/vm/Templates/ubuntu-2204-kube-v1-27" + T_VSPHERE_TEMPLATE_UBUNTU_2204_1_28: "/SDDC-Datacenter/vm/Templates/ubuntu-2204-kube-v1-28" + T_VSPHERE_TEMPLATE_UBUNTU_2204_1_29: "/SDDC-Datacenter/vm/Templates/ubuntu-2204-kube-v1-29" T_VSPHERE_TEMPLATE_BR_1_25: "/SDDC-Datacenter/vm/Templates/bottlerocket-kube-v1-25" T_VSPHERE_TEMPLATE_BR_1_26: "/SDDC-Datacenter/vm/Templates/bottlerocket-kube-v1-26" T_VSPHERE_TEMPLATE_BR_1_27: "/SDDC-Datacenter/vm/Templates/bottlerocket-kube-v1-27" T_VSPHERE_TEMPLATE_BR_1_28: "/SDDC-Datacenter/vm/Templates/bottlerocket-kube-v1-28" - T_VSPHERE_TEMPLATE_REDHAT_1_22: "/SDDC-Datacenter/vm/Templates/redhat-kube-v1-22" - T_VSPHERE_TEMPLATE_REDHAT_1_23: "/SDDC-Datacenter/vm/Templates/redhat-kube-v1-23" - T_VSPHERE_TEMPLATE_REDHAT_1_24: "/SDDC-Datacenter/vm/Templates/redhat-kube-v1-24" + T_VSPHERE_TEMPLATE_BR_1_29: "/SDDC-Datacenter/vm/Templates/bottlerocket-kube-v1-29" T_VSPHERE_TEMPLATE_REDHAT_1_25: "/SDDC-Datacenter/vm/Templates/redhat-kube-v1-25" T_VSPHERE_TEMPLATE_REDHAT_1_26: "/SDDC-Datacenter/vm/Templates/redhat-kube-v1-26" T_VSPHERE_TEMPLATE_REDHAT_1_27: "/SDDC-Datacenter/vm/Templates/redhat-kube-v1-27" T_VSPHERE_TEMPLATE_REDHAT_1_28: "/SDDC-Datacenter/vm/Templates/redhat-kube-v1-28" + T_VSPHERE_TEMPLATE_REDHAT_1_29: "/SDDC-Datacenter/vm/Templates/redhat-kube-v1-29" T_NUTANIX_MACHINE_VCPU_PER_SOCKET: 1 T_NUTANIX_MACHINE_VCPU_SOCKET: 2 T_NUTANIX_MACHINE_MEMORY_SIZE: "4Gi" @@ -82,16 +81,16 @@ env: T_CLOUDSTACK_POD_CIDR: "cloudstack_ci_beta_connection:pod_cidr" T_CLOUDSTACK_SERVICE_CIDR: "cloudstack_ci_beta_connection:service_cidr" T_CLOUDSTACK_SSH_AUTHORIZED_KEY: "vsphere_ci_beta_connection:ssh_authorized_key" - T_TINKERBELL_IMAGE_UBUNTU_1_22: "tinkerbell_ci:image_ubuntu_1_22" - T_TINKERBELL_IMAGE_UBUNTU_1_23: "tinkerbell_ci:image_ubuntu_1_23" - T_TINKERBELL_IMAGE_UBUNTU_1_24: "tinkerbell_ci:image_ubuntu_1_24" T_TINKERBELL_IMAGE_UBUNTU_1_25: "tinkerbell_ci:image_ubuntu_1_25" T_TINKERBELL_IMAGE_UBUNTU_1_26: "tinkerbell_ci:image_ubuntu_1_26" T_TINKERBELL_IMAGE_UBUNTU_1_27: "tinkerbell_ci:image_ubuntu_1_27" T_TINKERBELL_IMAGE_UBUNTU_1_28: "tinkerbell_ci:image_ubuntu_1_28" - T_TINKERBELL_IMAGE_REDHAT_1_22: "tinkerbell_ci:image_redhat_1_22" - T_TINKERBELL_IMAGE_REDHAT_1_23: "tinkerbell_ci:image_redhat_1_23" - T_TINKERBELL_IMAGE_REDHAT_1_24: "tinkerbell_ci:image_redhat_1_24" + T_TINKERBELL_IMAGE_UBUNTU_1_29: "tinkerbell_ci:image_ubuntu_1_29" + T_TINKERBELL_IMAGE_UBUNTU_2204_1_25: "tinkerbell_ci:image_ubuntu_2204_1_25" + T_TINKERBELL_IMAGE_UBUNTU_2204_1_26: "tinkerbell_ci:image_ubuntu_2204_1_26" + T_TINKERBELL_IMAGE_UBUNTU_2204_1_27: "tinkerbell_ci:image_ubuntu_2204_1_27" + T_TINKERBELL_IMAGE_UBUNTU_2204_1_28: "tinkerbell_ci:image_ubuntu_2204_1_28" + T_TINKERBELL_IMAGE_UBUNTU_2204_1_29: "tinkerbell_ci:image_ubuntu_2204_1_29" T_TINKERBELL_IMAGE_REDHAT_1_25: "tinkerbell_ci:image_redhat_1_25" T_TINKERBELL_IMAGE_REDHAT_1_26: "tinkerbell_ci:image_redhat_1_26" T_TINKERBELL_IMAGE_REDHAT_1_27: "tinkerbell_ci:image_redhat_1_27" @@ -99,6 +98,7 @@ env: T_TINKERBELL_SSH_AUTHORIZED_KEY: "vsphere_ci_beta_connection:ssh_authorized_key" T_TINKERBELL_CP_NETWORK_CIDR: "tinkerbell_ci:cp_network_cidr" T_TINKERBELL_S3_INVENTORY_CSV_KEY: "tinkerbell_ci:s3_inventory_csv" + T_TINKERBELL_S3_AG_INVENTORY_CSV_KEY: "tinkerbell_ci:s3_ag_inventory_csv" 
TEST_RUNNER_GOVC_USERNAME: "tinkerbell_ci:govc_username" TEST_RUNNER_GOVC_PASSWORD: "tinkerbell_ci:govc_password" TEST_RUNNER_GOVC_URL: "tinkerbell_ci:govc_url" @@ -123,6 +123,18 @@ env: T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_26: "nutanix_ci:nutanix_template_ubuntu_1_26" T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_27: "nutanix_ci:nutanix_template_ubuntu_1_27" T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_28: "nutanix_ci:nutanix_template_ubuntu_1_28" + T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_29: "nutanix_ci:nutanix_template_ubuntu_1_29" + T_NUTANIX_TEMPLATE_NAME_REDHAT_1_25: "nutanix_ci:nutanix_template_rhel_8_1_25" + T_NUTANIX_TEMPLATE_NAME_REDHAT_1_26: "nutanix_ci:nutanix_template_rhel_8_1_26" + T_NUTANIX_TEMPLATE_NAME_REDHAT_1_27: "nutanix_ci:nutanix_template_rhel_8_1_27" + T_NUTANIX_TEMPLATE_NAME_REDHAT_1_28: "nutanix_ci:nutanix_template_rhel_8_1_28" + T_NUTANIX_TEMPLATE_NAME_REDHAT_1_29: "nutanix_ci:nutanix_template_rhel_8_1_29" + T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_25: "nutanix_ci:nutanix_template_rhel_9_1_25" + T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_26: "nutanix_ci:nutanix_template_rhel_9_1_26" + T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_27: "nutanix_ci:nutanix_template_rhel_9_1_27" + T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_28: "nutanix_ci:nutanix_template_rhel_9_1_28" + T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_29: "nutanix_ci:nutanix_template_rhel_9_1_29" + phases: pre_build: commands: @@ -151,7 +163,7 @@ phases: -v 4 --skip ${SKIPPED_TESTS} --bundles-override=${BUNDLES_OVERRIDE} - --cleanup-vms=true + --cleanup-resources=true --test-report-folder=reports reports: e2e-reports: diff --git a/cmd/integration_test/build/buildspecs/nutanix-test-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/nutanix-test-eks-a-cli.yml index 10f3491c6b76..2865acf2b0d3 100644 --- a/cmd/integration_test/build/buildspecs/nutanix-test-eks-a-cli.yml +++ b/cmd/integration_test/build/buildspecs/nutanix-test-eks-a-cli.yml @@ -33,21 +33,21 @@ env: T_NUTANIX_POD_CIDR: "nutanix_ci:nutanix_pod_cidr" T_NUTANIX_SERVICE_CIDR: "nutanix_ci:nutanix_service_cidr" T_NUTANIX_ADDITIONAL_TRUST_BUNDLE: "nutanix_ci:nutanix_additional_trust_bundle" - T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_25: "nutanix_ci:nutanix_template_ubuntu_1_25" T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_26: "nutanix_ci:nutanix_template_ubuntu_1_26" T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_27: "nutanix_ci:nutanix_template_ubuntu_1_27" T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_28: "nutanix_ci:nutanix_template_ubuntu_1_28" T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_29: "nutanix_ci:nutanix_template_ubuntu_1_29" - T_NUTANIX_TEMPLATE_NAME_REDHAT_1_25: "nutanix_ci:nutanix_template_rhel_8_1_25" + T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_30: "nutanix_ci:nutanix_template_ubuntu_1_30" T_NUTANIX_TEMPLATE_NAME_REDHAT_1_26: "nutanix_ci:nutanix_template_rhel_8_1_26" T_NUTANIX_TEMPLATE_NAME_REDHAT_1_27: "nutanix_ci:nutanix_template_rhel_8_1_27" T_NUTANIX_TEMPLATE_NAME_REDHAT_1_28: "nutanix_ci:nutanix_template_rhel_8_1_28" T_NUTANIX_TEMPLATE_NAME_REDHAT_1_29: "nutanix_ci:nutanix_template_rhel_8_1_29" - T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_25: "nutanix_ci:nutanix_template_rhel_9_1_25" + T_NUTANIX_TEMPLATE_NAME_REDHAT_1_30: "nutanix_ci:nutanix_template_rhel_8_1_30" T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_26: "nutanix_ci:nutanix_template_rhel_9_1_26" T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_27: "nutanix_ci:nutanix_template_rhel_9_1_27" T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_28: "nutanix_ci:nutanix_template_rhel_9_1_28" T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_29: "nutanix_ci:nutanix_template_rhel_9_1_29" + T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_30: "nutanix_ci:nutanix_template_rhel_9_1_30" phases: 
pre_build: @@ -55,10 +55,19 @@ phases: - source ${CODEBUILD_SRC_DIR}/cmd/integration_test/build/script/setup_profile.sh - source ${CODEBUILD_SRC_DIR}/cmd/integration_test/build/script/create_infra_config.sh - ${CODEBUILD_SRC_DIR}/cmd/integration_test/build/script/start_docker.sh + - export CLUSTER_NAME_PREFIX="${BRANCH_NAME//./-}" - | if ! [[ ${CODEBUILD_INITIATOR} =~ "codepipeline" ]]; then make build-eks-a-for-e2e build-integration-test-binary e2e-tests-binary E2E_TAGS="e2e nutanix" E2E_OUTPUT_FILE=bin/nutanix/e2e.test fi + - > + ./bin/test e2e cleanup nutanix + -n ${CLUSTER_NAME_PREFIX} + -e ${T_NUTANIX_ENDPOINT} + -p ${T_NUTANIX_PORT} + --insecure + --ignoreErrors + -v 4 build: commands: - export JOB_ID=$CODEBUILD_BUILD_ID @@ -81,10 +90,20 @@ phases: -v 4 --skip ${SKIPPED_TESTS} --bundles-override=${BUNDLES_OVERRIDE} - --cleanup-vms=true + --cleanup-resources=true --test-report-folder=reports --branch-name=${BRANCH_NAME} --baremetal-branch=${BAREMETAL_BRANCH} + post_build: + commands: + - > + ./bin/test e2e cleanup nutanix + -n ${CLUSTER_NAME_PREFIX} + -e ${T_NUTANIX_ENDPOINT} + -p ${T_NUTANIX_PORT} + --insecure + --ignoreErrors + -v 4 reports: e2e-reports: files: diff --git a/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml index 8cc154209292..2fd7871a1662 100644 --- a/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml +++ b/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml @@ -2,6 +2,7 @@ version: 0.2 env: variables: + INTEGRATION_TEST_CLOUDWATCH_NAMESPACE_OVERRIDE: EksaQuickE2ETests INTEGRATION_TEST_MAX_EC2_COUNT: 180 INTEGRATION_TEST_MAX_CONCURRENT_TEST_COUNT: 180 EKSA_GIT_KNOWN_HOSTS: "/tmp/known_hosts" @@ -115,16 +116,18 @@ env: T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_27: "nutanix_ci:nutanix_template_ubuntu_1_27" T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_28: "nutanix_ci:nutanix_template_ubuntu_1_28" T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_29: "nutanix_ci:nutanix_template_ubuntu_1_29" + T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_30: "nutanix_ci:nutanix_template_ubuntu_1_30" T_NUTANIX_TEMPLATE_NAME_REDHAT_1_25: "nutanix_ci:nutanix_template_rhel_8_1_25" T_NUTANIX_TEMPLATE_NAME_REDHAT_1_26: "nutanix_ci:nutanix_template_rhel_8_1_26" T_NUTANIX_TEMPLATE_NAME_REDHAT_1_27: "nutanix_ci:nutanix_template_rhel_8_1_27" T_NUTANIX_TEMPLATE_NAME_REDHAT_1_28: "nutanix_ci:nutanix_template_rhel_8_1_28" T_NUTANIX_TEMPLATE_NAME_REDHAT_1_29: "nutanix_ci:nutanix_template_rhel_8_1_29" - T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_25: "nutanix_ci:nutanix_template_rhel_9_1_25" + T_NUTANIX_TEMPLATE_NAME_REDHAT_1_30: "nutanix_ci:nutanix_template_rhel_8_1_30" T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_26: "nutanix_ci:nutanix_template_rhel_9_1_26" T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_27: "nutanix_ci:nutanix_template_rhel_9_1_27" T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_28: "nutanix_ci:nutanix_template_rhel_9_1_28" T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_29: "nutanix_ci:nutanix_template_rhel_9_1_29" + T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_30: "nutanix_ci:nutanix_template_rhel_9_1_30" # Snow secrets T_SNOW_DEVICES: "snow_ci:snow_devices" T_SNOW_CREDENTIALS_S3_PATH: "snow_ci:snow_credentials_s3_path" @@ -137,18 +140,22 @@ env: T_TINKERBELL_IMAGE_UBUNTU_1_27: "tinkerbell_ci:image_ubuntu_1_27" T_TINKERBELL_IMAGE_UBUNTU_1_28: "tinkerbell_ci:image_ubuntu_1_28" T_TINKERBELL_IMAGE_UBUNTU_1_29: "tinkerbell_ci:image_ubuntu_1_29" + T_TINKERBELL_IMAGE_UBUNTU_1_30: "tinkerbell_ci:image_ubuntu_1_30" T_TINKERBELL_IMAGE_UBUNTU_2204_1_24: "tinkerbell_ci:image_ubuntu_2204_1_24" 
T_TINKERBELL_IMAGE_UBUNTU_2204_1_25: "tinkerbell_ci:image_ubuntu_2204_1_25" T_TINKERBELL_IMAGE_UBUNTU_2204_1_26: "tinkerbell_ci:image_ubuntu_2204_1_26" T_TINKERBELL_IMAGE_UBUNTU_2204_1_27: "tinkerbell_ci:image_ubuntu_2204_1_27" T_TINKERBELL_IMAGE_UBUNTU_2204_1_28: "tinkerbell_ci:image_ubuntu_2204_1_28" T_TINKERBELL_IMAGE_UBUNTU_2204_1_29: "tinkerbell_ci:image_ubuntu_2204_1_29" + T_TINKERBELL_IMAGE_UBUNTU_2204_1_29_RTOS: "tinkerbell_ci:image_ubuntu_2204_1_29_rtos" + T_TINKERBELL_IMAGE_UBUNTU_2204_1_30: "tinkerbell_ci:image_ubuntu_2204_1_30" T_TINKERBELL_IMAGE_REDHAT_1_24: "tinkerbell_ci:image_redhat_1_24" T_TINKERBELL_IMAGE_REDHAT_1_25: "tinkerbell_ci:image_redhat_1_25" T_TINKERBELL_IMAGE_REDHAT_1_26: "tinkerbell_ci:image_redhat_1_26" T_TINKERBELL_IMAGE_REDHAT_1_27: "tinkerbell_ci:image_redhat_1_27" T_TINKERBELL_IMAGE_REDHAT_1_28: "tinkerbell_ci:image_redhat_1_28" T_TINKERBELL_IMAGE_REDHAT_1_29: "tinkerbell_ci:image_redhat_1_29" + T_TINKERBELL_IMAGE_REDHAT_1_30: "tinkerbell_ci:image_redhat_1_30" T_TINKERBELL_SSH_AUTHORIZED_KEY: "vsphere_ci_beta_connection:ssh_authorized_key" T_TINKERBELL_CP_NETWORK_CIDR: "tinkerbell_ci:cp_network_cidr" T_TINKERBELL_S3_INVENTORY_CSV_KEY: "tinkerbell_ci:s3_inventory_csv" @@ -177,6 +184,15 @@ phases: - > ./bin/test e2e cleanup cloudstack -n ${CLUSTER_NAME_PREFIX} + --delete-duplicate-networks + -v 6 + - > + ./bin/test e2e cleanup nutanix + -n ${CLUSTER_NAME_PREFIX} + -e ${T_NUTANIX_ENDPOINT} + -p ${T_NUTANIX_PORT} + --insecure + --ignoreErrors -v 4 build: commands: @@ -203,7 +219,7 @@ phases: -v 4 --skip ${SKIPPED_TESTS} --bundles-override=${BUNDLES_OVERRIDE} - --cleanup-vms=true + --cleanup-resources=true --test-report-folder=reports --branch-name=${BRANCH_NAME} --baremetal-branch=${BAREMETAL_BRANCH} @@ -218,6 +234,14 @@ phases: ./bin/test e2e cleanup cloudstack -n ${CLUSTER_NAME_PREFIX} -v 4 + - > + ./bin/test e2e cleanup nutanix + -n ${CLUSTER_NAME_PREFIX} + -e ${T_NUTANIX_ENDPOINT} + -p ${T_NUTANIX_PORT} + --insecure + --ignoreErrors + -v 4 reports: e2e-reports: files: diff --git a/cmd/integration_test/build/buildspecs/snow-test-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/snow-test-eks-a-cli.yml index 124c1033913b..c7fe1db38e79 100644 --- a/cmd/integration_test/build/buildspecs/snow-test-eks-a-cli.yml +++ b/cmd/integration_test/build/buildspecs/snow-test-eks-a-cli.yml @@ -57,7 +57,7 @@ phases: -v 4 --skip ${SKIPPED_TESTS} --bundles-override=${BUNDLES_OVERRIDE} - --cleanup-vms=true + --cleanup-resources=true --test-report-folder=reports --branch-name=${BRANCH_NAME} --baremetal-branch=${BAREMETAL_BRANCH} diff --git a/cmd/integration_test/build/buildspecs/tinkerbell-test-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/tinkerbell-test-eks-a-cli.yml index 128163ae111d..f16b3e232abe 100644 --- a/cmd/integration_test/build/buildspecs/tinkerbell-test-eks-a-cli.yml +++ b/cmd/integration_test/build/buildspecs/tinkerbell-test-eks-a-cli.yml @@ -37,18 +37,22 @@ env: T_TINKERBELL_IMAGE_UBUNTU_1_27: "tinkerbell_ci:image_ubuntu_1_27" T_TINKERBELL_IMAGE_UBUNTU_1_28: "tinkerbell_ci:image_ubuntu_1_28" T_TINKERBELL_IMAGE_UBUNTU_1_29: "tinkerbell_ci:image_ubuntu_1_29" + T_TINKERBELL_IMAGE_UBUNTU_1_30: "tinkerbell_ci:image_ubuntu_1_30" T_TINKERBELL_IMAGE_UBUNTU_2204_1_24: "tinkerbell_ci:image_ubuntu_2204_1_24" T_TINKERBELL_IMAGE_UBUNTU_2204_1_25: "tinkerbell_ci:image_ubuntu_2204_1_25" T_TINKERBELL_IMAGE_UBUNTU_2204_1_26: "tinkerbell_ci:image_ubuntu_2204_1_26" T_TINKERBELL_IMAGE_UBUNTU_2204_1_27: "tinkerbell_ci:image_ubuntu_2204_1_27" 
T_TINKERBELL_IMAGE_UBUNTU_2204_1_28: "tinkerbell_ci:image_ubuntu_2204_1_28" T_TINKERBELL_IMAGE_UBUNTU_2204_1_29: "tinkerbell_ci:image_ubuntu_2204_1_29" + T_TINKERBELL_IMAGE_UBUNTU_2204_1_29_RTOS: "tinkerbell_ci:image_ubuntu_2204_1_29_rtos" + T_TINKERBELL_IMAGE_UBUNTU_2204_1_30: "tinkerbell_ci:image_ubuntu_2204_1_30" T_TINKERBELL_IMAGE_REDHAT_1_24: "tinkerbell_ci:image_redhat_1_24" T_TINKERBELL_IMAGE_REDHAT_1_25: "tinkerbell_ci:image_redhat_1_25" T_TINKERBELL_IMAGE_REDHAT_1_26: "tinkerbell_ci:image_redhat_1_26" T_TINKERBELL_IMAGE_REDHAT_1_27: "tinkerbell_ci:image_redhat_1_27" T_TINKERBELL_IMAGE_REDHAT_1_28: "tinkerbell_ci:image_redhat_1_28" T_TINKERBELL_IMAGE_REDHAT_1_29: "tinkerbell_ci:image_redhat_1_29" + T_TINKERBELL_IMAGE_REDHAT_1_30: "tinkerbell_ci:image_redhat_1_30" T_TINKERBELL_SSH_AUTHORIZED_KEY: "vsphere_ci_beta_connection:ssh_authorized_key" T_TINKERBELL_CP_NETWORK_CIDR: "tinkerbell_ci:cp_network_cidr" T_TINKERBELL_S3_INVENTORY_CSV_KEY: "tinkerbell_ci:s3_inventory_csv" @@ -101,7 +105,7 @@ phases: -v 4 --skip ${SKIPPED_TESTS} --bundles-override=${BUNDLES_OVERRIDE} - --cleanup-vms=true + --cleanup-resources=true --test-report-folder=reports --branch-name=${BRANCH_NAME} --baremetal-branch=${BAREMETAL_BRANCH} diff --git a/cmd/integration_test/build/buildspecs/vsphere-test-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/vsphere-test-eks-a-cli.yml index 3e0adaf91e60..6758074837a0 100644 --- a/cmd/integration_test/build/buildspecs/vsphere-test-eks-a-cli.yml +++ b/cmd/integration_test/build/buildspecs/vsphere-test-eks-a-cli.yml @@ -79,11 +79,16 @@ env: T_PRIVATE_REGISTRY_MIRROR_USERNAME: "harbor-registry-data:authenticated_username" T_PRIVATE_REGISTRY_MIRROR_PASSWORD: "harbor-registry-data:authenticated_password" T_PRIVATE_REGISTRY_MIRROR_CA_CERT: "harbor-registry-data:authenticated_caCert" + T_REGISTRY_MIRROR_OCINAMESPACES_REGISTRY1: "harbor-registry-data:ocinamespace_registry1" + T_REGISTRY_MIRROR_OCINAMESPACES_NAMESPACE1: "harbor-registry-data:ocinamespace_namespace1" + T_REGISTRY_MIRROR_OCINAMESPACES_REGISTRY2: "harbor-registry-data:ocinamespace_registry2" + T_REGISTRY_MIRROR_OCINAMESPACES_NAMESPACE2: "harbor-registry-data:ocinamespace_namespace2" T_NTP_SERVERS: "ntp:servers" T_AWS_IAM_ROLE_ARN: "aws-iam-auth-role:ec2_role_arn" T_IRSA_S3_BUCKET: "etcd-encryption:irsa_s3_bucket" T_KMS_IAM_ROLE: "etcd-encryption:kms_iam_role_arn" T_KMS_IMAGE: "etcd-encryption:kms_image" + T_POD_IDENTITY_WEBHOOK_IMAGE: "etcd-encryption:pod_identity_webhook_image" T_KMS_KEY_ARN: "etcd-encryption:kms_key_arn" T_KMS_KEY_REGION: "etcd-encryption:region" T_KMS_SOCKET: "etcd-encryption:socket" @@ -125,7 +130,7 @@ phases: -v 4 --skip ${SKIPPED_TESTS} --bundles-override=${BUNDLES_OVERRIDE} - --cleanup-vms=true + --cleanup-resources=true --test-report-folder=reports --branch-name=${BRANCH_NAME} --baremetal-branch=${BAREMETAL_BRANCH} diff --git a/cmd/integration_test/cmd/cleanupcloudstack.go b/cmd/integration_test/cmd/cleanupcloudstack.go index ba5b83344fd2..422df1394494 100644 --- a/cmd/integration_test/cmd/cleanupcloudstack.go +++ b/cmd/integration_test/cmd/cleanupcloudstack.go @@ -37,11 +37,14 @@ func preRunCleanUpCloudstackSetup(cmd *cobra.Command, args []string) { }) } +const deleteDuplicateNetworksFlag = "delete-duplicate-networks" + var requiredCloudstackCleanUpFlags = []string{clusterNameFlagName} func init() { cleanUpInstancesCmd.AddCommand(cleanUpCloudstackCmd) cleanUpCloudstackCmd.Flags().StringP(clusterNameFlagName, "n", "", "Cluster name for associated vms") + 
cleanUpCloudstackCmd.Flags().Bool(deleteDuplicateNetworksFlag, false, "Delete duplicate isolated networks") for _, flag := range requiredCloudstackCleanUpFlags { if err := cleanUpCloudstackCmd.MarkFlagRequired(flag); err != nil { @@ -52,7 +55,8 @@ func init() { func cleanUpCloudstackTestResources(ctx context.Context) error { clusterName := viper.GetString(clusterNameFlagName) - err := cleanup.CleanUpCloudstackTestResources(ctx, clusterName, false) + deleteDuplicateNetworks := viper.IsSet(deleteDuplicateNetworksFlag) + err := cleanup.CloudstackTestResources(ctx, clusterName, false, deleteDuplicateNetworks) if err != nil { return fmt.Errorf("running cleanup for cloudstack vms: %v", err) } diff --git a/cmd/integration_test/cmd/cleanupnutanix.go b/cmd/integration_test/cmd/cleanupnutanix.go new file mode 100644 index 000000000000..46426e1c07a3 --- /dev/null +++ b/cmd/integration_test/cmd/cleanupnutanix.go @@ -0,0 +1,72 @@ +package cmd + +import ( + "fmt" + "log" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "github.com/spf13/viper" + + "github.com/aws/eks-anywhere/internal/test/cleanup" + "github.com/aws/eks-anywhere/pkg/logger" +) + +const ( + endpointFlag = "endpoint" + portFlag = "port" + insecureFlag = "insecure" + ignoreErrorsFlag = "ignoreErrors" +) + +var requiredNutanixCleanUpFlags = []string{clusterNameFlagName, endpointFlag} + +var cleanUpNutanixCmd = &cobra.Command{ + Use: "nutanix", + Short: "Clean up e2e vms on Nutanix Prism", + Long: "Clean up vms created for e2e testing on Nutanix Prism", + SilenceUsage: true, + PreRun: preRunCleanUpNutanixSetup, + RunE: func(_ *cobra.Command, _ []string) error { + err := cleanUpNutanixTestResources() + if err != nil { + logger.Fatal(err, "Failed to cleanup e2e vms on Nutanix Prism") + } + return nil + }, +} + +func preRunCleanUpNutanixSetup(cmd *cobra.Command, _ []string) { + cmd.Flags().VisitAll(func(flag *pflag.Flag) { + err := viper.BindPFlag(flag.Name, flag) + if err != nil { + log.Fatalf("Error initializing flags: %v", err) + } + }) +} + +func init() { + cleanUpInstancesCmd.AddCommand(cleanUpNutanixCmd) + + cleanUpNutanixCmd.Flags().StringP(clusterNameFlagName, "n", "", "Cluster name for associated vms") + cleanUpNutanixCmd.Flags().StringP(endpointFlag, "e", "", "Nutanix Prism endpoint") + cleanUpNutanixCmd.Flags().StringP(portFlag, "p", "9440", "Nutanix Prism port") + cleanUpNutanixCmd.Flags().BoolP(insecureFlag, "k", false, "skip TLS when contacting Prism APIs") + cleanUpNutanixCmd.Flags().Bool(ignoreErrorsFlag, true, "ignore APIs errors when deleting VMs") + + for _, flag := range requiredNutanixCleanUpFlags { + if err := cleanUpNutanixCmd.MarkFlagRequired(flag); err != nil { + log.Fatalf("Error marking flag %s as required: %v", flag, err) + } + } +} + +func cleanUpNutanixTestResources() error { + clusterName := viper.GetString(clusterNameFlagName) + err := cleanup.NutanixTestResources(clusterName, viper.GetString(endpointFlag), viper.GetString(portFlag), viper.IsSet(insecureFlag), viper.IsSet(ignoreErrorsFlag)) + if err != nil { + return fmt.Errorf("running cleanup for Nutanix vms: %v", err) + } + + return nil +} diff --git a/cmd/integration_test/cmd/run.go b/cmd/integration_test/cmd/run.go index 5e3aee6d0f85..fbb4580cc77f 100644 --- a/cmd/integration_test/cmd/run.go +++ b/cmd/integration_test/cmd/run.go @@ -22,7 +22,7 @@ const ( maxConcurrentTestsFlagName = "max-concurrent-tests" skipFlagName = "skip" bundlesOverrideFlagName = "bundles-override" - cleanupVmsFlagName = "cleanup-vms" + cleanupResourcesFlagName = 
"cleanup-resources" testReportFolderFlagName = "test-report-folder" branchNameFlagName = "branch-name" instanceConfigFlagName = "instance-config" @@ -66,7 +66,7 @@ func init() { runE2ECmd.Flags().IntP(maxConcurrentTestsFlagName, "p", 1, "Maximum number of parallel tests that can be run at a time") runE2ECmd.Flags().StringSlice(skipFlagName, nil, "List of tests to skip") runE2ECmd.Flags().Bool(bundlesOverrideFlagName, false, "Flag to indicate if the tests should run with a bundles override") - runE2ECmd.Flags().Bool(cleanupVmsFlagName, false, "Flag to indicate if VSphere VMs should be cleaned up automatically as tests complete") + runE2ECmd.Flags().Bool(cleanupResourcesFlagName, false, "Flag to indicate if test resources should be cleaned up automatically as tests complete") runE2ECmd.Flags().String(testReportFolderFlagName, "", "Folder destination for JUnit tests reports") runE2ECmd.Flags().String(branchNameFlagName, "main", "EKS-A origin branch from where the tests are being run") runE2ECmd.Flags().String(baremetalBranchFlagName, "main", "Branch for baremetal tests to run on") @@ -88,7 +88,7 @@ func runE2E(ctx context.Context) error { maxConcurrentTests := viper.GetInt(maxConcurrentTestsFlagName) testsToSkip := viper.GetStringSlice(skipFlagName) bundlesOverride := viper.GetBool(bundlesOverrideFlagName) - cleanupVms := viper.GetBool(cleanupVmsFlagName) + cleanupResources := viper.GetBool(cleanupResourcesFlagName) testReportFolder := viper.GetString(testReportFolderFlagName) branchName := viper.GetString(branchNameFlagName) baremetalBranchName := viper.GetString(baremetalBranchFlagName) @@ -102,7 +102,7 @@ func runE2E(ctx context.Context) error { Regex: testRegex, TestsToSkip: testsToSkip, BundlesOverride: bundlesOverride, - CleanupVms: cleanupVms, + CleanupResources: cleanupResources, TestReportFolder: testReportFolder, BranchName: branchName, TestInstanceConfigFile: instanceConfigFile, diff --git a/config/crd/bases/anywhere.eks.amazonaws.com_clusters.yaml b/config/crd/bases/anywhere.eks.amazonaws.com_clusters.yaml index 0f323ebf4a00..7033fcb0363e 100644 --- a/config/crd/bases/anywhere.eks.amazonaws.com_clusters.yaml +++ b/config/crd/bases/anywhere.eks.amazonaws.com_clusters.yaml @@ -178,6 +178,11 @@ spec: required: - host type: object + kubeletConfiguration: + description: KubeletConfiguration is a struct that exposes the + Kubelet settings for the user to set on control plane nodes. + type: object + x-kubernetes-preserve-unknown-fields: true labels: additionalProperties: type: string @@ -539,7 +544,8 @@ spec: in the local registry type: string registry: - description: Name refers to the name of the upstream registry + description: Registry refers to the name of the upstream + registry type: string required: - namespace @@ -571,8 +577,13 @@ spec: description: Count defines the number of desired worker nodes. Defaults to 1. type: integer + kubeletConfiguration: + description: KubeletConfiguration is a struct that exposes the + Kubelet settings for the user to set on worker nodes. + type: object + x-kubernetes-preserve-unknown-fields: true kubernetesVersion: - description: KuberenetesVersion defines the version for worker + description: KubernetesVersion defines the version for worker nodes. If not set, the top level spec kubernetesVersion will be used. 
type: string diff --git a/config/crd/bases/anywhere.eks.amazonaws.com_nutanixdatacenterconfigs.yaml b/config/crd/bases/anywhere.eks.amazonaws.com_nutanixdatacenterconfigs.yaml index 49d4c9af3a11..ddcf876d2e87 100644 --- a/config/crd/bases/anywhere.eks.amazonaws.com_nutanixdatacenterconfigs.yaml +++ b/config/crd/bases/anywhere.eks.amazonaws.com_nutanixdatacenterconfigs.yaml @@ -56,6 +56,70 @@ spec: endpoint: description: Endpoint is the Endpoint of Nutanix Prism Central type: string + failureDomains: + description: FailureDomains is the optional list of failure domains + for the Nutanix Datacenter. + items: + description: NutanixDatacenterFailureDomain defines the failure + domain for the Nutanix Datacenter. + properties: + cluster: + description: Cluster is the Prism Element cluster name or uuid + that is connected to the Prism Central. + properties: + name: + description: name is the resource name in the PC + type: string + type: + description: Type is the identifier type to use for this + resource. + enum: + - uuid + - name + type: string + uuid: + description: uuid is the UUID of the resource in the PC. + type: string + required: + - type + type: object + name: + description: Name is the unique name of the failure domain. + Name must be between 1 and 64 characters long. It must consist + of only lower case alphanumeric characters and hyphens (-). + It must start and end with an alphanumeric character. + maxLength: 64 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + subnets: + description: Subnets holds the list of subnets identifiers cluster's + network subnets. + items: + description: NutanixResourceIdentifier holds the identity + of a Nutanix Prism resource (cluster, image, subnet, etc.) + properties: + name: + description: name is the resource name in the PC + type: string + type: + description: Type is the identifier type to use for this + resource. + enum: + - uuid + - name + type: string + uuid: + description: uuid is the UUID of the resource in the PC. + type: string + required: + - type + type: object + type: array + required: + - name + type: object + type: array insecure: description: Insecure is the optional flag to skip TLS verification. Nutanix Prism Central installation by default ships with a self-signed diff --git a/config/manifest/eksa-components.yaml b/config/manifest/eksa-components.yaml index a113b8cabaff..7077c75c929d 100644 --- a/config/manifest/eksa-components.yaml +++ b/config/manifest/eksa-components.yaml @@ -3881,6 +3881,11 @@ spec: required: - host type: object + kubeletConfiguration: + description: KubeletConfiguration is a struct that exposes the + Kubelet settings for the user to set on control plane nodes. + type: object + x-kubernetes-preserve-unknown-fields: true labels: additionalProperties: type: string @@ -4242,7 +4247,8 @@ spec: in the local registry type: string registry: - description: Name refers to the name of the upstream registry + description: Registry refers to the name of the upstream + registry type: string required: - namespace @@ -4274,8 +4280,13 @@ spec: description: Count defines the number of desired worker nodes. Defaults to 1. type: integer + kubeletConfiguration: + description: KubeletConfiguration is a struct that exposes the + Kubelet settings for the user to set on worker nodes. + type: object + x-kubernetes-preserve-unknown-fields: true kubernetesVersion: - description: KuberenetesVersion defines the version for worker + description: KubernetesVersion defines the version for worker nodes. 
If not set, the top level spec kubernetesVersion will be used. type: string @@ -5496,6 +5507,70 @@ spec: endpoint: description: Endpoint is the Endpoint of Nutanix Prism Central type: string + failureDomains: + description: FailureDomains is the optional list of failure domains + for the Nutanix Datacenter. + items: + description: NutanixDatacenterFailureDomain defines the failure + domain for the Nutanix Datacenter. + properties: + cluster: + description: Cluster is the Prism Element cluster name or uuid + that is connected to the Prism Central. + properties: + name: + description: name is the resource name in the PC + type: string + type: + description: Type is the identifier type to use for this + resource. + enum: + - uuid + - name + type: string + uuid: + description: uuid is the UUID of the resource in the PC. + type: string + required: + - type + type: object + name: + description: Name is the unique name of the failure domain. + Name must be between 1 and 64 characters long. It must consist + of only lower case alphanumeric characters and hyphens (-). + It must start and end with an alphanumeric character. + maxLength: 64 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + subnets: + description: Subnets holds the list of subnets identifiers cluster's + network subnets. + items: + description: NutanixResourceIdentifier holds the identity + of a Nutanix Prism resource (cluster, image, subnet, etc.) + properties: + name: + description: name is the resource name in the PC + type: string + type: + description: Type is the identifier type to use for this + resource. + enum: + - uuid + - name + type: string + uuid: + description: uuid is the UUID of the resource in the PC. + type: string + required: + - type + type: object + type: array + required: + - name + type: object + type: array insecure: description: Insecure is the optional flag to skip TLS verification. 
Nutanix Prism Central installation by default ships with a self-signed @@ -7115,6 +7190,8 @@ rules: verbs: - create - delete + - get + - list - apiGroups: - "" resources: @@ -7140,6 +7217,7 @@ rules: - delete - get - list + - patch - update - watch - apiGroups: @@ -7172,6 +7250,7 @@ rules: - snowmachineconfigs - tinkerbelldatacenterconfigs - tinkerbellmachineconfigs + - tinkerbelltemplateconfigs - vspheredatacenterconfigs - vspheremachineconfigs verbs: @@ -7193,6 +7272,7 @@ rules: - snowmachineconfigs/finalizers - tinkerbelldatacenterconfigs/finalizers - tinkerbellmachineconfigs/finalizers + - tinkerbelltemplateconfigs/finalizers - vspheredatacenterconfigs/finalizers - vspheremachineconfigs/finalizers verbs: @@ -7209,6 +7289,7 @@ rules: - snowmachineconfigs/status - tinkerbelldatacenterconfigs/status - tinkerbellmachineconfigs/status + - tinkerbelltemplateconfigs/status - vspheredatacenterconfigs/status - vspheremachineconfigs/status verbs: diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index bfd3550bb7fe..acd2ae7f0898 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -29,6 +29,8 @@ rules: verbs: - create - delete + - get + - list - apiGroups: - "" resources: @@ -54,6 +56,7 @@ rules: - delete - get - list + - patch - update - watch - apiGroups: @@ -86,6 +89,7 @@ rules: - snowmachineconfigs - tinkerbelldatacenterconfigs - tinkerbellmachineconfigs + - tinkerbelltemplateconfigs - vspheredatacenterconfigs - vspheremachineconfigs verbs: @@ -107,6 +111,7 @@ rules: - snowmachineconfigs/finalizers - tinkerbelldatacenterconfigs/finalizers - tinkerbellmachineconfigs/finalizers + - tinkerbelltemplateconfigs/finalizers - vspheredatacenterconfigs/finalizers - vspheremachineconfigs/finalizers verbs: @@ -123,6 +128,7 @@ rules: - snowmachineconfigs/status - tinkerbelldatacenterconfigs/status - tinkerbellmachineconfigs/status + - tinkerbelltemplateconfigs/status - vspheredatacenterconfigs/status - vspheremachineconfigs/status verbs: diff --git a/controllers/cluster_controller.go b/controllers/cluster_controller.go index f9694fc8c7cf..796f08752856 100644 --- a/controllers/cluster_controller.go +++ b/controllers/cluster_controller.go @@ -170,15 +170,15 @@ func (r *ClusterReconciler) SetupWithManager(mgr ctrl.Manager, log logr.Logger) } // +kubebuilder:rbac:groups="",resources=events,verbs=create;patch;update -// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;delete;update +// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;delete;update;patch // +kubebuilder:rbac:groups="",namespace=eksa-system,resources=secrets,verbs=patch;update -// +kubebuilder:rbac:groups="",resources=namespaces,verbs=create;delete +// +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;create;delete // +kubebuilder:rbac:groups="",resources=nodes,verbs=list // +kubebuilder:rbac:groups=addons.cluster.x-k8s.io,resources=clusterresourcesets,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=anywhere.eks.amazonaws.com,resources=clusters;gitopsconfigs;snowmachineconfigs;snowdatacenterconfigs;snowippools;vspheredatacenterconfigs;vspheremachineconfigs;dockerdatacenterconfigs;tinkerbellmachineconfigs;tinkerbelldatacenterconfigs;cloudstackdatacenterconfigs;cloudstackmachineconfigs;nutanixdatacenterconfigs;nutanixmachineconfigs;awsiamconfigs;oidcconfigs;awsiamconfigs;fluxconfigs,verbs=get;list;watch;update;patch -// 
+kubebuilder:rbac:groups=anywhere.eks.amazonaws.com,resources=clusters/status;snowmachineconfigs/status;snowippools/status;vspheredatacenterconfigs/status;vspheremachineconfigs/status;dockerdatacenterconfigs/status;tinkerbelldatacenterconfigs/status;tinkerbellmachineconfigs/status;cloudstackdatacenterconfigs/status;cloudstackmachineconfigs/status;awsiamconfigs/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=anywhere.eks.amazonaws.com,resources=clusters;gitopsconfigs;snowmachineconfigs;snowdatacenterconfigs;snowippools;vspheredatacenterconfigs;vspheremachineconfigs;dockerdatacenterconfigs;tinkerbellmachineconfigs;tinkerbelltemplateconfigs;tinkerbelldatacenterconfigs;cloudstackdatacenterconfigs;cloudstackmachineconfigs;nutanixdatacenterconfigs;nutanixmachineconfigs;awsiamconfigs;oidcconfigs;awsiamconfigs;fluxconfigs,verbs=get;list;watch;update;patch +// +kubebuilder:rbac:groups=anywhere.eks.amazonaws.com,resources=clusters/status;snowmachineconfigs/status;snowippools/status;vspheredatacenterconfigs/status;vspheremachineconfigs/status;dockerdatacenterconfigs/status;tinkerbelldatacenterconfigs/status;tinkerbellmachineconfigs/status;tinkerbelltemplateconfigs/status;cloudstackdatacenterconfigs/status;cloudstackmachineconfigs/status;awsiamconfigs/status,verbs=get;update;patch // +kubebuilder:rbac:groups=anywhere.eks.amazonaws.com,resources=bundles,verbs=get;list;watch -// +kubebuilder:rbac:groups=anywhere.eks.amazonaws.com,resources=clusters/finalizers;snowmachineconfigs/finalizers;snowippools/finalizers;vspheredatacenterconfigs/finalizers;vspheremachineconfigs/finalizers;cloudstackdatacenterconfigs/finalizers;cloudstackmachineconfigs/finalizers;dockerdatacenterconfigs/finalizers;bundles/finalizers;awsiamconfigs/finalizers;tinkerbelldatacenterconfigs/finalizers;tinkerbellmachineconfigs/finalizers,verbs=update +// +kubebuilder:rbac:groups=anywhere.eks.amazonaws.com,resources=clusters/finalizers;snowmachineconfigs/finalizers;snowippools/finalizers;vspheredatacenterconfigs/finalizers;vspheremachineconfigs/finalizers;cloudstackdatacenterconfigs/finalizers;cloudstackmachineconfigs/finalizers;dockerdatacenterconfigs/finalizers;bundles/finalizers;awsiamconfigs/finalizers;tinkerbelldatacenterconfigs/finalizers;tinkerbellmachineconfigs/finalizers;tinkerbelltemplateconfigs/finalizers,verbs=update // +kubebuilder:rbac:groups=bootstrap.cluster.x-k8s.io,resources=kubeadmconfigtemplates,verbs=create;get;list;patch;update;watch // +kubebuilder:rbac:groups="cluster.x-k8s.io",resources=machinedeployments,verbs=list;watch;get;patch;update;create;delete // +kubebuilder:rbac:groups="cluster.x-k8s.io",resources=clusters,verbs=list;watch;get;patch;update;create;delete diff --git a/controllers/factory.go b/controllers/factory.go index d78ead38d93f..b0d04886f06c 100644 --- a/controllers/factory.go +++ b/controllers/factory.go @@ -561,7 +561,7 @@ func (f *Factory) withAWSIamConfigReconciler() *Factory { } func (f *Factory) withPackageControllerClient() *Factory { - f.dependencyFactory.WithHelm().WithKubectl() + f.dependencyFactory.WithHelm(helm.WithInsecure()).WithKubectl() f.buildSteps = append(f.buildSteps, func(ctx context.Context) error { if f.packageControllerClient != nil { diff --git a/designs/api-server-extra-args.md b/designs/api-server-extra-args.md new file mode 100644 index 000000000000..48fef999e4a0 --- /dev/null +++ b/designs/api-server-extra-args.md @@ -0,0 +1,118 @@ +# Allow users to configure kube-apiserver flags + +## Problem Statement + +A customer is currently using OIDC for authenticating 
the Kubernetes service accounts (KSA) and they need some mechanism to configure the kube-apiserver flags for their use case. The main issue we address in this document is how to allow users to configure these flags. + +## Overview of Solution + +Allow users to configure the flags by exposing a map in the cluster spec YAML. + +**Schema:** + +```yaml +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: mgmt-cluster +spec: + ... + controlPlaneConfiguration: + ... + # More control plane components can be added here in the future + apiServerExtraArgs: + ... + "service-account-issuer": "https://{my-service-account-issuer-url}" + "service-account-jwks-uri": "https://{my-service-account-issuer-url}/openid/v1/jwks" + "service-account-signing-key-file": "/etc/kubernetes/pki/sa.key" + "service-account-key-file": "/etc/kubernetes/pki/sa.pub" +``` + +**Validations:** + +* Validate that oidc flags are not configured in apiServerExtraArgs if OIDCConfig identity provider is already configured in the spec +* Validate that the feature flag is enabled for configuring apiServerExtraArgs + +**Pros:** + +* Creates a standard way of exposing any flag for the control plane components +* Gives more flexibility to the users in terms of validating the flag values for the api-server + +**Cons:** + +* Does not enforce OIDC compliance or any other validations on the allowed values for the flags + +## Alternate Solutions + +Allow users to configure the flags as a struct field in the cluster spec YAML. + +**Schema:** + +```yaml +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: mgmt-cluster +spec: + ... + controlPlaneConfiguration: + ... + apiServerConfiguration: + ... + serviceAccountIssuer: + - "https://{my-service-account-issuer-url}" + serviceAccountJwksUri: "https://{my-service-account-issuer-url}/openid/v1/jwks" + serviceAccountSigningKeyFile: "/etc/kubernetes/pki/sa.key" + serviceAccountKeyFile: "/etc/kubernetes/pki/sa.pub" +``` + +**Validations:** + +* Validate that both serviceAccountIssuer and serviceAccountJwksUri have the same domain and use the https scheme +* Additional set of validations specific to each of the flags + +**Pros:** + +* Fails fast if any of the flags are misconfigured with invalid values +* Allows enforcing OIDC compliance for the service account flags of the api-server + +**Cons:** + +* Gives less flexibility to the users for configuring the flags in terms of number of validations +* Does not provide a standard way to configure the flags +* Difficult to validate each and every flag and debug any issues with apiserver + +## Implementation Details + +``` +apiServerExtraArgs: + "service-account-issuer": "https://{my-service-account-issuer-url}" + "service-account-jwks-uri": "https://{my-service-account-issuer-url}/openid/v1/jwks" +``` + +https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/#apiserver-flags +These flags will be fetched from the cluster spec and added to the apiServerExtraArgs in the ClusterConfiguration object during create and upgrade operations for generating the control plane CAPI spec. + +Users need to enable the feature flag `API_SERVER_EXTRA_ARGS_ENABLED=true` to configure the API server flags in the cluster spec. If it's not enabled, then it will throw an error when validating the cluster spec. This is done in order to expose this functionality for now before we determine whether to support it officially with more robust validations.
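As an illustrative sketch only (the exact rendering path through the CAPI providers is an assumption here, not part of this design), the map above would roughly surface in the generated kubeadm ClusterConfiguration like this:

```yaml
# Hypothetical rendering of the cluster spec map into the kubeadm
# ClusterConfiguration that backs the generated control plane spec;
# field names follow upstream kubeadm (apiServer.extraArgs).
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
  extraArgs:
    service-account-issuer: "https://{my-service-account-issuer-url}"
    service-account-jwks-uri: "https://{my-service-account-issuer-url}/openid/v1/jwks"
```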
+ +The `service-account-issuer` flag can be configured for both podIamConfig and controlPlaneConfiguration to enable both features. If both are configured, the podIamConfig URL will be appended to the controlPlaneConfiguration URL. + +If OIDCConfig is specified in the identityProviderRefs within the spec, then OIDC flags cannot be configured in the apiServerExtraArgs and the CLI will throw an error. + +## Documentation + +We would have to add `controlPlaneConfiguration.apiServerExtraArgs` as an optional configuration for the cluster spec in our EKS-A docs. + +## Migration plan for existing flags + +* Phase 1: Add more flags to the above options, and add validations for the existing flags configured in other fields to make sure there is no conflict between them and only one of them can be configured +* Phase 2: Decide on the priority among the existing conflicting fields; if the flags are configured in multiple fields, the one with higher priority takes precedence and is used in the cluster +* Phase 3: Deprecate all the lower-priority conflicting fields for the existing flags so that there is only one standardized way of configuring all the flags + +## References + +* https://github.com/kubernetes/enhancements/tree/master/keps/sig-auth/1393-oidc-discovery +* https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#service-account-issuer-discovery +* https://openid.net/developers/how-connect-works/ +* https://auth0.com/docs/get-started/authentication-and-authorization-flow/authorization-code-flow + diff --git a/designs/expose-metrics.md b/designs/expose-metrics.md new file mode 100644 index 000000000000..0fb8b41b29d6 --- /dev/null +++ b/designs/expose-metrics.md @@ -0,0 +1,371 @@ +# Expose metrics for all EKS Anywhere components securely + +## Problem Statement + +Customers want to scrape the metrics of various EKS Anywhere components with Prometheus in order to understand and monitor the state of a cluster. In EKS Anywhere, metrics of only some Kubernetes system components (kube-apiserver, kubelet, coredns, kube-vip, cert-manager, cilium) are exposed by default. Other system components such as `kube-controller-manager` and `kube-scheduler` are configured with the default `--bind-address=127.0.0.1` (localhost). + +Below are some examples of customer requests for exposing metrics: + +* https://github.com/aws/eks-anywhere/issues/4299 +* https://github.com/aws/eks-anywhere/issues/4405 +* https://github.com/aws/eks-anywhere/issues/7106 + +## Goals and Objectives + +As an EKS Anywhere user, I would like to: + +* Expose all Kubernetes system component metrics securely with authentication and authorization enabled +* Expose metrics from all EKS Anywhere, CAPI etcd components, and CAPI provider-specific components securely + +## Statement of Scope + +**In Scope:** + +Exposing metrics securely for the following components: + +1. Kubernetes system components + +* kube-controller-manager +* kube-scheduler +* kube-proxy + +2. EKS Anywhere components + +* eks-anywhere-controller-manager controller +* eks-anywhere-packages controller + +3. CAPI etcd components + +* etcdadm-bootstrap-provider controller +* etcdadm-controller-controller-manager controller + +4.
CAPI provider-specific components + +* capt-controller-manager controller +* capc-controller-manager controller +* capv-controller-manager controller +* capx-controller-manager controller + +**Out of Scope:** + +Following components are not considered for exposing metrics securely: + +* Snow provider (capas) and Docker provider (capd) +* ECR-credential-provider + +**Future Scope:** + +* Securely expose metrics for all other components (kube-vip, coredns, cilium, and cert-manager) + +## Current State of EKS Anywhere components + +![table](images/expose-metrics.png) + +## Overview of Solution + +There are two general solutions proposed for different components here: + +* For Kubernetes system components, documenting the steps to configure some proxy as a daemonset. No code changes needed in EKS Anywhere +* For all other controller-runtime based CAPI and EKS Anywhere components, implementing the CAPI [diagnostics](https://main.cluster-api.sigs.k8s.io/tasks/diagnostics) feature and exposing the bind address with 0.0.0.0 + +For Kubernetes system components, we don't just want to change the default bind address to 0.0.0.0 because configuring it to bind on all interfaces might expose the metrics publicly over the internet on a node which has any interface exposed to the internet even if that component has authentication and authorization enabled. (Check this [issue](https://github.com/kubernetes/kubeadm/issues/2244#issuecomment-763294722) for more details). Also, it goes against the principle of security [hardening](https://en.wikipedia.org/wiki/Hardening_(computing)) where the default configuration should be kept minimal to reduce the attack surface of the system. For all other controller-runtime based components, it is best to implement the diagnostics feature that CAPI has introduced to match the core controllers and also have consistency across all these components. It also removes the current dependency we have on the [kube-rbac-proxy](https://github.com/brancz/kube-rbac-proxy) for capc controller. Overall, there will be no API changes in the cluster spec for any of the components. + +#### **Kube-Apiserver and Kubelet:** + +These components already perform bearer token authentication and RBAC authorization for client requests and they are already configured to allow listening on all interfaces and IP address families. No further action needs to be taken to expose metrics for these components securely. + +#### **Kube-Controller-Manager, Kube-Scheduler and Kube-Proxy:** + +Kube-controller-manager and kube-scheduler already perform bearer token authentication and RBAC authorization whereas kube-proxy does not but all three components listen only on the localhost (127.0.0.1) for client requests. We can document the steps to be followed to configure some proxy as a Daemonset on the cluster which forwards the client requests to the metrics endpoint for each component. The proxy pods must run in the `hostNetwork` so that they can access the loopback interfaces of the corresponding pods. There will be no changes done in EKS Anywhere to configure these for the customers. + +**Documentation Steps:** + +1. Create a cluster role object which gives permissions to get the metrics + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metrics-reader +rules: + - nonResourceURLs: + - "/metrics" + verbs: + - get +``` + +2. 
Create a cluster role binding object which binds the above cluster role to the service account of the monitoring pod + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: metrics-reader-binding +subjects: +- kind: ServiceAccount + name: prometheus-server + namespace: observability +roleRef: + kind: ClusterRole + name: metrics-reader + apiGroup: rbac.authorization.k8s.io +``` + +3. Create a config map object which stores the proxy configuration to route the request to the components + +**HAProxy configuration example:** + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: metrics-proxy +data: + haproxy.cfg: | + defaults + mode http + timeout connect 5000ms + timeout client 5000ms + timeout server 5000ms + default-server maxconn 10 + + frontend kube-proxy + bind ${NODE_IP}:10249 + http-request deny if !{ path /metrics } + default_backend kube-proxy + backend kube-proxy + server kube-proxy 127.0.0.1:10249 check + + frontend kube-controller-manager + bind ${NODE_IP}:10257 + http-request deny if !{ path /metrics } + default_backend kube-controller-manager + backend kube-controller-manager + server kube-controller-manager 127.0.0.1:10257 ssl verify none check + + frontend kube-scheduler + bind ${NODE_IP}:10259 + http-request deny if !{ path /metrics } + default_backend kube-scheduler + backend kube-scheduler + server kube-scheduler 127.0.0.1:10259 ssl verify none check +``` + +4. Create a Daemonset object to deploy the proxy so that metrics are exposed on all the nodes + +**HAProxy daemonset example:** + +```yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: metrics-proxy +spec: + selector: + matchLabels: + app: metrics-proxy + template: + metadata: + labels: + app: metrics-proxy + spec: + tolerations: + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + hostNetwork: true + containers: + - name: haproxy + image: haproxy:2.9 + env: + - name: NODE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + - name: TOKEN + value: "$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" + ports: + - name: kube-proxy + containerPort: 10249 + - name: kube-ctrl-mgr + containerPort: 10257 + - name: kube-scheduler + containerPort: 10259 + volumeMounts: + - mountPath: "/usr/local/etc/haproxy" + name: haproxy-config + volumes: + - configMap: + name: metrics-proxy + name: haproxy-config +``` + +5. Verify that the metrics are exposed to the monitoring pods by running the following command from the container + +```bash +export TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) +curl -H "Authorization: Bearer ${TOKEN}" http://{node-IP}:{component-port}/metrics +``` + +If some customer doesn't need to expose the kube-proxy metrics, then the daemonset can be configured to run the proxy pods on only the control plane nodes using [node labels](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#built-in-node-labels) and [node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) or using [node affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity). Since kube-proxy is itself a daemonset that runs on all nodes, exposing its metrics requires running the metrics proxy on all nodes. + +This solution is also extensible as the above steps can also be applied to other external or custom components which don't expose metrics endpoint by default. 
It also avoids any security risk of exposing metrics to the internet associated with configuring the bind-address for kube-controller-manager and kube-scheduler. This is explained below in the alternate solutions section. + +Another advantage is that it provides flexibility to the customers in choosing any proxy based on their preference. Some of the most popular proxies include [nginx](https://github.com/nginx-proxy/nginx-proxy), [envoy](https://github.com/envoyproxy/envoy), [haproxy](https://github.com/haproxy/haproxy), [traefik](https://github.com/traefik/traefik), etc. We will document an example config for HAProxy and customers can configure it similarly for other proxies. + +One drawback is that it provides a bad User Experience to the customers as they need to configure these additional objects for each of their clusters. But as the number of users which require this feature is not large enough to justify supporting it in EKS Anywhere, this is a good workaround solution to document it to the users. Even kubeadm doesn't support it for the same reason. For more details, check out this [issue](https://github.com/kubernetes/kubeadm/issues/2388#issuecomment-776073834) + +A disadvantage is that prometheus associates metrics with pod names but since the pods are behind a proxy, the proxy's pod name will be used instead for the metrics. (see [issue](https://github.com/prometheus-operator/kube-prometheus/issues/718#issuecomment-776360908) for more details) + +#### **EKS Anywhere Controller Manager and EKS Anywhere Packages:** + +These components do not perform any kind of authentication or authorization for client requests and listen only on the localhost by default. We will implement the diagnostics feature to expose metrics securely on all interfaces. + +#### **Etcdadm Bootstrap Provider Controller Manager and Etcdadm Controller Controller Manager:** + +These components also do not perform any kind of authentication or authorization for client requests and listen only on the localhost by default. We will implement the diagnostics feature to expose metrics securely on all interfaces. + +#### **Capi-Controller-Manager, capi-kubeadm-bootstrap-controller-manager and capi-kubeadm-control-plane-controller-manager:** + +These components already implement the diagnostics feature to expose metrics securely on all interfaces. No further action needs to be taken to expose metrics for these components securely. + +#### **EKS Anywhere supported CAPI providers (capv, capx, capt, capc):** + +For capc, we have kube-rbac-proxy already implemented as a secure way to expose metrics but it listens only on the localhost. We can remove the dependency on kube-rbac-proxy for capc and implement the diagnostics feature. This would enable us to expose metrics securely on all interfaces at diagnostics address `:8443` + +For capv, the diagnostics feature has already been implemented in the [latest](https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/tag/v1.9.2) release but our `eks-anywhere-build-tooling` repo points to an [older release](https://github.com/aws/eks-anywhere-build-tooling/blob/main/projects/kubernetes-sigs/cluster-api-provider-vsphere/GIT_TAG) version which does not support the diagnostics feature and defaults to `127.0.0.1` for `--metrics-bind-addr` flag. We would just need to bump the capv version to the latest release version. + +For capx, the diagnostics feature has already been implemented and we also point to the latest release in the `eks-anywhere-build-tooling` repo. 
No further action needs to be taken to securely expose metrics for capx. + +For capt, it does not perform any kind of authentication or authorization for client requests and listens only on the localhost by default. We can implement the diagnostics feature to expose metrics securely on all interfaces. + +## Implementation Details + +**Diagnostics Feature for all EKS Anywhere, CAPI etcd and CAPI provider-specific controllers:** + +Diagnostics feature - https://main.cluster-api.sigs.k8s.io/tasks/diagnostics + +``` +spec: + containers: + - command: + - /manager + args: + - --diagnostics-address=${CAPI_DIAGNOSTICS_ADDRESS:=:8443} + - --insecure-diagnostics=false + ports: + - containerPort: 8443 + name: metrics + protocol: TCP + ... +``` + +Add the above args and metrics port to the controller manager deployment in `config/manager/manager.yaml`. + +``` +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +``` + +Add the above rules to the manager cluster role in `config/rbac/role.yaml`. + +``` +// Add RBAC for the authorized diagnostics endpoint. +// +kubebuilder:rbac:groups=authentication.k8s.io,resources=tokenreviews,verbs=create +// +kubebuilder:rbac:groups=authorization.k8s.io,resources=subjectaccessreviews,verbs=create + +func main() { + flags.AddDiagnosticsOptions(fs, &diagnosticsOptions) +} +``` + +Add the diagnostics options CAPI flags and RBAC [markers](https://book.kubebuilder.io/reference/markers/rbac) to the controller binary in `main.go`. + +## Testing + +* E2E tests for the diagnostics feature +* Unit tests for any additional utility functions implemented + +## Documentation + +* Add the necessary steps for configuring the metrics proxy daemonset in the cluster to the EKS Anywhere docs +* We can reference the [CAPI](https://main.cluster-api.sigs.k8s.io/tasks/diagnostics#scraping-metrics) documentation on the diagnostics feature for scraping metrics from the controllers + +## Alternate Solutions Considered + +### Using kube-rbac-proxy for all components + +This alternative uses kube-rbac-proxy for all EKS Anywhere components to expose metrics securely by enforcing authentication and RBAC policies. In this approach, metrics requests are routed through kube-rbac-proxy, which sits between the client and the component's metrics endpoint. Kube-rbac-proxy authenticates the client using various authentication mechanisms such as bearer tokens, client TLS certificates, request header authentication, etc. It then verifies the client's RBAC permissions and only allows access to the metrics endpoint if the client has the necessary privileges. For more details, check out option 2 [here](https://quip-amazon.com/II8XAy90Pq2v/Expose-metrics-of-EKS-A-components#temp:C:fRf4452d35522194e5bb535f4d14) + +This approach enables authentication and RBAC authorization for all the components but requires maintaining an additional upstream dependency. Some Kubernetes system components already have authn/authz enabled by default, and CAPI recently introduced the diagnostics feature, which is the preferred way of enabling authn/authz for controller-runtime based components. So using kube-rbac-proxy as an additional layer of security for these components is not necessary. + +Another thing to note is that the kube-rbac-proxy project is in the alpha stage and may change significantly in the future, so it is better not to take a dependency on it when better alternatives are available.
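+
+For comparison with the kube-rbac-proxy approach, once the diagnostics endpoint is enabled a scraper only needs a service account bound to a ClusterRole that allows GET on `/metrics`. Below is a hedged sketch of a Prometheus scrape job under that assumption (Prometheus running in-cluster; the job name and port name are illustrative):
+
+```yaml
+scrape_configs:
+  - job_name: capi-diagnostics            # illustrative name
+    scheme: https
+    tls_config:
+      insecure_skip_verify: true          # the diagnostics endpoint serves a self-signed certificate
+    authorization:
+      credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+    kubernetes_sd_configs:
+      - role: pod
+    relabel_configs:
+      - source_labels: [__meta_kubernetes_pod_container_port_name]
+        regex: metrics                    # keep only pods exposing a container port named "metrics"
+        action: keep
+```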
+ +### Configurable bind-address for kube-scheduler and kube-controller-manager + +We can allow customers to configure the `--bind-address` flag for these components through the cluster spec to allow listening on all interfaces and IP address families, so that Prometheus or any other component with the appropriate RBAC permissions can scrape the metrics endpoint for these components securely. + +Currently, we cannot make the `--metrics-bind-address` flag configurable for kube-proxy through the cluster spec because CAPI doesn't support configuring kube-proxy. In the future, we can either patch CAPI to enable support for configuring kube-proxy or [disable](https://github.com/kubernetes-sigs/cluster-api/issues/4512#issuecomment-1267092583) installing kube-proxy from CAPI and install it in EKS Anywhere instead. + +**Schema:** + +```yaml +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: mgmt-cluster +spec: + ... + controlPlaneConfiguration: + ... + controllerManagerExtraArgs: + bindAddress: "0.0.0.0" + schedulerExtraArgs: + bindAddress: "0.0.0.0" +``` + +The main concern with making bind-address configurable is that binding on all interfaces might expose the metrics publicly over the internet on a node that has any interface exposed to the internet. In a cluster with a single control plane node, binding to the control plane node IP address would solve the issue, but that wouldn't work for HA clusters with multiple control plane nodes, which is usually the case in a production environment. Another solution would be to apply [firewall rules](https://github.com/kubernetes/kubeadm/issues/2244#issuecomment-763533964) on every node before binding to 0.0.0.0, but this is not a good idea either. + +Another thing to note is that it is difficult to validate the range of IP addresses that the bind address may be configured with. Even Kubernetes does not do any such validation for these components. The only validation that can be done is that the address is in a proper IPv4/IPv6 format. If a user configures some unreachable address, it would be hard to debug the issue with the component. + +**Implementation:** + +```yaml +controllerManagerExtraArgs: + "bind-address": "0.0.0.0" +schedulerExtraArgs: + "bind-address": "0.0.0.0" +``` + +These flags will be fetched from the cluster spec and added to the [controllerManagerExtraArgs](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/#controllermanager-flags) and [schedulerExtraArgs](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/#scheduler-flags) maps in the `ClusterConfiguration` object during create and upgrade operations when generating the control plane CAPI spec.
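+
+As a rough sketch (again assuming the kubeadm v1beta3 API; the actual object is produced by the CAPI control plane template), the rendered `ClusterConfiguration` would carry the flags like this:
+
+```yaml
+# Hypothetical rendered output for illustration only
+apiVersion: kubeadm.k8s.io/v1beta3
+kind: ClusterConfiguration
+controllerManager:
+  extraArgs:
+    bind-address: "0.0.0.0"
+scheduler:
+  extraArgs:
+    bind-address: "0.0.0.0"
+```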
+ +**Testing:** + +* E2E tests will be required to test whether the flags are actually configured for Kubernetes system components + +**Validations:** + +* Validate that the `bind-address` flag is in a proper format similar to how Kubernetes does it [here](https://github.com/kubernetes/kubernetes/blob/f4e246bc93ffb68b33ed67c7896c379efa4207e7/pkg/proxy/apis/config/validation/validation.go#L274) for kube-proxy + +**Documentation:** + +We can add `controlPlaneConfiguration.controllerManagerExtraArgs.bindAddress` and `controlPlaneConfiguration.schedulerExtraArgs.bindAddress` as an optional configuration to our EKS Anywhere docs + +## References + +* https://kubernetes.io/docs/concepts/cluster-administration/system-metrics/ +* https://github.com/kubernetes/kubeadm/issues/1285#issuecomment-459157839 +* https://github.com/kubernetes-sigs/controller-runtime/pull/2407 + diff --git a/designs/images/expose-metrics.png b/designs/images/expose-metrics.png new file mode 100644 index 000000000000..7bb1f8ddec65 Binary files /dev/null and b/designs/images/expose-metrics.png differ diff --git a/docs/config.toml b/docs/config.toml index 69a4a531cb9d..8a1006cc1f09 100644 --- a/docs/config.toml +++ b/docs/config.toml @@ -183,4 +183,10 @@ desc = "Development takes place here!" fullversion = "v0.19" version = "v0.19" docsbranch = "main" -url = "/docs/" +url = "https://anywhere.eks.amazonaws.com" + +[[params.versions]] +fullversion = "v0.18" +version = "v0.18" +docsbranch = "release-0.18" +url = "https://release-0-18.anywhere.eks.amazonaws.com" diff --git a/docs/content/en/docs/clustermgmt/cluster-flux.md b/docs/content/en/docs/clustermgmt/cluster-flux.md index 3b95286f15da..01f6635248fc 100755 --- a/docs/content/en/docs/clustermgmt/cluster-flux.md +++ b/docs/content/en/docs/clustermgmt/cluster-flux.md @@ -11,13 +11,13 @@ description: > ## GitOps Support (optional) -EKS Anywhere supports a [GitOps](https://www.weave.works/technologies/gitops/) workflow for the management of your cluster. +EKS Anywhere supports a [GitOps](https://www.gitops.tech/#what-is-gitops) workflow for the management of your cluster. When you create a cluster with GitOps enabled, EKS Anywhere will automatically commit your cluster configuration to the provided GitHub repository and install a GitOps toolkit on your cluster which watches that committed configuration file. You can then manage the scale of the cluster by making changes to the version controlled cluster configuration file and committing the changes. Once a change has been detected by the GitOps controller running in your cluster, the scale of the cluster will be adjusted to match the committed configuration file. -If you'd like to learn more about GitOps, and the associated best practices, [check out this introduction from Weaveworks](https://www.weave.works/technologies/gitops/). +If you'd like to learn more about GitOps, and the associated best practices, [check out this introduction from Weaveworks](https://www.gitops.tech/#what-is-gitops). >**_NOTE:_** Installing a GitOps controller can be done during cluster creation or through upgrade. In the event that GitOps installation fails, EKS Anywhere cluster creation will continue. 
diff --git a/docs/content/en/docs/clustermgmt/cluster-terraform.md b/docs/content/en/docs/clustermgmt/cluster-terraform.md index 35dbf7932b50..dc3b286285c5 100644 --- a/docs/content/en/docs/clustermgmt/cluster-terraform.md +++ b/docs/content/en/docs/clustermgmt/cluster-terraform.md @@ -67,7 +67,7 @@ how to scale your EKS Anywhere worker nodes using the Terraform Kubernetes provi 3. Configure the Terraform cluster resource definition generated in step 2 - Set `metadata.generation` as a [computed field](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/manifest#computed-fields). Add the following to your cluster resource configuration ```bash - computed_fields = ["metadata.generated"] + computed_fields = ["metadata.generation"] ``` - Configure the field manager to [force reconcile managed resources](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/manifest#field_manager). Add the following configuration block to your cluster resource: ```bash @@ -79,7 +79,7 @@ how to scale your EKS Anywhere worker nodes using the Terraform Kubernetes provi - Remove the `generation` field from the `metadata` of the cluster - Your Terraform cluster resource should look similar to this: ```bash - computed_fields = ["metadata.generated"] + computed_fields = ["metadata.generation"] field_manager { force_conflicts = true } diff --git a/docs/content/en/docs/clustermgmt/cluster-upgrades/airgapped-upgrades.md b/docs/content/en/docs/clustermgmt/cluster-upgrades/airgapped-upgrades.md index b9852a228dbe..207ee1f60001 100644 --- a/docs/content/en/docs/clustermgmt/cluster-upgrades/airgapped-upgrades.md +++ b/docs/content/en/docs/clustermgmt/cluster-upgrades/airgapped-upgrades.md @@ -22,7 +22,7 @@ The procedure to upgrade EKS Anywhere clusters in airgapped environments is simi If the previous steps succeeded, all of the required EKS Anywhere dependencies are now present in your local registry. Before you upgrade your EKS Anywhere cluster, configure `registryMirrorConfiguration` in your EKS Anywhere cluster specification with the information for your local registry. For details see the [Registry Mirror Configuration documentation.]({{< relref "../../getting-started/optional/registrymirror/#registry-mirror-cluster-spec" >}}) ->**_NOTE:_** If you are running EKS Anywhere on bare metal, you must configure `osImageURL` and `hookImagesURLPath` in your EKS Anywhere cluster specification with the location of the upgraded node operating system image and hook OS image. For details, reference the [bare metal configuration documentation.]({{< relref "../../getting-started/baremetal/bare-spec/#osimageurl" >}}) +>**_NOTE:_** If you are running EKS Anywhere on bare metal, you must configure `osImageURL` and `hookImagesURLPath` in your EKS Anywhere cluster specification with the location of the upgraded node operating system image and hook OS image. 
For details, reference the [bare metal configuration documentation.]({{< relref "../../getting-started/baremetal/bare-spec/#osimageurl-optional" >}}) ### Next Steps - [Build upgraded node operating system images for your cluster]({{< relref "../../osmgmt/artifacts/#building-images-for-a-specific-eks-anywhere-version" >}}) diff --git a/docs/content/en/docs/clustermgmt/cluster-upgrades/baremetal-upgrades.md b/docs/content/en/docs/clustermgmt/cluster-upgrades/baremetal-upgrades.md index aed83dd67c67..d2551f2f4a42 100755 --- a/docs/content/en/docs/clustermgmt/cluster-upgrades/baremetal-upgrades.md +++ b/docs/content/en/docs/clustermgmt/cluster-upgrades/baremetal-upgrades.md @@ -108,7 +108,7 @@ spec: ... ``` ->**_NOTE:_** If you have a custom machine image for your nodes in your cluster config yaml or to upgrade a node or group of nodes to a new operating system version (ie. RHEL 8.7 to RHEL 8.8), you may also need to update your [`TinkerbellDatacenterConfig`]({{< relref "../../getting-started/baremetal/bare-spec/#tinkerbelldatacenterconfig-fields" >}}) or [`TinkerbellMachineConfig`]({{< relref "../../getting-started/baremetal/bare-spec/#tinkerbellmachineconfig-fields" >}}) with the new operating system image URL [`osImageURL`]({{< relref "../../getting-started/baremetal/bare-spec/#osimageurl" >}}). +>**_NOTE:_** If you have a custom machine image for your nodes in your cluster config yaml or to upgrade a node or group of nodes to a new operating system version (ie. RHEL 8.7 to RHEL 8.8), you may also need to update your [`TinkerbellDatacenterConfig`]({{< relref "../../getting-started/baremetal/bare-spec/#tinkerbelldatacenterconfig-fields" >}}) or [`TinkerbellMachineConfig`]({{< relref "../../getting-started/baremetal/bare-spec/#tinkerbellmachineconfig-fields" >}}) with the new operating system image URL [`osImageURL`]({{< relref "../../getting-started/baremetal/bare-spec/#osimageurl-optional" >}}). and then you will run the [upgrade cluster command]({{< relref "baremetal-upgrades/#upgrade-cluster-command" >}}). diff --git a/docs/content/en/docs/clustermgmt/etcd-backup-restore/ubuntu-rhel-etcd-backup.md b/docs/content/en/docs/clustermgmt/etcd-backup-restore/ubuntu-rhel-etcd-backup.md index 03f4791cb73c..792d6e5532b5 100644 --- a/docs/content/en/docs/clustermgmt/etcd-backup-restore/ubuntu-rhel-etcd-backup.md +++ b/docs/content/en/docs/clustermgmt/etcd-backup-restore/ubuntu-rhel-etcd-backup.md @@ -24,6 +24,11 @@ EKS-Anywhere clusters use etcd as the backing store. Taking a snapshot of etcd b Etcd offers a built-in snapshot mechanism. You can take a snapshot using the `etcdctl snapshot save` or `etcdutl snapshot save` command by following the steps given below. +{{% alert title="Note" color="warning" %}} +The following commands use ec2-user as the username. For EKS Anywhere on vSphere, Bare Metal, and Snow, the default username is ec2-user. For EKS Anywhere on Apache CloudStack, the default username is capc. +For EKS Anywhere on Nutanix, the default username is eksa. The default username cannot be changed. +{{% /alert %}} + 1. Login to any one of the etcd VMs ```bash ssh -i $PRIV_KEY ec2-user@$ETCD_VM_IP @@ -93,6 +98,7 @@ scp -i $PRIV_KEY snapshot.db ec2-user@$ETCD_VM_IP:/home/ec2-user 2. To run the etcdctl or etcdutl snapshot restore command, you need to provide the following configuration parameters: * name: This is the name of the etcd member. The value of this parameter should match the value used while starting the member. 
This can be obtained by running: ```bash +sudo su export ETCD_NAME=$(cat /etc/etcd/etcd.env | grep ETCD_NAME | awk -F'=' '{print $2}') ``` * initial-advertise-peer-urls: This is the advertise peer URL with which this etcd member was configured. It should be the exact value with which this etcd member was started. This can be obtained by running: diff --git a/docs/content/en/docs/clustermgmt/observability/expose-metrics.md b/docs/content/en/docs/clustermgmt/observability/expose-metrics.md new file mode 100644 index 000000000000..a1951b2258cc --- /dev/null +++ b/docs/content/en/docs/clustermgmt/observability/expose-metrics.md @@ -0,0 +1,175 @@ +--- +title: "Expose metrics for EKS Anywhere components" +linkTitle: "Expose metrics" +weight: 100 +date: 2024-04-06 +description: > + Expose metrics for EKS Anywhere components +--- + +Some Kubernetes system components like kube-controller-manager, kube-scheduler and kube-proxy expose metrics only on the localhost by default. In order to expose metrics for these components so that other monitoring systems like Prometheus can scrape them, you can deploy a proxy as a Daemonset on the host network of the nodes. The proxy pods also need to be configured with control plane tolerations so that they can be scheduled on the control plane nodes. + +### Configure Proxy + +To configure a proxy for exposing metrics on an EKS Anywhere cluster, you can perform the following steps: + +1. Create a config map to store the proxy configuration. + + Below is an example ConfigMap if you use HAProxy as the proxy server. + ```bash + cat << EOF | kubectl apply -f - + apiVersion: v1 + kind: ConfigMap + metadata: + name: metrics-proxy + data: + haproxy.cfg: | + defaults + mode http + timeout connect 5000ms + timeout client 5000ms + timeout server 5000ms + default-server maxconn 10 + + frontend kube-proxy + bind \${NODE_IP}:10249 + http-request deny if !{ path /metrics } + default_backend kube-proxy + backend kube-proxy + server kube-proxy 127.0.0.1:10249 check + + frontend kube-controller-manager + bind \${NODE_IP}:10257 + http-request deny if !{ path /metrics } + default_backend kube-controller-manager + backend kube-controller-manager + server kube-controller-manager 127.0.0.1:10257 ssl verify none check + + frontend kube-scheduler + bind \${NODE_IP}:10259 + http-request deny if !{ path /metrics } + default_backend kube-scheduler + backend kube-scheduler + server kube-scheduler 127.0.0.1:10259 ssl verify none check + EOF + ``` + +2. Create a daemonset for the proxy and mount the config map volume onto the proxy pods. + + Below is an example configuration for the HAProxy daemonset. 
+ ```bash + cat << EOF | kubectl apply -f - + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: metrics-proxy + spec: + selector: + matchLabels: + app: metrics-proxy + template: + metadata: + labels: + app: metrics-proxy + spec: + tolerations: + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + hostNetwork: true + containers: + - name: haproxy + image: public.ecr.aws/eks-anywhere/kubernetes-sigs/kind/haproxy:v0.20.0-eks-a-54 + env: + - name: NODE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + ports: + - name: kube-proxy + containerPort: 10249 + - name: kube-ctrl-mgr + containerPort: 10257 + - name: kube-scheduler + containerPort: 10259 + volumeMounts: + - mountPath: "/usr/local/etc/haproxy" + name: haproxy-config + volumes: + - configMap: + name: metrics-proxy + name: haproxy-config + EOF + ``` + +### Configure Client Permissions + +1. Create a new cluster role for the client to access the metrics endpoint of the components. + ```bash + cat << EOF | kubectl apply -f - + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: metrics-reader + rules: + - nonResourceURLs: + - "/metrics" + verbs: + - get + EOF + ``` + +2. Create a new cluster role binding to bind the above cluster role to the client pod's service account. + + ```bash + cat << EOF | kubectl apply -f - + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: metrics-reader-binding + subjects: + - kind: ServiceAccount + name: default + namespace: default + roleRef: + kind: ClusterRole + name: metrics-reader + apiGroup: rbac.authorization.k8s.io + EOF + ``` + +3. Verify that the metrics are exposed to the client pods by running the following commands: + ```bash + cat << EOF | kubectl apply -f - + apiVersion: v1 + kind: Pod + metadata: + name: test-pod + spec: + tolerations: + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + containers: + - command: + - /bin/sleep + - infinity + image: curlimages/curl:latest + name: test-container + env: + - name: NODE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + EOF + ``` + + ```bash + kubectl exec -it test-pod -- sh + export TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) + curl -H "Authorization: Bearer ${TOKEN}" "http://${NODE_IP}:10257/metrics" + curl -H "Authorization: Bearer ${TOKEN}" "http://${NODE_IP}:10259/metrics" + curl -H "Authorization: Bearer ${TOKEN}" "http://${NODE_IP}:10249/metrics" + ``` \ No newline at end of file diff --git a/docs/content/en/docs/clustermgmt/observability/overview.md b/docs/content/en/docs/clustermgmt/observability/overview.md index 5f36ed9c60b5..f4674a860162 100644 --- a/docs/content/en/docs/clustermgmt/observability/overview.md +++ b/docs/content/en/docs/clustermgmt/observability/overview.md @@ -17,4 +17,5 @@ AWS offers comprehensive monitoring, logging, alarming, and dashboard capabiliti 1. [Verify EKS Anywhere cluster status]({{< relref "./cluster-verify" >}}) 1. [Use the EKS Connector to view EKS Anywhere clusters and resources in the EKS console]({{< relref "./cluster-connect" >}}) 1. [Use Fluent Bit and Container Insights to send metrics and logs to CloudWatch]({{< relref "./fluentbit-logging" >}}) -1. [Use ADOT to send metrics to AMP and AMG](https://aws.amazon.com/blogs/mt/using-curated-packages-and-aws-managed-open-source-services-to-observe-your-on-premise-kubernetes-environment/) \ No newline at end of file +1. 
[Use ADOT to send metrics to AMP and AMG](https://aws.amazon.com/blogs/mt/using-curated-packages-and-aws-managed-open-source-services-to-observe-your-on-premise-kubernetes-environment/) +1. [Expose metrics for EKS Anywhere components]({{< relref "./expose-metrics" >}}) \ No newline at end of file diff --git a/docs/content/en/docs/clustermgmt/security/best-practices.md b/docs/content/en/docs/clustermgmt/security/best-practices.md index 277f99306cc2..2b36ddb461a9 100644 --- a/docs/content/en/docs/clustermgmt/security/best-practices.md +++ b/docs/content/en/docs/clustermgmt/security/best-practices.md @@ -98,8 +98,7 @@ EKS Anywhere stores sensitive information, like the vSphere credentials and GitH These secret objects are namespaced, for example in the `eksa-system` and `flux-system` namespace, and limiting access to the sensitive namespaces will ensure that these secrets will not be exposed. Additionally, limit access to the underlying node. Access to the node could allow access to the secret content. -EKS Anywhere does not currently support encryption-at-rest for Kubernetes secrets. -EKS Anywhere support for [Key Management Services (KMS)](https://kubernetes.io/docs/tasks/administer-cluster/kms-provider/) is planned. +EKS Anywhere also supports encryption-at-rest for Kubernetes secrets. See [etcd encryption]({{< relref "../../getting-started/optional/etcdencryption" >}}) for more details. ### The EKS Anywhere `kubeconfig` file diff --git a/docs/content/en/docs/clustermgmt/security/manually-renew-certs.md b/docs/content/en/docs/clustermgmt/security/manually-renew-certs.md index 001155f83c26..57d878594080 100644 --- a/docs/content/en/docs/clustermgmt/security/manually-renew-certs.md +++ b/docs/content/en/docs/clustermgmt/security/manually-renew-certs.md @@ -84,8 +84,9 @@ ${IMAGE_ID} tmp-cert-renew \ {{< tab header="Ubuntu or RHEL" lang="bash" >}} sudo etcdctl --cacert=/etc/etcd/pki/ca.crt --cert=/etc/etcd/pki/etcdctl-etcd-client.crt --key=/etc/etcd/pki/etcdctl-etcd-client.key member list {{< /tab >}} + {{< tab header="Bottlerocket" lang="bash" >}} -ETCD_CONTAINER_ID=$(ctr -n k8s.io c ls | grep -w "etcd-io" | cut -d " " -f1) +ETCD_CONTAINER_ID=$(ctr -n k8s.io c ls | grep -w "etcd-io" | cut -d " " -f1 | tail -1) ctr -n k8s.io t exec -t --exec-id etcd ${ETCD_CONTAINER_ID} etcdctl \ --cacert=/var/lib/etcd/pki/ca.crt \ --cert=/var/lib/etcd/pki/server.crt \ @@ -94,15 +95,17 @@ ctr -n k8s.io t exec -t --exec-id etcd ${ETCD_CONTAINER_ID} etcdctl \ {{< /tab >}} {{< /tabpane >}} +- If the above command fails due to multiple etcd containers existing, then navigate to `/var/log/containers/etcd` and confirm which container was running during the issue timeframe (this container would be the 'stale' container). Delete this older etcd once you have renewed the certs and the new etcd container will be able to enter a functioning state. If you don’t do this, the two etcd containers will stay indefinitely and the etcd will not recover. + 3. Repeat the above steps for all etcd nodes. -4. Save the `api-server-etcd-client` `crt` and `key` file as a Secret from one of the etcd nodes, so the `key` can be picked up by new control plane nodes. You will also need them when renewing the certificates on control plane nodes. See the [Kubernetes documentation](https://kubernetes.io/docs/tasks/configmap-secret/managing-secret-using-config-file/#edit-secret) for details on editing Secrets. +4. 
Save the `apiserver-etcd-client` `crt` and `key` file as a Secret from one of the etcd nodes, so the `key` can be picked up by new control plane nodes. You will also need them when renewing the certificates on control plane nodes. See the [Kubernetes documentation](https://kubernetes.io/docs/tasks/configmap-secret/managing-secret-using-config-file/#edit-secret) for details on editing Secrets. ```bash -kubectl edit secret ${cluster-name}-api-server-etcd-client -n eksa-system +kubectl edit secret ${cluster-name}-apiserver-etcd-client -n eksa-system ``` {{% alert title="Note" color="primary" %}} -For Bottlerocket nodes, the `key` of `api-server-etcd-client` is `server-etcd.client.crt` instead of `api-server-etcd-client.crt`. +For Bottlerocket nodes, the `key` of `apiserver-etcd-client` is `server-etcd.client.crt` instead of `apiserver-etcd-client.crt`. {{% /alert %}} #### Control plane nodes @@ -151,7 +154,17 @@ ${IMAGE_ID} tmp-cert-renew \ {{< /tab >}} {{< /tabpane >}} -3. If you have external etcd nodes, manually replace the `api-server-etcd-client.crt` and `api-server-etcd-client.key` file in `/etc/kubernetes/pki` (or `/var/lib/kubeadm/pki` in Bottlerocket) folder with the files you saved from any etcd node. +3. If you have external etcd nodes, manually replace the `server-etcd-client.crt` and `apiserver-etcd-client.key` files in the `/etc/kubernetes/pki` (or `/var/lib/kubeadm/pki` in Bottlerocket) folder with the files you saved from any etcd node. + + - **For Bottlerocket**: + + ``` + cp apiserver-etcd-client.key /tmp/ + cp server-etcd-client.crt /tmp/ + sudo sheltie + cp /run/host-containerd/io.containerd.runtime.v2.task/default/admin/rootfs/tmp/apiserver-etcd-client.key /var/lib/kubeadm/pki/ + cp /run/host-containerd/io.containerd.runtime.v2.task/default/admin/rootfs/tmp/server-etcd-client.crt /var/lib/kubeadm/pki/ + ``` 4. Restart static control plane pods. @@ -159,8 +172,8 @@ ${IMAGE_ID} tmp-cert-renew \ - **For Bottlerocket**: re-enable the static pods: ``` - apiclient get | jq -r '.settings.kubernetes["static-pods"] | keys[]' | xargs -n 1 -I {} apiclient set settings.kubernetes.static-pods.{}.enabled=false - apiclient get | jq -r '.settings.kubernetes["static-pods"] | keys[]' | xargs -n 1 -I {} apiclient set settings.kubernetes.static-pods.{}.enabled=true` + apiclient get | apiclient exec admin jq -r '.settings.kubernetes["static-pods"] | keys[]' | xargs -n 1 -I {} apiclient set settings.kubernetes.static-pods.{}.enabled=false + apiclient get | apiclient exec admin jq -r '.settings.kubernetes["static-pods"] | keys[]' | xargs -n 1 -I {} apiclient set settings.kubernetes.static-pods.{}.enabled=true ``` You can verify Pods restarting by running `kubectl` from your Admin machine. @@ -168,3 +181,115 @@ ${IMAGE_ID} tmp-cert-renew \ 5. Repeat the above steps for all control plane nodes. You can similarly use the above steps to rotate a single certificate instead of all certificates. + +### Kubelet +If `kubeadm certs check-expiration` is happy, but kubectl commands against the cluster fail with `x509: certificate has expired or is not yet valid`, then it's likely that the kubelet certs did not rotate. To rotate them, SSH back into one of the control plane nodes and do the following. + +``` +# backup certs +cd /var/lib/kubelet +cp -r pki pki.bak +rm pki/* + +systemctl restart kubelet +``` + +In some cases, the certs might not regenerate and kubelet will fail to start due to a missing `kubelet-client-current.pem`. 
If this happens, run the following commands: + +{{< tabpane >}} +{{< tab header="Ubuntu or RHEL" lang="bash" >}} +cat /var/lib/kubeadm/admin.conf | grep client-certificate-data: | sed 's/^.*: //' | base64 -d > /var/lib/kubelet/pki/kubelet-client-current.pem + +cat /var/lib/kubeadm/admin.conf | grep client-key-data: | sed 's/^.*: //' | base64 -d >> /var/lib/kubelet/pki/kubelet-client-current.pem + +systemctl restart kubelet + +{{< /tab >}} +{{< tab header="Bottlerocket" lang="bash" >}} +cat /var/lib/kubeadm/admin.conf | grep client-certificate-data: | apiclient exec admin sed 's/^.*: //' | base64 -d > /var/lib/kubelet/pki/kubelet-client-current.pem + +cat /var/lib/kubeadm/admin.conf | grep client-key-data: | apiclient exec admin sed 's/^.*: //' | base64 -d >> /var/lib/kubelet/pki/kubelet-client-current.pem + +systemctl restart kubelet + +{{< /tab >}} +{{< /tabpane >}} + +### Post Renewal +Once all the certificates are valid, verify the kcp object on the affected cluster(s) is not paused by running `kubectl describe kcp -n eksa-system | grep cluster.x-k8s.io/paused`. If it is paused, then this usually indicates an issue with the etcd cluster. Check the logs for pods under the `etcdadm-controller-system` namespace for any errors. +If the logs indicate an issue with the etcd endpoints, then you need to update `spec.clusterConfiguration.etcd.endpoints` in the cluster's `kubeadmconfig` resource: `kubectl edit kcp -n eksa-system` + +Example: +``` +etcd: + external: + caFile: /var/lib/kubeadm/pki/etcd/ca.crt + certFile: /var/lib/kubeadm/pki/server-etcd-client.crt + endpoints: + - https://xxx.xxx.xxx.xxx:2379 + - https://xxx.xxx.xxx.xxx:2379 + - https://xxx.xxx.xxx.xxx:2379 +``` + +### What do I do if my local kubeconfig has expired? + +Your local kubeconfig, used to interact with the cluster, contains a certificate that expires after 1 year. When you rotate cluster certificates, a new kubeconfig with a new certificate is created as a Secret in the cluster. If you do not retrieve the new kubeconfig and your local kubeconfig certificate expires, you will receive the following error: + +``` +Error: Couldn't get current Server API group list: the server has asked for the client to provide credentials error: you must be logged in to the server. +This error typically occurs when the cluster certificates have been renewed or extended during the upgrade process. To resolve this issue, you need to update your local kubeconfig file with the new cluster credentials. +``` + +You can extract your new kubeconfig using the following steps. + +1. 
You can extract your new kubeconfig by SSHing to one of the Control Plane nodes, exporting kubeconfig from the secret object, and copying kubeconfig file to `/tmp` directory, as shown here: + +``` +ssh -i @ # USER_NAME should be ec2-user for bottlerocket, ubuntu for Ubuntu ControlPlane machine Operating System + +``` + +{{< tabpane >}} +{{< tab header="Ubuntu or RHEL" lang="bash" >}} + +export CLUSTER_NAME="" + +cat /var/lib/kubeadm/admin.conf +export KUBECONFIG="/var/lib/kubeadm/admin.conf" + +kubectl get secret ${CLUSTER_NAME}-kubeconfig -n eksa-system -o yaml -o=jsonpath="{.data.value}" | base64 --decode > /tmp/user-admin.kubeconfig + +{{< /tab >}} + +{{< tab header="Bottlerocket" lang="bash" >}} + +# You would need to be in the admin container when you ssh to the Bottlerocket machine +# open a root shell +sudo sheltie + +cat /var/lib/kubeadm/admin.conf + +cat /var/lib/kubeadm/admin.conf > /run/host-containerd/io.containerd.runtime.v2.task/default/admin/rootfs/tmp/kubernetes-admin.kubeconfig +exit # exit from the sudo sheltie container + +export CLUSTER_NAME="" +export KUBECONFIG="/tmp/kubernetes-admin.kubeconfig" +kubectl get secret ${CLUSTER_NAME}-kubeconfig -n eksa-system -o yaml -o=jsonpath="{.data.value}" | base64 --decode > /tmp/user-admin.kubeconfig +exit # exit from the Control Plane Machine + +{{< /tab >}} +{{< /tabpane >}} +Note: Install kubectl on the Control Plane Machine using the instructions [here](https://anywhere.eks.amazonaws.com/docs/getting-started/install/#manually-macos-and-linux) + +2. From your admin machine, download the kubeconfig file from the ControlPlane node and use it to access your Kubernetes Cluster. + +``` +ssh + +export CONTROLPLANE_IP="" +sftp -i @${CONTROLPLANE_IP}:/tmp/user-admin.kubeconfig . # USER_NAME should be ec2-user for bottlerocket, ubuntu for Ubuntu ControlPlane machine + +ls -ltr +export KUBECONFIG="user-admin.kubeconfig" + +kubectl get pods diff --git a/docs/content/en/docs/clustermgmt/support/purchase-subscription.md b/docs/content/en/docs/clustermgmt/support/purchase-subscription.md index 4eadf44b9be5..e5aa699a15e8 100644 --- a/docs/content/en/docs/clustermgmt/support/purchase-subscription.md +++ b/docs/content/en/docs/clustermgmt/support/purchase-subscription.md @@ -184,7 +184,7 @@ aws eks tag-resource \ ## Delete Subscriptions ->**_NOTE_** Only inactive subscriptions can be deleted. Deleting inactive subscriptions removes them from the AWS Management Console view and API responses. +>**_NOTE_** Only inactive subscriptions can be deleted. Deleting inactive subscriptions removes them from the AWS Management Console view and API responses. To delete any Active Subscriptions, please create a Support Case with AWS Support team. ### AWS Management Console diff --git a/docs/content/en/docs/getting-started/_configuration/cluster_clusterNetwork.html b/docs/content/en/docs/getting-started/_configuration/cluster_clusterNetwork.html index a3f29b337ea7..6e287ea908a5 100644 --- a/docs/content/en/docs/getting-started/_configuration/cluster_clusterNetwork.html +++ b/docs/content/en/docs/getting-started/_configuration/cluster_clusterNetwork.html @@ -8,29 +8,29 @@ ### clusterNetwork.cniConfig (required) CNI plugin configuration. Supports `cilium`. -### clusterNetwork.cniConfig.cilium.policyEnforcementMode +### clusterNetwork.cniConfig.cilium.policyEnforcementMode (optional) Optionally specify a policyEnforcementMode of `default`, `always` or `never`. 
-### clusterNetwork.cniConfig.cilium.egressMasqueradeInterfaces +### clusterNetwork.cniConfig.cilium.egressMasqueradeInterfaces (optional) Optionally specify a network interface name or interface prefix used for masquerading. See EgressMasqueradeInterfaces option. -### clusterNetwork.cniConfig.cilium.skipUpgrade +### clusterNetwork.cniConfig.cilium.skipUpgrade (optional) When true, skip Cilium maintenance during upgrades. Also see Use a custom CNI. -### clusterNetwork.cniConfig.cilium.routingMode +### clusterNetwork.cniConfig.cilium.routingMode (optional) Optionally specify the routing mode. Accepts `default` and `direct`. Also see RoutingMode option. -### clusterNetwork.cniConfig.cilium.ipv4NativeRoutingCIDR +### clusterNetwork.cniConfig.cilium.ipv4NativeRoutingCIDR (optional) Optionally specify the CIDR to use when RoutingMode is set to direct. When specified, Cilium assumes networking for this CIDR is preconfigured and hands traffic destined for that range to the Linux network stack without applying any SNAT. -### clusterNetwork.cniConfig.cilium.ipv6NativeRoutingCIDR +### clusterNetwork.cniConfig.cilium.ipv6NativeRoutingCIDR (optional) Optionally specify the IPv6 CIDR to use when RoutingMode is set to direct. When specified, Cilium assumes networking for this CIDR is preconfigured and hands traffic destined for that range to the Linux network stack without diff --git a/docs/content/en/docs/getting-started/airgapped/_index.md b/docs/content/en/docs/getting-started/airgapped/_index.md index e2de6d3a5926..b11a375786f7 100644 --- a/docs/content/en/docs/getting-started/airgapped/_index.md +++ b/docs/content/en/docs/getting-started/airgapped/_index.md @@ -39,7 +39,7 @@ The process for preparing your airgapped environment for EKS Anywhere is summari If the previous steps succeeded, all of the required EKS Anywhere dependencies are now present in your local registry. Before you create your EKS Anywhere cluster, configure `registryMirrorConfiguration` in your EKS Anywhere cluster specification with the information for your local registry. For details see the [Registry Mirror Configuration documentation.]({{< relref "../../getting-started/optional/registrymirror/#registry-mirror-cluster-spec" >}}) ->**_NOTE:_** If you are running EKS Anywhere on bare metal, you must configure `osImageURL` and `hookImagesURLPath` in your EKS Anywhere cluster specification with the location of your node operating system image and the hook OS image. For details, reference the [bare metal configuration documentation.]({{< relref "../baremetal/bare-spec/#osimageurl" >}}) +>**_NOTE:_** If you are running EKS Anywhere on bare metal, you must configure `osImageURL` and `hookImagesURLPath` in your EKS Anywhere cluster specification with the location of your node operating system image and the hook OS image. For details, reference the [bare metal configuration documentation.]({{< relref "../baremetal/bare-spec/#osimageurl-optional" >}}) ### Next Steps - Review EKS Anywhere [cluster networking requirements]({{< relref "../ports" >}}) diff --git a/docs/content/en/docs/getting-started/airgapped/airgap-steps.md b/docs/content/en/docs/getting-started/airgapped/airgap-steps.md index 7b12e53e0c0e..d19b66df019f 100644 --- a/docs/content/en/docs/getting-started/airgapped/airgap-steps.md +++ b/docs/content/en/docs/getting-started/airgapped/airgap-steps.md @@ -18,7 +18,7 @@ toc_hide: true 1. 
Set up a local registry mirror to host the downloaded EKS Anywhere images and configure your Admin machine with the certificates and authentication information if your registry requires it. For details, refer to the [Registry Mirror Configuration documentation.]({{< relref "../../getting-started/optional/registrymirror/#configure-local-registry-mirror" >}}) -1. Import images to the local registry mirror using the following command. Set `REGISTRY_MIRROR_URL` to the url of the local registry mirror you created in the previous step. This command may take several minutes to complete. To monitor the progress of the command, you can run with the `-v 6` command line argument. +1. Import images to the local registry mirror using the following command. Set `REGISTRY_MIRROR_URL` to the url of the local registry mirror you created in the previous step. This command may take several minutes to complete. To monitor the progress of the command, you can run with the `-v 6` command line argument. When using self-signed certificates for your registry, you should run with the `--insecure` command line argument to indicate skipping TLS verification while pushing helm charts and bundles. ```bash export REGISTRY_MIRROR_URL= ``` @@ -38,6 +38,8 @@ toc_hide: true The `copy packages` command uses the credentials in your docker config file. So you must `docker login` to the source registries and the destination registry before running the command. + When using self-signed certificates for your registry, you should run with the `--dst-insecure` command line argument to indicate skipping TLS verification while copying curated packages. + ```bash eksctl anywhere copy packages \ ${REGISTRY_MIRROR_URL}/curated-packages \ diff --git a/docs/content/en/docs/getting-started/baremetal/bare-spec.md b/docs/content/en/docs/getting-started/baremetal/bare-spec.md index 8e5a9d05515d..5b96f402637f 100644 --- a/docs/content/en/docs/getting-started/baremetal/bare-spec.md +++ b/docs/content/en/docs/getting-started/baremetal/bare-spec.md @@ -18,7 +18,8 @@ The following additional optional configuration can also be included: * [IAM Authenticator]({{< relref "../optional/iamauth.md" >}}) * [OIDC]({{< relref "../optional/oidc.md" >}}) * [Registry Mirror]({{< relref "../optional/registrymirror.md" >}}) -* [Machine Health Check Timeouts]({{< relref "../optional/healthchecks.md" >}}) +* [Machine Health Checks]({{< relref "../optional/healthchecks.md" >}}) +* [API Server Extra Args]({{< relref "../optional/api-server-extra-args.md" >}}) To generate your own cluster configuration, follow instructions from the [Create Bare Metal cluster]({{< relref "./baremetal-getstarted" >}}) section and modify it using descriptions below. For information on how to add cluster configuration settings to this file for advanced node configuration, see [Advanced Bare Metal cluster configuration]({{< relref "#advanced-bare-metal-cluster-configuration" >}}). @@ -121,7 +122,7 @@ the control plane nodes for kube-apiserver loadbalancing. ### controlPlaneConfiguration.machineGroupRef (required) Refers to the Kubernetes object with Tinkerbell-specific configuration for your nodes. See `TinkerbellMachineConfig Fields` below. -### controlPlaneConfiguration.taints +### controlPlaneConfiguration.taints (optional) A list of taints to apply to the control plane nodes of the cluster. Replaces the default control plane taint (For k8s versions prior to 1.24, `node-role.kubernetes.io/master`. For k8s versions 1.24+, `node-role.kubernetes.io/control-plane`). 
The default control plane components will tolerate the provided taints. @@ -132,29 +133,29 @@ Modifying the taints associated with the control plane configuration will cause Any pods that you run on the control plane nodes must tolerate the taints you provide in the control plane configuration. > -### controlPlaneConfiguration.labels +### controlPlaneConfiguration.labels (optional) A list of labels to apply to the control plane nodes of the cluster. This is in addition to the labels that EKS Anywhere will add by default. Modifying the labels associated with the control plane configuration will cause new nodes to be rolled out, replacing the existing nodes. -#### controlPlaneConfiguration.upgradeRolloutStrategy +#### controlPlaneConfiguration.upgradeRolloutStrategy (optional) Configuration parameters for upgrade strategy. -#### controlPlaneConfiguration.upgradeRolloutStrategy.type +#### controlPlaneConfiguration.upgradeRolloutStrategy.type (optional) Default: `RollingUpdate` Type of rollout strategy. Supported values: `RollingUpdate`,`InPlace`. >**_NOTE:_** The upgrade rollout strategy type must be the same for all control plane and worker nodes. -#### controlPlaneConfiguration.upgradeRolloutStrategy.rollingUpdate +#### controlPlaneConfiguration.upgradeRolloutStrategy.rollingUpdate (optional) Configuration parameters for customizing rolling upgrade behavior. >**_NOTE:_** The rolling update parameters can only be configured if `upgradeRolloutStrategy.type` is `RollingUpdate`. -#### controlPlaneConfiguration.upgradeRolloutStrategy.rollingUpdate.maxSurge +#### controlPlaneConfiguration.upgradeRolloutStrategy.rollingUpdate.maxSurge (optional) Default: 1 This can not be 0 if maxUnavailable is 0. @@ -163,83 +164,83 @@ The maximum number of machines that can be scheduled above the desired number of Example: When this is set to n, the new worker node group can be scaled up immediately by n when the rolling upgrade starts. Total number of machines in the cluster (old + new) never exceeds (desired number of machines + n). Once scale down happens and old machines are brought down, the new worker node group can be scaled up further ensuring that the total number of machines running at any time does not exceed the desired number of machines + n. -### controlPlaneConfiguration.skipLoadBalancerDeployment +### controlPlaneConfiguration.skipLoadBalancerDeployment (optional) Optional field to skip deploying the control plane load balancer. Make sure your infrastructure can handle control plane load balancing when you set this field to true. In most cases, you should not set this field to true. -### datacenterRef +### datacenterRef (required) Refers to the Kubernetes object with Tinkerbell-specific configuration. See `TinkerbellDatacenterConfig Fields` below. ### kubernetesVersion (required) The Kubernetes version you want to use for your cluster. Supported values: `1.28`, `1.27`, `1.26`, `1.25`, `1.24` -### managementCluster +### managementCluster (required) Identifies the name of the management cluster. If your cluster spec is for a standalone or management cluster, this value is the same as the cluster name. -### workerNodeGroupConfigurations +### workerNodeGroupConfigurations (optional) This takes in a list of node groups that you can define for your workers. You can omit `workerNodeGroupConfigurations` when creating Bare Metal clusters. If you omit `workerNodeGroupConfigurations`, control plane nodes will not be tainted and all pods will run on the control plane nodes. 
This mechanism can be used to deploy Bare Metal clusters on a single server. You can also run multi-node Bare Metal clusters without `workerNodeGroupConfigurations`. >**_NOTE:_** Empty `workerNodeGroupConfigurations` is not supported when Kubernetes version <= 1.21. -### workerNodeGroupConfigurations.count +### workerNodeGroupConfigurations[*].count (optional) Number of worker nodes. Optional if autoscalingConfiguration is used, in which case count will default to `autoscalingConfiguration.minCount`. Refers to [troubleshooting machine health check remediation not allowed]({{< relref "../../troubleshooting/troubleshooting/#machine-health-check-shows-remediation-is-not-allowed" >}}) and choose a sufficient number to allow machine health check remediation. -### workerNodeGroupConfigurations.machineGroupRef (required) +### workerNodeGroupConfigurations[*].machineGroupRef (required) Refers to the Kubernetes object with Tinkerbell-specific configuration for your nodes. See `TinkerbellMachineConfig Fields` below. -### workerNodeGroupConfigurations.name (required) +### workerNodeGroupConfigurations[*].name (required) Name of the worker node group (default: md-0) -### workerNodeGroupConfigurations.autoscalingConfiguration +### workerNodeGroupConfigurations[*].autoscalingConfiguration (optional) Configuration parameters for Cluster Autoscaler. >**_NOTE:_** Autoscaling configuration is not supported when using the `InPlace` upgrade rollout strategy. -### workerNodeGroupConfigurations.autoscalingConfiguration.minCount +### workerNodeGroupConfigurations[*].autoscalingConfiguration.minCount (optional) Minimum number of nodes for this node group's autoscaling configuration. -### workerNodeGroupConfigurations.autoscalingConfiguration.maxCount +### workerNodeGroupConfigurations[*].autoscalingConfiguration.maxCount (optional) Maximum number of nodes for this node group's autoscaling configuration. -### workerNodeGroupConfigurations.taints +### workerNodeGroupConfigurations[*].taints (optional) A list of taints to apply to the nodes in the worker node group. Modifying the taints associated with a worker node group configuration will cause new nodes to be rolled-out, replacing the existing nodes associated with the configuration. At least one node group must not have `NoSchedule` or `NoExecute` taints applied to it. -### workerNodeGroupConfigurations.labels +### workerNodeGroupConfigurations[*].labels (optional) A list of labels to apply to the nodes in the worker node group. This is in addition to the labels that EKS Anywhere will add by default. Modifying the labels associated with a worker node group configuration will cause new nodes to be rolled out, replacing the existing nodes associated with the configuration. -### workerNodeGroupConfigurations.kubernetesVersion +### workerNodeGroupConfigurations[*].kubernetesVersion (optional) The Kubernetes version you want to use for this worker node group. [Supported values]({{< relref "../../concepts/support-versions/#kubernetes-versions" >}}): `1.28`, `1.27`, `1.26`, `1.25`, `1.24` Must be less than or equal to the cluster `kubernetesVersion` defined at the root level of the cluster spec. The worker node kubernetesVersion must be no more than two minor Kubernetes versions lower than the cluster control plane's Kubernetes version. Removing `workerNodeGroupConfiguration.kubernetesVersion` will trigger an upgrade of the node group to the `kubernetesVersion` defined at the root level of the cluster spec. 
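Putting the worker node group fields above together, the following is a minimal illustrative sketch (not a complete Bare Metal cluster spec) of a single `workerNodeGroupConfigurations` entry; the group name, machine config name, counts, taints, and labels are placeholder values:

```yaml
spec:
  workerNodeGroupConfigurations:
  - name: md-0                        # worker node group name (default: md-0)
    count: 2                          # may be omitted when autoscalingConfiguration is set
    machineGroupRef:                  # Tinkerbell-specific machine configuration for this group
      kind: TinkerbellMachineConfig
      name: my-cluster-name
    autoscalingConfiguration:         # optional; count then defaults to minCount
      minCount: 1
      maxCount: 5
    taints:                           # optional; at least one group in the cluster must have
    - key: "key1"                     # no NoSchedule/NoExecute taints
      value: "value1"
      effect: "NoSchedule"
    labels:                           # optional; added to the labels EKS Anywhere applies by default
      "key1": "value1"
    kubernetesVersion: "1.27"         # optional; must not exceed the cluster-level kubernetesVersion
```
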
-#### workerNodeGroupConfigurations.upgradeRolloutStrategy +#### workerNodeGroupConfigurations[*].upgradeRolloutStrategy (optional) Configuration parameters for upgrade strategy. -#### workerNodeGroupConfigurations.upgradeRolloutStrategy.type +#### workerNodeGroupConfigurations[*].upgradeRolloutStrategy.type (optional) Default: `RollingUpdate` Type of rollout strategy. Supported values: `RollingUpdate`,`InPlace`. >**_NOTE:_** The upgrade rollout strategy type must be the same for all control plane and worker nodes. -#### workerNodeGroupConfigurations.upgradeRolloutStrategy.rollingUpdate +#### workerNodeGroupConfigurations[*].upgradeRolloutStrategy.rollingUpdate (optional) Configuration parameters for customizing rolling upgrade behavior. >**_NOTE:_** The rolling update parameters can only be configured if `upgradeRolloutStrategy.type` is `RollingUpdate`. -#### workerNodeGroupConfigurations.upgradeRolloutStrategy.rollingUpdate.maxSurge +#### workerNodeGroupConfigurations[*].upgradeRolloutStrategy.rollingUpdate.maxSurge (optional) Default: 1 This can not be 0 if maxUnavailable is 0. @@ -248,7 +249,7 @@ The maximum number of machines that can be scheduled above the desired number of Example: When this is set to n, the new worker node group can be scaled up immediately by n when the rolling upgrade starts. Total number of machines in the cluster (old + new) never exceeds (desired number of machines + n). Once scale down happens and old machines are brought down, the new worker node group can be scaled up further ensuring that the total number of machines running at any time does not exceed the desired number of machines + n. -#### workerNodeGroupConfigurations.upgradeRolloutStrategy.rollingUpdate.maxUnavailable +#### workerNodeGroupConfigurations[*].upgradeRolloutStrategy.rollingUpdate.maxUnavailable (optional) Default: 0 This can not be 0 if MaxSurge is 0. @@ -259,17 +260,17 @@ Example: When this is set to n, the old worker node group can be scaled down by ## TinkerbellDatacenterConfig Fields -### tinkerbellIP +### tinkerbellIP (required) Required field to identify the IP address of the Tinkerbell service. This IP address must be a unique IP in the network range that does not conflict with other IPs. Once the Tinkerbell services move from the Admin machine to run on the target cluster, this IP address makes it possible for the stack to be used for future provisioning needs. When separate management and workload clusters are supported in Bare Metal, the IP address becomes a necessity. -### osImageURL +### osImageURL (optional) Optional field to replace the default Bottlerocket operating system. EKS Anywhere can only auto-import Bottlerocket. In order to use Ubuntu or RHEL see [building baremetal node images]({{< relref "../../osmgmt/artifacts/#build-bare-metal-node-images" >}}). This field is also useful if you want to provide a customized operating system image or simply host the standard image locally. To upgrade a node or group of nodes to a new operating system version (ie. RHEL 8.7 to RHEL 8.8), modify this field to point to the new operating system image URL and run [upgrade cluster command]({{< relref "../../clustermgmt/cluster-upgrades/baremetal-upgrades/#upgrade-cluster-command" >}}). The `osImageURL` must contain the `Cluster.Spec.KubernetesVersion` or `Cluster.Spec.WorkerNodeGroupConfiguration[].KubernetesVersion` version (in case of modular upgrade). For example, if the Kubernetes version is 1.24, the `osImageURL` name should include 1.24, 1_24, 1-24 or 124. 
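To illustrate the version-matching rule above, here is a hedged sketch of a `TinkerbellDatacenterConfig` whose `osImageURL` embeds Kubernetes version `1.27`; the IP address, web server host, and image file name are placeholders:

```yaml
apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: TinkerbellDatacenterConfig
metadata:
  name: my-cluster-datacenter
spec:
  tinkerbellIP: "192.168.0.11"   # unique IP reserved for the Tinkerbell stack
  # For Kubernetes 1.27, the image name must contain 1.27, 1_27, 1-27 or 127:
  osImageURL: "http://my-web-server:8080/ubuntu-v1.27.6-eks-a-12-amd64.gz"
```
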
-### hookImagesURLPath +### hookImagesURLPath (optional) Optional field to replace the HookOS image. This field is useful if you want to provide a customized HookOS image or simply host the standard image locally. See [Artifacts]({{< relref "../../osmgmt/artifacts/#hookos-kernel-and-initial-ramdisk-for-bare-metal" >}}) for details. @@ -290,19 +291,19 @@ my-web-server └── ubuntu-v1.23.7-eks-a-12-amd64.gz ``` -### skipLoadBalancerDeployment +### skipLoadBalancerDeployment (optional) Optional field to skip deploying the default load balancer for Tinkerbell stack. EKS Anywhere for Bare Metal uses `kube-vip` load balancer by default to expose the Tinkerbell stack externally. You can disable this feature by setting this field to `true`. ->**_NOTE:_** If you skip load balancer deployment, you will have to ensure that the Tinkerbell stack is available at [tinkerbellIP]({{< relref "#tinkerbellip" >}}) once the cluster creation is finished. One way to achieve this is by using the [MetalLB]({{< relref "../../packages/metallb" >}}) package. +>**_NOTE:_** If you skip load balancer deployment, you will have to ensure that the Tinkerbell stack is available at [tinkerbellIP]({{< relref "#tinkerbellip-required" >}}) once the cluster creation is finished. One way to achieve this is by using the [MetalLB]({{< relref "../../packages/metallb" >}}) package. ## TinkerbellMachineConfig Fields In the example, there are `TinkerbellMachineConfig` sections for control plane (`my-cluster-name-cp`) and worker (`my-cluster-name`) machine groups. The following fields identify information needed to configure the nodes in each of those groups. >**_NOTE:_** Currently, you can only have one machine group for all machines in the control plane, although you can have multiple machine groups for the workers. > -### hardwareSelector +### hardwareSelector (optional) Use fields under `hardwareSelector` to add key/value pair labels to match particular machines that you identified in the CSV file where you defined the machines in your cluster. Choose any label name you like. For example, if you had added the label `node=cp-machine` to the machines listed in your CSV file that you want to be control plane nodes, the following `hardwareSelector` field would cause those machines to be added to the control plane: @@ -331,7 +332,7 @@ See TinkerbellTemplateConfig fields below. EKS Anywhere will generate default templates based on `osFamily` during the `create` command. You can override this default template by providing your own template here. -### users +### users (optional) The name of the user you want to configure to access your virtual machines through SSH. The default is `ec2-user`. @@ -471,7 +472,7 @@ spec: Pay special attention to the `BOOTCONFIG_CONTENTS` environment section below if you wish to set up console redirection for the kernel and systemd. If you are only using a direct attached monitor as your primary display device, no additional configuration is needed here. -However, if you need all boot output to be shown via a server’s serial console for example, extra configuration should be provided inside `BOOTCONFIG_CONTENTS`. +However, if you need all boot output to be shown via a server's serial console for example, extra configuration should be provided inside `BOOTCONFIG_CONTENTS`. An empty `kernel {}` key is provided below in the example; inside this key is where you will specify your console devices. 
You may specify multiple comma delimited console devices in quotes to a console key as such: `console = "tty0", "ttyS0,115200n8"`. diff --git a/docs/content/en/docs/getting-started/baremetal/baremetal-getstarted.md b/docs/content/en/docs/getting-started/baremetal/baremetal-getstarted.md index 1728608f9cb0..e2c45c3ef580 100644 --- a/docs/content/en/docs/getting-started/baremetal/baremetal-getstarted.md +++ b/docs/content/en/docs/getting-started/baremetal/baremetal-getstarted.md @@ -213,7 +213,7 @@ Follow these steps if you want to use your initial cluster to create and manage > ``` > * For creating multiple workload clusters, it is essential that the hardware labels and selectors defined for a given workload cluster are unique to that workload cluster. For instance, for an EKS Anywhere cluster named `eksa-workload1`, the hardware that is assigned for this cluster should have labels that are only going to be used for this cluster like `type=eksa-workload1-cp` and `type=eksa-workload1-worker`. Another workload cluster named `eksa-workload2` can have labels like `type=eksa-workload2-cp` and `type=eksa-workload2-worker`. Please note that even though labels can be arbitrary, they need to be unique for each workload cluster. Not specifying unique cluster labels can cause cluster creations to behave in unexpected ways which may lead to unsuccessful creations and unstable clusters. - See the [hardware selectors]({{< relref "./bare-spec/#hardwareselector" >}}) section for more information + See the [hardware selectors]({{< relref "./bare-spec/#hardwareselector-optional" >}}) section for more information 1. Check the workload cluster: diff --git a/docs/content/en/docs/getting-started/baremetal/tinkerbell-overview.md b/docs/content/en/docs/getting-started/baremetal/tinkerbell-overview.md index 3dab38e17ae7..c24c070b9160 100644 --- a/docs/content/en/docs/getting-started/baremetal/tinkerbell-overview.md +++ b/docs/content/en/docs/getting-started/baremetal/tinkerbell-overview.md @@ -101,7 +101,7 @@ eksa-system mycluster-md-0-1656099863422-vxh2-provision-task-2 47s The following shows snippets from the `tasks.bmc` output that represent the three tasks: Power Off, enable network boot, and Power On. ```bash -kubectl describe tasks.bmc -n eksa-system eksa-system mycluster-md-0-1656099863422-vxh2-provision-task-0 +kubectl describe tasks.bmc -n eksa-system mycluster-md-0-1656099863422-vxh2-provision-task-0 ``` ``` ... @@ -115,7 +115,7 @@ Status: ``` ```bash -kubectl describe tasks.bmc -n eksa-system eksa-system mycluster-md-0-1656099863422-vxh2-provision-task-1 +kubectl describe tasks.bmc -n eksa-system mycluster-md-0-1656099863422-vxh2-provision-task-1 ``` ``` ... 
@@ -132,7 +132,7 @@ Status: ``` ```bash -kubectl describe tasks.bmc -n eksa-system eksa-system mycluster-md-0-1656099863422-vxh2-provision-task-2 +kubectl describe tasks.bmc -n eksa-system mycluster-md-0-1656099863422-vxh2-provision-task-2 ``` ``` Task: @@ -243,7 +243,7 @@ NAME STATUS ROLES AGE VERSION INTERNAL eksa-da04 Ready control-plane,master 9m5s v1.22.10-eks-7dc61e8 10.80.30.23 ``` ```bash -kubectl get logs -n eksa-system | grep hegel +kubectl get pods -n eksa-system | grep hegel ``` ``` hegel-n7ngs diff --git a/docs/content/en/docs/getting-started/cloudstack/cloud-spec.md b/docs/content/en/docs/getting-started/cloudstack/cloud-spec.md index f2c82abdbd46..b21ed6912eed 100644 --- a/docs/content/en/docs/getting-started/cloudstack/cloud-spec.md +++ b/docs/content/en/docs/getting-started/cloudstack/cloud-spec.md @@ -17,7 +17,8 @@ The following additional optional configuration can also be included: * [GitOps]({{< relref "../optional/gitops.md" >}}) * [Proxy]({{< relref "../optional/proxy.md" >}}) * [Registry Mirror]({{< relref "../optional/registrymirror.md" >}}) -* [Machine Health Check Timeouts]({{< relref "../optional/healthchecks.md" >}}) +* [Machine Health Checks]({{< relref "../optional/healthchecks.md" >}}) +* [API Server Extra Args]({{< relref "../optional/api-server-extra-args.md" >}}) ```yaml @@ -189,7 +190,7 @@ creation process are [here]({{< relref "./cloudstack-prereq/." >}}) ### controlPlaneConfiguration.machineGroupRef (required) Refers to the Kubernetes object with CloudStack specific configuration for your nodes. See `CloudStackMachineConfig Fields` below. -### controlPlaneConfiguration.taints +### controlPlaneConfiguration.taints (optional) A list of taints to apply to the control plane nodes of the cluster. Replaces the default control plane taint, `node-role.kubernetes.io/master`. The default control plane components will tolerate the provided taints. @@ -200,7 +201,7 @@ Modifying the taints associated with the control plane configuration will cause Any pods that you run on the control plane nodes must tolerate the taints you provide in the control plane configuration. > -### controlPlaneConfiguration.labels +### controlPlaneConfiguration.labels (optional) A list of labels to apply to the control plane nodes of the cluster. This is in addition to the labels that EKS Anywhere will add by default. @@ -214,13 +215,13 @@ The `ds.meta_data.failuredomain` value will be replaced with a failuredomain nam Modifying the labels associated with the control plane configuration will cause new nodes to be rolled out, replacing the existing nodes. -### datacenterRef +### datacenterRef (required) Refers to the Kubernetes object with CloudStack environment specific configuration. See `CloudStackDatacenterConfig Fields` below. -### externalEtcdConfiguration.count +### externalEtcdConfiguration.count (optional) Number of etcd members -### externalEtcdConfiguration.machineGroupRef +### externalEtcdConfiguration.machineGroupRef (optional) Refers to the Kubernetes object with CloudStack specific configuration for your etcd members. See `CloudStackMachineConfig Fields` below. ### kubernetesVersion (required) @@ -234,31 +235,31 @@ If this is a standalone cluster or if it were serving as the management cluster This takes in a list of node groups that you can define for your workers. You may define one or more worker node groups. -### workerNodeGroupConfigurations.count +### workerNodeGroupConfigurations[*].count (required) Number of worker nodes. 
Optional if autoscalingConfiguration is used, in which case count will default to `autoscalingConfiguration.minCount`. Refers to [troubleshooting machine health check remediation not allowed]({{< relref "../../troubleshooting/troubleshooting/#machine-health-check-shows-remediation-is-not-allowed" >}}) and choose a sufficient number to allow machine health check remediation. -### workerNodeGroupConfigurations.machineGroupRef (required) +### workerNodeGroupConfigurations[*].machineGroupRef (required) Refers to the Kubernetes object with CloudStack specific configuration for your nodes. See `CloudStackMachineConfig Fields` below. -### workerNodeGroupConfigurations.name (required) +### workerNodeGroupConfigurations[*].name (required) Name of the worker node group (default: md-0) -### workerNodeGroupConfigurations.autoscalingConfiguration.minCount +### workerNodeGroupConfigurations[*].autoscalingConfiguration.minCount (optional) Minimum number of nodes for this node group's autoscaling configuration. -### workerNodeGroupConfigurations.autoscalingConfiguration.maxCount +### workerNodeGroupConfigurations[*].autoscalingConfiguration.maxCount (optional) Maximum number of nodes for this node group's autoscaling configuration. -### workerNodeGroupConfigurations.taints +### workerNodeGroupConfigurations[*].taints (optional) A list of taints to apply to the nodes in the worker node group. Modifying the taints associated with a worker node group configuration will cause new nodes to be rolled-out, replacing the existing nodes associated with the configuration. At least one node group must not have `NoSchedule` or `NoExecute` taints applied to it. -### workerNodeGroupConfigurations.labels +### workerNodeGroupConfigurations[*].labels (optional) A list of labels to apply to the nodes in the worker node group. This is in addition to the labels that EKS Anywhere will add by default. A special label value is supported by the CAPC provider: @@ -272,7 +273,7 @@ The `ds.meta_data.failuredomain` value will be replaced with a failuredomain nam Modifying the labels associated with a worker node group configuration will cause new nodes to be rolled out, replacing the existing nodes associated with the configuration. -### workerNodeGroupConfigurations.kubernetesVersion +### workerNodeGroupConfigurations[*].kubernetesVersion (optional) The Kubernetes version you want to use for this worker node group. Supported values: 1.28, 1.27, 1.26, 1.25, 1.24 ## CloudStackDatacenterConfig diff --git a/docs/content/en/docs/getting-started/cloudstack/cloudstack-getstarted.md b/docs/content/en/docs/getting-started/cloudstack/cloudstack-getstarted.md index 0dff0c2f1c2b..cb3839bcb31b 100644 --- a/docs/content/en/docs/getting-started/cloudstack/cloudstack-getstarted.md +++ b/docs/content/en/docs/getting-started/cloudstack/cloudstack-getstarted.md @@ -118,6 +118,8 @@ Follow these steps to create an EKS Anywhere cluster that can be used either as ``` 1. 
Create cluster + + For a regular cluster create (with internet access), type the following: ```bash eksctl anywhere create cluster \ @@ -125,6 +127,15 @@ Follow these steps to create an EKS Anywhere cluster that can be used either as # --install-packages packages.yaml \ # uncomment to install curated packages at cluster creation ``` + For an airgapped cluster create, follow [Preparation for airgapped deployments]({{< relref "../install#prepare-for-airgapped-deployments-optional" >}}) instructions, then type the following: + + ```bash + eksctl anywhere create cluster \ + -f eksa-mgmt-cluster.yaml \ + --bundles-override ./eks-anywhere-downloads/bundle-release.yaml \ + # --install-packages packages.yaml \ # uncomment to install curated packages at cluster creation + ``` + 1. Once the cluster is created you can use it with the generated `KUBECONFIG` file in your local directory: ```bash diff --git a/docs/content/en/docs/getting-started/docker/_index.md b/docs/content/en/docs/getting-started/docker/_index.md index e5363a682728..99a5449a25f7 100644 --- a/docs/content/en/docs/getting-started/docker/_index.md +++ b/docs/content/en/docs/getting-started/docker/_index.md @@ -133,10 +133,18 @@ sudo install -m 0755 ./kubectl /usr/local/bin/kubectl 1. Create Docker Cluster. Note the following command may take several minutes to complete. You can run the command with -v 6 to increase logging verbosity to see the progress of the command. + For a regular cluster create (with internet access), type the following: + ```bash eksctl anywhere create cluster -f $CLUSTER_NAME.yaml ``` + For an airgapped cluster create, follow [Preparation for airgapped deployments]({{< relref "../install#prepare-for-airgapped-deployments-optional" >}}) instructions, then type the following: + + ```bash + eksctl anywhere create cluster -f $CLUSTER_NAME.yaml --bundles-override ./eks-anywhere-downloads/bundle-release.yaml + ``` + Expand for sample output: ``` diff --git a/docs/content/en/docs/getting-started/nutanix/nutanix-getstarted.md b/docs/content/en/docs/getting-started/nutanix/nutanix-getstarted.md index 6d672cac3360..064bf5208d0e 100644 --- a/docs/content/en/docs/getting-started/nutanix/nutanix-getstarted.md +++ b/docs/content/en/docs/getting-started/nutanix/nutanix-getstarted.md @@ -107,9 +107,20 @@ Make sure you use single quotes around the values so that your shell does not in 1. 
Create cluster + For a regular cluster create (with internet access), type the following: + + ```bash + eksctl anywhere create cluster \ + -f eksa-mgmt-cluster.yaml \ + # --install-packages packages.yaml \ # uncomment to install curated packages at cluster creation + ``` + + For an airgapped cluster create, follow [Preparation for airgapped deployments]({{< relref "../install#prepare-for-airgapped-deployments-optional" >}}) instructions, then type the following: + ```bash eksctl anywhere create cluster \ -f eksa-mgmt-cluster.yaml \ + --bundles-override ./eks-anywhere-downloads/bundle-release.yaml \ # --install-packages packages.yaml \ # uncomment to install curated packages at cluster creation ``` diff --git a/docs/content/en/docs/getting-started/nutanix/nutanix-spec.md b/docs/content/en/docs/getting-started/nutanix/nutanix-spec.md index efbdabdd17da..fe844bd5510e 100644 --- a/docs/content/en/docs/getting-started/nutanix/nutanix-spec.md +++ b/docs/content/en/docs/getting-started/nutanix/nutanix-spec.md @@ -19,7 +19,8 @@ The following additional optional configuration can also be included: * [Registry Mirror]({{< relref "../optional/registrymirror.md" >}}) * [Proxy]({{< relref "../optional/proxy.md" >}}) * [Gitops]({{< relref "../optional/gitops.md" >}}) -* [Machine Health Check Timeouts]({{< relref "../optional/healthchecks.md" >}}) +* [Machine Health Checks]({{< relref "../optional/healthchecks.md" >}}) +* [API Server Extra Args]({{< relref "../optional/api-server-extra-args.md" >}}) ```yaml apiVersion: anywhere.eks.amazonaws.com/v1alpha1 @@ -189,33 +190,33 @@ creation process are [here]({{< relref "./nutanix-prereq/#prepare-a-nutanix-envi ### workerNodeGroupConfigurations (required) This takes in a list of node groups that you can define for your workers. You may define one or more worker node groups. -### workerNodeGroupConfigurations.count +### workerNodeGroupConfigurations[*].count (required) Number of worker nodes. Optional if `autoscalingConfiguration` is used, in which case count will default to `autoscalingConfiguration.minCount`. Refers to [troubleshooting machine health check remediation not allowed]({{< relref "../../troubleshooting/troubleshooting/#machine-health-check-shows-remediation-is-not-allowed" >}}) and choose a sufficient number to allow machine health check remediation. -### workerNodeGroupConfigurations.machineGroupRef (required) +### workerNodeGroupConfigurations[*].machineGroupRef (required) Refers to the Kubernetes object with Nutanix specific configuration for your nodes. See `NutanixMachineConfig` fields below. -### workerNodeGroupConfigurations.name (required) +### workerNodeGroupConfigurations[*].name (required) Name of the worker node group (default: `md-0`) -### workerNodeGroupConfigurations.autoscalingConfiguration.minCount -Minimum number of nodes for this node group’s autoscaling configuration. +### workerNodeGroupConfigurations[*].autoscalingConfiguration.minCount (optional) +Minimum number of nodes for this node group's autoscaling configuration. -### workerNodeGroupConfigurations.autoscalingConfiguration.maxCount -Maximum number of nodes for this node group’s autoscaling configuration. +### workerNodeGroupConfigurations[*].autoscalingConfiguration.maxCount (optional) +Maximum number of nodes for this node group's autoscaling configuration. -### workerNodeGroupConfigurations.kubernetesVersion +### workerNodeGroupConfigurations[*].kubernetesVersion (optional) The Kubernetes version you want to use for this worker node group. 
Supported values: 1.28, 1.27, 1.26, 1.25, 1.24 -### externalEtcdConfiguration.count +### externalEtcdConfiguration.count (optional) Number of etcd members -### externalEtcdConfiguration.machineGroupRef +### externalEtcdConfiguration.machineGroupRef (optional) Refers to the Kubernetes object with Nutanix specific configuration for your etcd members. See `NutanixMachineConfig` fields below. -### datacenterRef +### datacenterRef (required) Refers to the Kubernetes object with Nutanix environment specific configuration. See `NutanixDatacenterConfig` fields below. ### kubernetesVersion (required) @@ -253,22 +254,22 @@ __Example__:
## NutanixMachineConfig Fields -### cluster +### cluster (required) Reference to the Prism Element cluster. -### cluster.type +### cluster.type (required) Type to identify the Prism Element cluster. (Permitted values: `name` or `uuid`) -### cluster.name +### cluster.name (required) Name of the Prism Element cluster. -### cluster.uuid +### cluster.uuid (required) UUID of the Prism Element cluster. -### image +### image (required) Reference to the OS image used for the system disk. -### image.type +### image.type (required) Type to identify the OS image. (Permitted values: `name` or `uuid`) ### image.name (`name` or `UUID` required) @@ -279,37 +280,37 @@ The `image.name` must contain the `Cluster.Spec.KubernetesVersion` or `Cluster.S UUID of the image The name of the image associated with the `uuid` must contain the `Cluster.Spec.KubernetesVersion` or `Cluster.Spec.WorkerNodeGroupConfiguration[].KubernetesVersion` version (in case of modular upgrade). For example, if the Kubernetes version is 1.24, the name associated with `image.uuid` field must include 1.24, 1_24, 1-24 or 124. -### memorySize +### memorySize (optional) Size of RAM on virtual machines (Default: `4Gi`) ### osFamily (optional) Operating System on virtual machines. Permitted values: `ubuntu` and `redhat`. (Default: `ubuntu`) -### subnet +### subnet (required) Reference to the subnet to be assigned to the VMs. ### subnet.name (`name` or `UUID` required) Name of the subnet. -### subnet.type +### subnet.type (required) Type to identify the subnet. (Permitted values: `name` or `uuid`) ### subnet.uuid (`name` or `UUID` required) UUID of the subnet. -### systemDiskSize +### systemDiskSize (optional) Amount of storage assigned to the system disk. (Default: `40Gi`) -### vcpuSockets +### vcpuSockets (optional) Amount of vCPU sockets. (Default: `2`) -### vcpusPerSocket +### vcpusPerSocket (optional) Amount of vCPUs per socket. (Default: `1`) ### project (optional) Reference to an existing project used for the virtual machines. -### project.type +### project.type (required) Type to identify the project. (Permitted values: `name` or `uuid`) ### project.name (`name` or `UUID` required) diff --git a/docs/content/en/docs/getting-started/optional/api-server-extra-args.md b/docs/content/en/docs/getting-started/optional/api-server-extra-args.md new file mode 100644 index 000000000000..01846604ad3e --- /dev/null +++ b/docs/content/en/docs/getting-started/optional/api-server-extra-args.md @@ -0,0 +1,38 @@ +--- +title: "API Server Extra Args" +linkTitle: "API Server Extra Args" +weight: 60 +description: > + EKS Anywhere cluster yaml specification for Kubernetes API Server Extra Args reference +--- + +## API Server Extra Args support (optional) + +As of EKS Anywhere version v0.20.0, you can pass additional flags to configure the Kubernetes API server in your EKS Anywhere clusters. + +#### Provider support details +| | vSphere | Bare Metal | Nutanix | CloudStack | Snow | +|:--------------:|:-------:|:----------:|:-------:|:----------:|:----:| +| **Supported?** | ✓ | ✓ | ✓ | ✓ | ✓ | + +In order to configure a cluster with API Server extra args, you need to configure your cluster by updating the cluster configuration file to include the details below. The feature flag `API_SERVER_EXTRA_ARGS_ENABLED=true` needs to be set as an environment variable. 
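For example, assuming your cluster spec is saved as `my-cluster.yaml` (an illustrative sketch, not part of the official workflow text), the flag can be exported in the same shell session that runs the cluster lifecycle command:

```bash
# Enable the API Server extra args feature flag for this shell session
export API_SERVER_EXTRA_ARGS_ENABLED=true
eksctl anywhere create cluster -f my-cluster.yaml
```
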
+ +This is a generic template with some example API Server extra args configuration below for reference: +```yaml +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: my-cluster-name +spec: + ... + controlPlaneConfiguration: + apiServerExtraArgs: + ... + disable-admission-plugins: "DefaultStorageClass,DefaultTolerationSeconds" + enable-admission-plugins: "NamespaceAutoProvision,NamespaceExists" +``` + +The above example configures the `disable-admission-plugins` and `enable-admission-plugins` options of the API Server to enable additional admission plugins or disable some of the default ones. You can configure any of the API Server options using the above template. + +### controlPlaneConfiguration.apiServerExtraArgs (optional) +Reference the [Kubernetes documentation](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/#options) for the list of flags that can be configured for the Kubernetes API server in EKS Anywhere \ No newline at end of file diff --git a/docs/content/en/docs/getting-started/optional/healthchecks.md b/docs/content/en/docs/getting-started/optional/healthchecks.md index 198601350441..23162f08c2ea 100644 --- a/docs/content/en/docs/getting-started/optional/healthchecks.md +++ b/docs/content/en/docs/getting-started/optional/healthchecks.md @@ -1,26 +1,29 @@ --- -title: "Machine Health Checks" -linkTitle: "Machine Health Checks" +title: "MachineHealthCheck" +linkTitle: "MachineHealthCheck" weight: 40 aliases: /docs/reference/clusterspec/optional/healthchecks/ description: > - EKS Anywhere cluster yaml specification for machine health check configuration + EKS Anywhere cluster yaml specification for MachineHealthCheck configuration --- -## Machine Health Checks Support +## MachineHealthCheck Support #### Provider support details | | vSphere | Bare Metal | Nutanix | CloudStack | Snow | |:--------------:|:-------:|:----------:|:-------:|:----------:|:----:| | **Supported?** | ✓ | ✓ | ✓ | ✓ | ✓ | -You can configure EKS Anywhere to specify timeouts for machine health checks. -A Machine Health Check is a resource which allows users to define conditions under which Machines within a Cluster should be considered unhealthy. A Machine Health Check is defined on a management cluster and scoped to a particular workload cluster. If not configured in the spec, the default values are used to configure the machine health checks. +You can configure EKS Anywhere to specify timeouts and `maxUnhealthy` values for machine health checks. -Note: Even though the configuration on machine health check timeouts in the EKSA spec is optional, machine health checks are still installed for all clusters using the default timeout values mentioned below. +A MachineHealthCheck (MHC) is a resource in Cluster API which allows users to define conditions under which Machines within a Cluster should be considered unhealthy. A MachineHealthCheck is defined on a management cluster and scoped to a particular workload cluster. -The following cluster spec shows an example of how to configure health check timeouts: +Note: Even though the MachineHealthCheck configuration in the EKS-A spec is optional, MachineHealthChecks are still installed for all clusters using the default values mentioned below. + +EKS Anywhere allows users to have granular control over MachineHealthChecks in their cluster configuration, with default values (derived from Cluster API) being applied if the MHC is not configured in the spec. 
The top-level `machineHealthCheck` field governs the global MachineHealthCheck settings for all Machines (control-plane and worker). These global settings can be overridden through the nested `machineHealthCheck` field in the control plane configuration and each worker node configuration. If the nested MHC fields are not configured, then the top-level settings are applied to the respective Machines. + +The following cluster spec shows an example of how to configure health check timeouts and `maxUnhealthy`: ```yaml apiVersion: anywhere.eks.amazonaws.com/v1alpha1 kind: Cluster @@ -28,22 +31,89 @@ metadata: name: my-cluster-name spec: ... - machineHealthCheck: + machineHealthCheck: # Top-level MachineHealthCheck configuration + maxUnhealthy: "60%" nodeStartupTimeout: "10m0s" unhealthyMachineTimeout: "5m0s" + ... + controlPlaneConfiguration: # MachineHealthCheck configuration for Control plane + machineHealthCheck: + maxUnhealthy: 100% + nodeStartupTimeout: "15m0s" + unhealthyMachineTimeout: 10m + ... + workerNodeGroupConfigurations: + - count: 1 + name: md-0 + machineHealthCheck: # MachineHealthCheck configuration for Worker Node Group 0 + maxUnhealthy: 100% + nodeStartupTimeout: "10m0s" + unhealthyMachineTimeout: 20m + - count: 1 + name: md-1 + machineHealthCheck: # MachineHealthCheck configuration for Worker Node Group 1 + maxUnhealthy: 100% + nodeStartupTimeout: "10m0s" + unhealthyMachineTimeout: 20m + ... ``` -## Machine Health Check Spec Details +## MachineHealthCheck Spec Details ### __machineHealthCheck__ (optional) -* __Description__: top level key; required to configure machine health check timeouts. +* __Description__: top-level key; required to configure global MachineHealthCheck timeouts and `maxUnhealthy`. * __Type__: object -### __nodeStartupTimeout__ (optional) -* __Description__: determines how long a Machine Health Check should wait for a Node to join the cluster, before considering a Machine unhealthy. +### __machineHealthCheck.maxUnhealthy__ (optional) +* __Description__: determines the maximum permissible number or percentage of unhealthy Machines in a cluster before further remediation is prevented. This ensures that MachineHealthChecks only remediate Machines when the cluster is healthy. +* __Default__: ```100%``` for control plane machines, ```40%``` for worker nodes (Cluster API defaults). +* __Type__: integer (count) or string (percentage) + +### __machineHealthCheck.nodeStartupTimeout__ (optional) +* __Description__: determines how long a MachineHealthCheck should wait for a Node to join the cluster, before considering a Machine unhealthy. * __Default__: ```20m0s``` for Tinkerbell provider, ```10m0s``` for all other providers. * __Minimum Value (If configured)__: ```30s``` * __Type__: string -### __unhealthyMachineTimeout__ (optional) -* __Description__: if the unhealthy condition is matched for the duration of this timeout, the Machine is considered unhealthy. +### __machineHealthCheck.unhealthyMachineTimeout__ (optional) +* __Description__: determines how long the unhealthy Node conditions (e.g., `Ready=False`, `Ready=Unknown`) should be matched for, before considering a Machine unhealthy. * __Default__: ```5m0s``` * __Type__: string + +### __controlPlaneConfiguration.machineHealthCheck__ (optional) +* __Description__: Control plane level configuration for MachineHealthCheck timeouts and `maxUnhealthy` values. 
+* __Type__: object + +### __controlPlaneConfiguration.machineHealthCheck.maxUnhealthy__ (optional) +* __Description__: determines the maximum permissible number or percentage of unhealthy control plane Machines in a cluster before further remediation is prevented. This ensures that MachineHealthChecks only remediate Machines when the cluster is healthy. +* __Default__: Top-level MHC `maxUnhealthy` if set or ```100%``` otherwise. +* __Type__: integer (count) or string (percentage) + +### __controlPlaneConfiguration.machineHealthCheck.nodeStartupTimeout__ (optional) +* __Description__: determines how long a MachineHealthCheck should wait for a control plane Node to join the cluster, before considering the Machine unhealthy. +* __Default__: Top-level MHC `nodeStartupTimeout` if set or ```20m0s``` for Tinkerbell provider, ```10m0s``` for all other providers otherwise. +* __Minimum Value (if configured)__: ```30s``` +* __Type__: string + +### __controlPlaneConfiguration.machineHealthCheck.unhealthyMachineTimeout__ (optional) +* __Description__: determines how long the unhealthy conditions (e.g., `Ready=False`, `Ready=Unknown`) should be matched for a control plane Node, before considering the Machine unhealthy. +* __Default__: Top-level MHC `unhealthyMachineTimeout` if set or ```5m0s``` otherwise. +* __Type__: string + +### __workerNodeGroupConfigurations.machineHealthCheck__ (optional) +* __Description__: Worker node level configuration for MachineHealthCheck timeouts and `maxUnhealthy` values. +* __Type__: object + +### __workerNodeGroupConfigurations.machineHealthCheck.maxUnhealthy__ (optional) +* __Description__: determines the maximum permissible number or percentage of unhealthy worker Machines in a cluster before further remediation is prevented. This ensures that MachineHealthChecks only remediate Machines when the cluster is healthy. +* __Default__: Top-level MHC `maxUnhealthy` if set or ```40%``` otherwise. +* __Type__: integer (count) or string (percentage) + +### __workerNodeGroupConfigurations.machineHealthCheck.nodeStartupTimeout__ (optional) +* __Description__: determines how long a MachineHealthCheck should wait for a worker Node to join the cluster, before considering the Machine unhealthy. +* __Default__: Top-level MHC `nodeStartupTimeout` if set or ```20m0s``` for Tinkerbell provider, ```10m0s``` for all other providers otherwise. +* __Minimum Value (if configured)__: ```30s``` +* __Type__: string + +### __workerNodeGroupConfigurations.machineHealthCheck.unhealthyMachineTimeout__ (optional) +* __Description__: determines how long the unhealthy conditions (e.g., `Ready=False`, `Ready=Unknown`) should be matched for a worker Node, before considering the Machine unhealthy. +* __Default__: Top-level MHC `unhealthyMachineTimeout` if set or ```5m0s``` otherwise. +* __Type__: string diff --git a/docs/content/en/docs/getting-started/snow/snow-getstarted.md b/docs/content/en/docs/getting-started/snow/snow-getstarted.md index 26dd2b8186c9..52c227824b04 100644 --- a/docs/content/en/docs/getting-started/snow/snow-getstarted.md +++ b/docs/content/en/docs/getting-started/snow/snow-getstarted.md @@ -107,13 +107,15 @@ Make sure you use single quotes around the values so that your shell does not in 1. Create cluster - a. For none air-gapped environment + For a regular cluster create (with internet access), type the following: + ```bash eksctl anywhere create cluster \ -f eksa-mgmt-cluster.yaml ``` - b. 
For air-gapped environment + For an airgapped cluster create, follow [Preparation for airgapped deployments]({{< relref "../install#prepare-for-airgapped-deployments-optional" >}}) instructions, then type the following: + ```bash eksctl anywhere create cluster \ -f eksa-mgmt-cluster.yaml \ diff --git a/docs/content/en/docs/getting-started/snow/snow-spec.md b/docs/content/en/docs/getting-started/snow/snow-spec.md index 096bbdb9efa9..27d3ffa9b67a 100644 --- a/docs/content/en/docs/getting-started/snow/snow-spec.md +++ b/docs/content/en/docs/getting-started/snow/snow-spec.md @@ -17,7 +17,8 @@ The following additional optional configuration can also be included: * [GitOps]({{< relref "../optional/gitops.md" >}}) * [Proxy]({{< relref "../optional/proxy.md" >}}) * [Registry Mirror]({{< relref "../optional/registrymirror.md" >}}) -* [Machine Health Check Timeouts]({{< relref "../optional/healthchecks.md" >}}) +* [Machine Health Checks]({{< relref "../optional/healthchecks.md" >}}) +* [API Server Extra Args]({{< relref "../optional/api-server-extra-args.md" >}}) ```yaml apiVersion: anywhere.eks.amazonaws.com/v1alpha1 @@ -124,7 +125,7 @@ range that does not conflict with other devices. >**_NOTE:_** This IP should be outside the network DHCP range as it is a floating IP that gets assigned to one of the control plane nodes for kube-apiserver loadbalancing. -### controlPlaneConfiguration.taints +### controlPlaneConfiguration.taints (optional) A list of taints to apply to the control plane nodes of the cluster. Replaces the default control plane taint. For k8s versions prior to 1.24, it replaces `node-role.kubernetes.io/master`. For k8s versions 1.24+, it replaces `node-role.kubernetes.io/control-plane`. The default control plane components will tolerate the provided taints. @@ -135,7 +136,7 @@ Modifying the taints associated with the control plane configuration will cause Any pods that you run on the control plane nodes must tolerate the taints you provide in the control plane configuration. > -### controlPlaneConfiguration.labels +### controlPlaneConfiguration.labels (optional) A list of labels to apply to the control plane nodes of the cluster. This is in addition to the labels that EKS Anywhere will add by default. @@ -146,47 +147,47 @@ the existing nodes. This takes in a list of node groups that you can define for your workers. You may define one or more worker node groups. -### workerNodeGroupConfigurations.count +### workerNodeGroupConfigurations[*].count (required) Number of worker nodes. Optional if autoscalingConfiguration is used, in which case count will default to `autoscalingConfiguration.minCount`. Refers to [troubleshooting machine health check remediation not allowed]({{< relref "../../troubleshooting/troubleshooting/#machine-health-check-shows-remediation-is-not-allowed" >}}) and choose a sufficient number to allow machine health check remediation. -### workerNodeGroupConfigurations.machineGroupRef (required) +### workerNodeGroupConfigurations[*].machineGroupRef (required) Refers to the Kubernetes object with Snow specific configuration for your nodes. See `SnowMachineConfig Fields` below. -### workerNodeGroupConfigurations.name (required) +### workerNodeGroupConfigurations[*].name (required) Name of the worker node group (default: md-0) -### workerNodeGroupConfigurations.autoscalingConfiguration.minCount +### workerNodeGroupConfigurations[*].autoscalingConfiguration.minCount (optional) Minimum number of nodes for this node group's autoscaling configuration. 
-### workerNodeGroupConfigurations.autoscalingConfiguration.maxCount +### workerNodeGroupConfigurations[*].autoscalingConfiguration.maxCount (optional) Maximum number of nodes for this node group's autoscaling configuration. -### workerNodeGroupConfigurations.taints +### workerNodeGroupConfigurations[*].taints (optional) A list of taints to apply to the nodes in the worker node group. Modifying the taints associated with a worker node group configuration will cause new nodes to be rolled-out, replacing the existing nodes associated with the configuration. At least one node group must not have `NoSchedule` or `NoExecute` taints applied to it. -### workerNodeGroupConfigurations.labels +### workerNodeGroupConfigurations[*].labels (optional) A list of labels to apply to the nodes in the worker node group. This is in addition to the labels that EKS Anywhere will add by default. Modifying the labels associated with a worker node group configuration will cause new nodes to be rolled out, replacing the existing nodes associated with the configuration. -### workerNodeGroupConfigurations.kubernetesVersion +### workerNodeGroupConfigurations[*].kubernetesVersion (optional) The Kubernetes version you want to use for this worker node group. Supported values: 1.28, 1.27, 1.26, 1.25, 1.24 -### externalEtcdConfiguration.count +### externalEtcdConfiguration.count (optional) Number of etcd members. -### externalEtcdConfiguration.machineGroupRef +### externalEtcdConfiguration.machineGroupRef (optional) Refers to the Kubernetes object with Snow specific configuration for your etcd members. See `SnowMachineConfig Fields` below. -### datacenterRef +### datacenterRef (required) Refers to the Kubernetes object with Snow environment specific configuration. See `SnowDatacenterConfig Fields` below. ### kubernetesVersion (required) @@ -194,7 +195,7 @@ The Kubernetes version you want to use for your cluster. Supported values: `1.28 ## SnowDatacenterConfig Fields -### identityRef +### identityRef (required) Refers to the Kubernetes secret object with Snow devices credentials used to reconcile the cluster. ## SnowMachineConfig Fields @@ -240,7 +241,7 @@ Refers to a `SnowIPPool` object which provides a range of ip addresses. When spe ### containersVolume (optional) Configuration option for customizing containers data storage volume. -### containersVolume.size +### containersVolume.size (optional) Size of the storage for containerd runtime in Gi. The field is optional for Ubuntu and if specified, the size must be no smaller than 8 Gi. @@ -256,10 +257,10 @@ Type of the containers volume. Permitted values: `sbp1`, `sbg1`. (Default: `sbp1 ### nonRootVolumes (optional) Configuration options for the non root storage volumes. -### nonRootVolumes[0].deviceName +### nonRootVolumes[0].deviceName (optional) Non root volume device name. Must be specified and cannot have prefix "/dev/sda" as it is reserved for root volume and containers volume. -### nonRootVolumes[0].size +### nonRootVolumes[0].size (optional) Size of the storage device for the non root volume. Must be no smaller than 8 Gi. ### nonRootVolumes[0].type (optional) @@ -269,14 +270,14 @@ Type of the non root volume. Permitted values: `sbp1`, `sbg1`. (Default: `sbp1`) ## SnowIPPool Fields -### pools[0].ipStart +### pools[0].ipStart (optional) Start address of an IP range. -### pools[0].ipEnd +### pools[0].ipEnd (optional) End address of an IP range. -### pools[0].subnet +### pools[0].subnet (optional) An IP subnet for determining whether an IP is within the subnet. 
-### pools[0].gateway +### pools[0].gateway (optional) Gateway of the subnet for routing purpose. diff --git a/docs/content/en/docs/getting-started/vsphere/vsphere-getstarted.md b/docs/content/en/docs/getting-started/vsphere/vsphere-getstarted.md index e452e7a21593..5b30ae9a68f7 100644 --- a/docs/content/en/docs/getting-started/vsphere/vsphere-getstarted.md +++ b/docs/content/en/docs/getting-started/vsphere/vsphere-getstarted.md @@ -116,6 +116,8 @@ Make sure you use single quotes around the values so that your shell does not in Consider passing `--skip-validations vsphere-user-privilege` along with `eksctl` or upgrading the vSphere version (**recommended**). {{% /alert %}} + For a regular cluster create (with internet access), type the following: + ```bash eksctl anywhere create cluster \ -f eksa-mgmt-cluster.yaml \ @@ -123,6 +125,15 @@ Make sure you use single quotes around the values so that your shell does not in ``` + For an airgapped cluster create, follow [Preparation for airgapped deployments]({{< relref "../install#prepare-for-airgapped-deployments-optional" >}}) instructions, then type the following: + + ```bash + eksctl anywhere create cluster \ + -f eksa-mgmt-cluster.yaml \ + --bundles-override ./eks-anywhere-downloads/bundle-release.yaml \ + # --install-packages packages.yaml \ # uncomment to install curated packages at cluster creation + ``` + 1. Once the cluster is created you can use it with the generated `KUBECONFIG` file in your local directory: ```bash diff --git a/docs/content/en/docs/getting-started/vsphere/vsphere-prereq.md b/docs/content/en/docs/getting-started/vsphere/vsphere-prereq.md index c2fc3d17343f..4ec14ba69933 100644 --- a/docs/content/en/docs/getting-started/vsphere/vsphere-prereq.md +++ b/docs/content/en/docs/getting-started/vsphere/vsphere-prereq.md @@ -15,7 +15,7 @@ Set up an Administrative machine as described in [Install EKS Anywhere ]({{< rel ## Prepare a VMware vSphere environment To prepare a VMware vSphere environment to run EKS Anywhere, you need the following: -* A vSphere 7+ environment running vCenter. +* A vSphere 7 or 8 environment running vCenter. * Capacity to deploy 6-10 VMs. * DHCP service running in vSphere environment in the primary VM network for your workload cluster. 
* [Prepare DHCP IP addresses pool]({{< relref "../../clustermgmt/cluster-upgrades/vsphere-and-cloudstack-upgrades.md/#prepare-dhcp-ip-addresses-pool" >}}) diff --git a/docs/content/en/docs/getting-started/vsphere/vsphere-spec.md b/docs/content/en/docs/getting-started/vsphere/vsphere-spec.md index d8104ee6c58f..c91212588dda 100644 --- a/docs/content/en/docs/getting-started/vsphere/vsphere-spec.md +++ b/docs/content/en/docs/getting-started/vsphere/vsphere-spec.md @@ -22,45 +22,45 @@ spec: cniConfig: # Cluster CNI plugin - default: cilium (required) cilium: {} pods: - cidrBlocks: # Subnet CIDR notation for pods (required) + cidrBlocks: # Internal Kubernetes subnet CIDR block for pods (required) - 192.168.0.0/16 services: - cidrBlocks: # Subnet CIDR notation for services (required) + cidrBlocks: # Internal Kubernetes subnet CIDR block for services (required) - 10.96.0.0/12 controlPlaneConfiguration: # Specific cluster control plane config (required) count: 2 # Number of control plane nodes (required) - endpoint: # IP for control plane endpoint (required) - host: "192.168.0.10" + endpoint: # IP for control plane endpoint on your network (required) + host: xxx.xxx.xxx.xxx machineGroupRef: # vSphere-specific Kubernetes node config (required) kind: VSphereMachineConfig name: my-cluster-machines - taints: # Taints applied to control plane nodes + taints: # Taints applied to control plane nodes - key: "key1" value: "value1" effect: "NoSchedule" - labels: # Labels applied to control plane nodes + labels: # Labels applied to control plane nodes "key1": "value1" "key2": "value2" - datacenterRef: # Kubernetes object with vSphere-specific config + datacenterRef: # Kubernetes object with vSphere-specific config kind: VSphereDatacenterConfig name: my-cluster-datacenter externalEtcdConfiguration: - count: 3 # Number of etcd members - machineGroupRef: # vSphere-specific Kubernetes etcd config + count: 3 # Number of etcd members + machineGroupRef: # vSphere-specific Kubernetes etcd config kind: VSphereMachineConfig name: my-cluster-machines kubernetesVersion: "1.25" # Kubernetes version to use for the cluster (required) workerNodeGroupConfigurations: # List of node groups you can define for workers (required) - - count: 2 # Number of worker nodes + - count: 2 # Number of worker nodes machineGroupRef: # vSphere-specific Kubernetes node objects (required) kind: VSphereMachineConfig name: my-cluster-machines name: md-0 # Name of the worker nodegroup (required) - taints: # Taints to apply to worker node group nodes + taints: # Taints to apply to worker node group nodes - key: "key1" value: "value1" effect: "NoSchedule" - labels: # Labels to apply to worker node group nodes + labels: # Labels to apply to worker node group nodes "key1": "value1" "key2": "value2" --- @@ -110,7 +110,8 @@ The following additional optional configuration can also be included: * [Proxy]({{< relref "../optional/proxy.md" >}}) * [Registry Mirror]({{< relref "../optional/registrymirror.md" >}}) * [Host OS Config]({{< relref "../optional/hostOSConfig.md" >}}) -* [Machine Health Check Timeouts]({{< relref "../optional/healthchecks.md" >}}) +* [Machine Health Checks]({{< relref "../optional/healthchecks.md" >}}) +* [API Server Extra Args]({{< relref "../optional/api-server-extra-args.md" >}}) ## Cluster Fields @@ -136,7 +137,7 @@ range that does not conflict with other VMs. the control plane nodes for kube-apiserver loadbalancing. 
Suggestions on how to ensure this IP does not cause issues during cluster creation process are [here]({{< relref "../vsphere/vsphere-prereq/#prepare-a-vmware-vsphere-environment" >}}) -### controlPlaneConfiguration.taints +### controlPlaneConfiguration.taints (optional) A list of taints to apply to the control plane nodes of the cluster. Replaces the default control plane taint. For k8s versions prior to 1.24, it replaces `node-role.kubernetes.io/master`. For k8s versions 1.24+, it replaces `node-role.kubernetes.io/control-plane`. The default control plane components will tolerate the provided taints. @@ -147,7 +148,7 @@ Modifying the taints associated with the control plane configuration will cause Any pods that you run on the control plane nodes must tolerate the taints you provide in the control plane configuration. > -### controlPlaneConfiguration.labels +### controlPlaneConfiguration.labels (optional) A list of labels to apply to the control plane nodes of the cluster. This is in addition to the labels that EKS Anywhere will add by default. @@ -158,49 +159,49 @@ the existing nodes. This takes in a list of node groups that you can define for your workers. You may define one or more worker node groups. -### workerNodeGroupConfigurations.count +### workerNodeGroupConfigurations[*].count (required) Number of worker nodes. Optional if the [cluster autoscaler curated package]({{< relref "../../packages/cluster-autoscaler/addclauto" >}}) is installed and autoscalingConfiguration is used, in which case count will default to `autoscalingConfiguration.minCount`. Refers to [troubleshooting machine health check remediation not allowed]({{< relref "../../troubleshooting/troubleshooting/#machine-health-check-shows-remediation-is-not-allowed" >}}) and choose a sufficient number to allow machine health check remediation. -### workerNodeGroupConfigurations.machineGroupRef (required) +### workerNodeGroupConfigurations[*].machineGroupRef (required) Refers to the Kubernetes object with vsphere specific configuration for your nodes. See [VSphereMachineConfig Fields](#vspheremachineconfig-fields) below. -### workerNodeGroupConfigurations.name (required) +### workerNodeGroupConfigurations[*].name (required) Name of the worker node group (default: md-0) -### workerNodeGroupConfigurations.autoscalingConfiguration.minCount +### workerNodeGroupConfigurations[*].autoscalingConfiguration.minCount (optional) Minimum number of nodes for this node group's autoscaling configuration. -### workerNodeGroupConfigurations.autoscalingConfiguration.maxCount +### workerNodeGroupConfigurations[*].autoscalingConfiguration.maxCount (optional) Maximum number of nodes for this node group's autoscaling configuration. -### workerNodeGroupConfigurations.taints +### workerNodeGroupConfigurations[*].taints (optional) A list of taints to apply to the nodes in the worker node group. Modifying the taints associated with a worker node group configuration will cause new nodes to be rolled-out, replacing the existing nodes associated with the configuration. At least one node group must **NOT** have `NoSchedule` or `NoExecute` taints applied to it. -### workerNodeGroupConfigurations.labels +### workerNodeGroupConfigurations[*].labels (optional) A list of labels to apply to the nodes in the worker node group. This is in addition to the labels that EKS Anywhere will add by default. 
Modifying the labels associated with a worker node group configuration will cause new nodes to be rolled out, replacing the existing nodes associated with the configuration. -### workerNodeGroupConfigurations.kubernetesVersion +### workerNodeGroupConfigurations[*].kubernetesVersion (optional) The Kubernetes version you want to use for this worker node group. [Supported values]({{< relref "../../concepts/support-versions/#kubernetes-versions" >}}): `1.28`, `1.27`, `1.26`, `1.25`, `1.24` Must be less than or equal to the cluster `kubernetesVersion` defined at the root level of the cluster spec. The worker node kubernetesVersion must be no more than two minor Kubernetes versions lower than the cluster control plane's Kubernetes version. Removing `workerNodeGroupConfiguration.kubernetesVersion` will trigger an upgrade of the node group to the `kubernetesVersion` defined at the root level of the cluster spec. -### externalEtcdConfiguration.count +### externalEtcdConfiguration.count (optional) Number of etcd members -### externalEtcdConfiguration.machineGroupRef +### externalEtcdConfiguration.machineGroupRef (optional) Refers to the Kubernetes object with vsphere specific configuration for your etcd members. See [VSphereMachineConfig Fields](#vspheremachineconfig-fields) below. -### datacenterRef +### datacenterRef (required) Refers to the Kubernetes object with vsphere environment specific configuration. See [VSphereDatacenterConfig Fields](#vspheredatacenterconfig-fields) below. ### kubernetesVersion (required) diff --git a/docs/content/en/docs/osmgmt/artifacts.md b/docs/content/en/docs/osmgmt/artifacts.md index 451c8eaaadac..243b296f6752 100644 --- a/docs/content/en/docs/osmgmt/artifacts.md +++ b/docs/content/en/docs/osmgmt/artifacts.md @@ -12,7 +12,7 @@ EKS Anywhere supports three different node operating systems: * Bottlerocket: For vSphere and Bare Metal providers * Ubuntu: For vSphere, Bare Metal, Nutanix, and Snow providers -* Red Hat Enterprise Linux (RHEL): For vSphere, CloudStack, and Bare Metal providers +* Red Hat Enterprise Linux (RHEL): For vSphere, CloudStack, Nutanix, and Bare Metal providers Bottlerocket OVAs and images are distributed by the EKS Anywhere project. To build your own Ubuntu-based or RHEL-based EKS Anywhere node, see [Building node images]({{< relref "#building-node-images">}}). @@ -25,7 +25,7 @@ Several code snippets on this page use `curl` and `yq` commands. Refer to the [T Artifacts for EKS Anywhere Bare Metal clusters are listed below. If you like, you can download these images and serve them locally to speed up cluster creation. -See descriptions of the [osImageURL]({{< relref "../getting-started/baremetal/bare-spec/#osimageurl" >}}) and [`hookImagesURLPath`]({{< relref "../getting-started/baremetal/bare-spec#hookimagesurlpath" >}}) fields for details. +See descriptions of the [`osImageURL`]({{< relref "../getting-started/baremetal/bare-spec/#osimageurl-optional" >}}) and [`hookImagesURLPath`]({{< relref "../getting-started/baremetal/bare-spec#hookimagesurlpath-optional" >}}) fields for details. 
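One way to host the artifacts locally (a rough sketch only; the directory, port, and image file name are placeholders, and any HTTP server reachable from your nodes will do) is to drop the downloaded files into a directory, serve it over HTTP, and point the spec fields above at that server:

```bash
mkdir -p /var/www/my-web-server
cp ubuntu-v1.27.6-eks-a-12-amd64.gz /var/www/my-web-server/
cd /var/www/my-web-server
# Quick HTTP server for lab use; use a proper web server for production setups
python3 -m http.server 8080
# Then reference the artifacts in the cluster spec, for example:
#   osImageURL: http://<admin-machine-ip>:8080/ubuntu-v1.27.6-eks-a-12-amd64.gz
```
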
### Ubuntu or RHEL OS images for Bare Metal @@ -45,7 +45,7 @@ OR Using a specific EKS Anywhere version ```bash -EKSA_RELEASE_VERSION=v0.18.0 +EKSA_RELEASE_VERSION= ``` ```bash @@ -63,7 +63,7 @@ OR Using a specific EKS Anywhere version ```bash -EKSA_RELEASE_VERSION=v0.18.0 +EKSA_RELEASE_VERSION= ``` kernel: @@ -93,7 +93,7 @@ OR Using a specific EKS Anywhere version ```bash -EKSA_RELEASE_VERSION=v0.18.0 +EKSA_RELEASE_VERSION= ``` ```bash @@ -118,7 +118,7 @@ OR Using a specific EKS Anywhere version ```bash -EKSA_RELEASE_VERSION=v0.18.0 +EKSA_RELEASE_VERSION= ``` ```bash @@ -163,7 +163,7 @@ export KUBEVERSION="1.27" Using a specific EKS Anywhere version ```bash - EKSA_RELEASE_VERSION=v0.18.0 + EKSA_RELEASE_VERSION= ``` Set the Bottlerocket image format to the desired value (`ova` for the VMware variant or `raw` for the Baremetal variant) @@ -324,6 +324,31 @@ Packer will require prior authentication with your AWS account to launch EC2 ins Prism Central Administrator permissions are required to build a Nutanix image using `image-builder`. +### Downloading the `image-builder` CLI + +You will need to download the `image-builder` CLI corresponding to the version of EKS Anywhere you are using. The `image-builder` CLI can be downloaded using the commands provided below: + +Using the latest EKS Anywhere version +```bash +EKSA_RELEASE_VERSION=$(curl -sL https://anywhere-assets.eks.amazonaws.com/releases/eks-a/manifest.yaml | yq ".spec.latestVersion") +``` + +OR + +Using a specific EKS Anywhere version +```bash +EKSA_RELEASE_VERSION= +``` + +```bash +cd /tmp +BUNDLE_MANIFEST_URL=$(curl -s https://anywhere-assets.eks.amazonaws.com/releases/eks-a/manifest.yaml | yq ".spec.releases[] | select(.version==\"$EKSA_RELEASE_VERSION\").bundleManifestUrl") +IMAGEBUILDER_TARBALL_URI=$(curl -s $BUNDLE_MANIFEST_URL | yq ".spec.versionsBundles[0].eksD.imagebuilder.uri") +curl -s $IMAGEBUILDER_TARBALL_URI | tar xz ./image-builder +sudo install -m 0755 ./image-builder /usr/local/bin/image-builder +cd - +``` + ### Build vSphere OVA node images These steps use `image-builder` to create an Ubuntu-based or RHEL-based image for vSphere. Before proceeding, ensure that the above system-level, network-level and vSphere-specific [prerequisites]({{< relref "#prerequisites">}}) have been met. @@ -379,29 +404,6 @@ These steps use `image-builder` to create an Ubuntu-based or RHEL-based image fo ```bash python3 -m pip install --user ansible ``` - -1. Get `image-builder`: - - Using the latest EKS Anywhere version - ```bash - EKSA_RELEASE_VERSION=$(curl -sL https://anywhere-assets.eks.amazonaws.com/releases/eks-a/manifest.yaml | yq ".spec.latestVersion") - ``` - - OR - - Using a specific EKS Anywhere version - ```bash - EKSA_RELEASE_VERSION=v0.18.0 - ``` - - ```bash - cd /tmp - BUNDLE_MANIFEST_URL=$(curl -s https://anywhere-assets.eks.amazonaws.com/releases/eks-a/manifest.yaml | yq ".spec.releases[] | select(.version==\"$EKSA_RELEASE_VERSION\").bundleManifestUrl") - IMAGEBUILDER_TARBALL_URI=$(curl -s $BUNDLE_MANIFEST_URL | yq ".spec.versionsBundles[0].eksD.imagebuilder.uri") - curl -s $IMAGEBUILDER_TARBALL_URI | tar xz ./image-builder - sudo install -m 0755 ./image-builder /usr/local/bin/image-builder - cd - - ``` 1. 
Get the latest version of `govc`: ```bash curl -L -o - "https://github.com/vmware/govmomi/releases/latest/download/govc_$(uname -s)_$(uname -m).tar.gz" | sudo tar -C /usr/local/bin -xvzf - govc @@ -490,11 +492,11 @@ These steps use `image-builder` to create an Ubuntu-based or RHEL-based image fo * `--os`: `ubuntu` * `--os-version`: `20.04` or `22.04` (default: `20.04`) * `--hypervisor`: For vSphere use `vsphere` - * `--release-channel`: Supported EKS Distro releases include 1-24, 1-25, 1-26, 1-27 and 1-28. + * `--release-channel`: Supported EKS Distro releases include 1-25, 1-26, 1-27, 1-28 and 1-29. * `--vsphere-config`: vSphere configuration file (`vsphere.json` in this example) ```bash - image-builder build --os ubuntu --hypervisor vsphere --release-channel 1-28 --vsphere-config vsphere.json + image-builder build --os ubuntu --hypervisor vsphere --release-channel 1-29 --vsphere-config vsphere.json ``` **Red Hat Enterprise Linux** @@ -504,11 +506,11 @@ These steps use `image-builder` to create an Ubuntu-based or RHEL-based image fo * `--os`: `redhat` * `--os-version`: `8` (default: `8`) * `--hypervisor`: For vSphere use `vsphere` - * `--release-channel`: Supported EKS Distro releases include 1-24, 1-25, 1-26, 1-27 and 1-28. + * `--release-channel`: Supported EKS Distro releases include 1-25, 1-26, 1-27, 1-28 and 1-29. * `--vsphere-config`: vSphere configuration file (`vsphere.json` in this example) ```bash - image-builder build --os redhat --hypervisor vsphere --release-channel 1-28 --vsphere-config vsphere.json + image-builder build --os redhat --hypervisor vsphere --release-channel 1-29 --vsphere-config vsphere.json ``` ### Build Bare Metal node images @@ -574,30 +576,6 @@ These steps use `image-builder` to create an Ubuntu-based or RHEL-based image fo ```bash python3 -m pip install --user ansible ``` - -1. Get `image-builder`: - - Using the latest EKS Anywhere version - ```bash - EKSA_RELEASE_VERSION=$(curl -sL https://anywhere-assets.eks.amazonaws.com/releases/eks-a/manifest.yaml | yq ".spec.latestVersion") - ``` - - OR - - Using a specific EKS Anywhere version - ```bash - EKSA_RELEASE_VERSION=v0.18.0 - ``` - - ```bash - cd /tmp - BUNDLE_MANIFEST_URL=$(curl -s https://anywhere-assets.eks.amazonaws.com/releases/eks-a/manifest.yaml | yq ".spec.releases[] | select(.version==\"$EKSA_RELEASE_VERSION\").bundleManifestUrl") - IMAGEBUILDER_TARBALL_URI=$(curl -s $BUNDLE_MANIFEST_URL | yq ".spec.versionsBundles[0].eksD.imagebuilder.uri") - curl -s $IMAGEBUILDER_TARBALL_URI | tar xz ./image-builder - sudo install -m 0755 ./image-builder /usr/local/bin/image-builder - cd - - ``` - 1. Create an Ubuntu or Red Hat image: **Ubuntu** @@ -607,12 +585,11 @@ These steps use `image-builder` to create an Ubuntu-based or RHEL-based image fo * `--os`: `ubuntu` * `--os-version`: `20.04` or `22.04` (default: `20.04`) * `--hypervisor`: `baremetal` - * `--release-channel`: A [supported EKS Distro release](https://anywhere.eks.amazonaws.com/docs/reference/support/support-versions/) - formatted as "[major]-[minor]"; for example "1-27" + * `--release-channel`: Supported EKS Distro releases include 1-25, 1-26, 1-27, 1-28 and 1-29. 
* `--baremetal-config`: baremetal config file if using proxy ```bash - image-builder build --os ubuntu --hypervisor baremetal --release-channel 1-27 + image-builder build --os ubuntu --hypervisor baremetal --release-channel 1-29 ``` **Red Hat Enterprise Linux (RHEL)** @@ -637,14 +614,12 @@ These steps use `image-builder` to create an Ubuntu-based or RHEL-based image fo * `--os`: `redhat` * `--os-version`: `8` (default: `8`) * `--hypervisor`: `baremetal` - * `--release-channel`: A [supported EKS Distro release](https://anywhere.eks.amazonaws.com/docs/reference/support/support-versions/) - formatted as "[major]-[minor]"; for example "1-27" + * `--release-channel`: Supported EKS Distro releases include 1-25, 1-26, 1-27, 1-28 and 1-29. * `--baremetal-config`: Bare metal config file ```bash - image-builder build --os redhat --hypervisor baremetal --release-channel 1-28 --baremetal-config baremetal.json + image-builder build --os redhat --hypervisor baremetal --release-channel 1-29 --baremetal-config baremetal.json ``` - 1. To consume the image, serve it from an accessible web server, then create the [bare metal cluster spec]({{< relref "../getting-started/baremetal/bare-spec/" >}}) configuring the `osImageURL` field URL of the image. For example: @@ -652,7 +627,7 @@ These steps use `image-builder` to create an Ubuntu-based or RHEL-based image fo osImageURL: "http:///my-ubuntu-v1.23.9-eks-a-17-amd64.gz" ``` - See descriptions of [osImageURL]({{< relref "../getting-started/baremetal/bare-spec/#osimageurl" >}}) for further information. + See descriptions of [`osImageURL`]({{< relref "../getting-started/baremetal/bare-spec/#osimageurl-optional" >}}) for further information. ### Build CloudStack node images @@ -718,29 +693,6 @@ These steps use `image-builder` to create a RHEL-based image for CloudStack. Bef ```bash python3 -m pip install --user ansible ``` - -1. Get `image-builder`: - - Using the latest EKS Anywhere version - ```bash - EKSA_RELEASE_VERSION=$(curl -sL https://anywhere-assets.eks.amazonaws.com/releases/eks-a/manifest.yaml | yq ".spec.latestVersion") - ``` - - OR - - Using a specific EKS Anywhere version - ```bash - EKSA_RELEASE_VERSION=v0.18.0 - ``` - - ```bash - cd /tmp - BUNDLE_MANIFEST_URL=$(curl -s https://anywhere-assets.eks.amazonaws.com/releases/eks-a/manifest.yaml | yq ".spec.releases[] | select(.version==\"$EKSA_RELEASE_VERSION\").bundleManifestUrl") - IMAGEBUILDER_TARBALL_URI=$(curl -s $BUNDLE_MANIFEST_URL | yq ".spec.versionsBundles[0].eksD.imagebuilder.uri") - curl -s $IMAGEBUILDER_TARBALL_URI | tar xz ./image-builder - sudo install -m 0755 ./image-builder /usr/local/bin/image-builder - cd - - ``` 1. Create a CloudStack configuration file (for example, `cloudstack.json`) to provide the location of a Red Hat Enterprise Linux 8 ISO image and related checksum and Red Hat subscription information: ```json { @@ -752,19 +704,17 @@ These steps use `image-builder` to create a RHEL-based image for CloudStack. Bef } ``` >**_NOTE_**: To build the RHEL-based image, `image-builder` temporarily consumes a Red Hat subscription. That subscription is removed once the image is built. - 1. To create a RHEL-based image, run `image-builder` with the following options: * `--os`: `redhat` * `--os-version`: `8` (default: `8`) * `--hypervisor`: For CloudStack use `cloudstack` - * `--release-channel`: Supported EKS Distro releases include 1-24, 1-25, 1-26, 1-27 and 1-28. + * `--release-channel`: Supported EKS Distro releases include 1-25, 1-26, 1-27, 1-28 and 1-29. 
* `--cloudstack-config`: CloudStack configuration file (`cloudstack.json` in this example) ```bash - image-builder build --os redhat --hypervisor cloudstack --release-channel 1-28 --cloudstack-config cloudstack.json + image-builder build --os redhat --hypervisor cloudstack --release-channel 1-29 --cloudstack-config cloudstack.json ``` - 1. To consume the resulting RHEL-based image, add it as a template to your CloudStack setup as described in [Preparing CloudStack]({{< relref "../getting-started/cloudstack/cloudstack-preparation" >}}). ### Build Snow node images @@ -822,29 +772,6 @@ These steps use `image-builder` to create an Ubuntu-based Amazon Machine Image ( ```bash python3 -m pip install --user ansible ``` - -1. Get `image-builder`: - - Using the latest EKS Anywhere version - ```bash - EKSA_RELEASE_VERSION=$(curl -sL https://anywhere-assets.eks.amazonaws.com/releases/eks-a/manifest.yaml | yq ".spec.latestVersion") - ``` - - OR - - Using a specific EKS Anywhere version - ```bash - EKSA_RELEASE_VERSION=v0.18.0 - ``` - - ```bash - cd /tmp - BUNDLE_MANIFEST_URL=$(curl -s https://anywhere-assets.eks.amazonaws.com/releases/eks-a/manifest.yaml | yq ".spec.releases[] | select(.version==\"$EKSA_RELEASE_VERSION\").bundleManifestUrl") - IMAGEBUILDER_TARBALL_URI=$(curl -s $BUNDLE_MANIFEST_URL | yq ".spec.versionsBundles[0].eksD.imagebuilder.uri") - curl -s $IMAGEBUILDER_TARBALL_URI | tar xz ./image-builder - sudo install -m 0755 ./image-builder /usr/local/bin/image-builder - cd /home/$USER - ``` 1. Create an AMI configuration file (for example, `ami.json`) that contains various AMI parameters. For example: ```json @@ -900,19 +827,18 @@ These steps use `image-builder` to create an Ubuntu-based Amazon Machine Image ( ##### **volume_type** The type of root EBS volume, such as gp2, gp3, io1, etc. (default: `gp3`). - 1. To create an Ubuntu-based image, run `image-builder` with the following options: * `--os`: `ubuntu` * `--os-version`: `20.04` or `22.04` (default: `20.04`) * `--hypervisor`: For AMI, use `ami` - * `--release-channel`: Supported EKS Distro releases include 1-24, 1-25, 1-26, 1-27 and 1-28. + * `--release-channel`: Supported EKS Distro releases include 1-25, 1-26, 1-27, 1-28 and 1-29. * `--ami-config`: AMI configuration file (`ami.json` in this example) ```bash - image-builder build --os ubuntu --hypervisor ami --release-channel 1-28 --ami-config ami.json + image-builder build --os ubuntu --hypervisor ami --release-channel 1-29 --ami-config ami.json ``` -1. After the build, the Ubuntu AMI will be available in your AWS account in the AWS region specified in your AMI configuration file. If you wish to export it as a Raw image, you can achieve this using the AWS CLI. +1. After the build, the Ubuntu AMI will be available in your AWS account in the AWS region specified in your AMI configuration file. If you wish to export it as a raw image, you can achieve this using the AWS CLI. ``` ARTIFACT_ID=$(cat | jq -r '.builds[0].artifact_id') AMI_ID=$(echo $ARTIFACT_ID | cut -d: -f2) @@ -929,7 +855,6 @@ These steps use `image-builder` to create an Ubuntu-based Amazon Machine Image ( These steps use `image-builder` to create a Ubuntu-based image for Nutanix AHV and import it into the AOS Image Service. Before proceeding, ensure that the above system-level, network-level and Nutanix-specific [prerequisites]({{< relref "#prerequisites">}}) have been met. 1. 
Download an [Ubuntu cloud image](https://cloud-images.ubuntu.com/releases) or [RHEL cloud image](https://access.redhat.com/downloads/content/rhel) pertaining to your desired OS and OS version and upload it to the AOS Image Service using Prism. You will need to specify the image's name in AOS as the `source_image_name` in the `nutanix.json` config file specified below. You can also skip this step and directly use the `image_url` field in the config file to provide the URL of a publicly accessible image as source. - 1. Create a Linux user for running image-builder. ```bash sudo adduser image-builder @@ -981,29 +906,6 @@ These steps use `image-builder` to create a Ubuntu-based image for Nutanix AHV a ```bash python3 -m pip install --user ansible ``` - -1. Get `image-builder`: - - Using the latest EKS Anywhere version - ```bash - EKSA_RELEASE_VERSION=$(curl -sL https://anywhere-assets.eks.amazonaws.com/releases/eks-a/manifest.yaml | yq ".spec.latestVersion") - ``` - - OR - - Using a specific EKS Anywhere version - ```bash - EKSA_RELEASE_VERSION=v0.18.0 - ``` - - ```bash - cd /tmp - BUNDLE_MANIFEST_URL=$(curl -s https://anywhere-assets.eks.amazonaws.com/releases/eks-a/manifest.yaml | yq ".spec.releases[] | select(.version==\"$EKSA_RELEASE_VERSION\").bundleManifestUrl") - IMAGEBUILDER_TARBALL_URI=$(curl -s $BUNDLE_MANIFEST_URL | yq ".spec.versionsBundles[0].eksD.imagebuilder.uri") - curl -s $IMAGEBUILDER_TARBALL_URI | tar xz ./image-builder - sudo install -m 0755 ./image-builder /usr/local/bin/image-builder - cd - - ``` 1. Create a `nutanix.json` config file. More details on values can be found in the [image-builder documentation](https://image-builder.sigs.k8s.io/capi/providers/nutanix.html). See example below: ```json { @@ -1037,11 +939,11 @@ These steps use `image-builder` to create a Ubuntu-based image for Nutanix AHV a * `--os`: `ubuntu` * `--os-version`: `20.04` or `22.04` (default: `20.04`) * `--hypervisor`: For Nutanix use `nutanix` - * `--release-channel`: Supported EKS Distro releases include 1-24, 1-25, 1-26, 1-27 and 1-28. + * `--release-channel`: Supported EKS Distro releases include 1-25, 1-26, 1-27, 1-28 and 1-29. * `--nutanix-config`: Nutanix configuration file (`nutanix.json` in this example) ```bash - image-builder build --os ubuntu --hypervisor nutanix --release-channel 1-28 --nutanix-config nutanix.json + image-builder build --os ubuntu --hypervisor nutanix --release-channel 1-29 --nutanix-config nutanix.json ``` **Red Hat Enterprise Linux** @@ -1051,11 +953,11 @@ These steps use `image-builder` to create a Ubuntu-based image for Nutanix AHV a * `--os`: `redhat` * `--os-version`: `8` or `9` (default: `8`) * `--hypervisor`: For Nutanix use `nutanix` - * `--release-channel`: Supported EKS Distro releases include 1-24, 1-25, 1-26, 1-27 and 1-28. + * `--release-channel`: Supported EKS Distro releases include 1-25, 1-26, 1-27, 1-28 and 1-29. * `--nutanix-config`: Nutanix configuration file (`nutanix.json` in this example) ```bash - image-builder build --os redhat --hypervisor nutanix --release-channel 1-28 --nutanix-config nutanix.json + image-builder build --os redhat --hypervisor nutanix --release-channel 1-29 --nutanix-config nutanix.json ``` ### Configuring OS version @@ -1094,13 +996,11 @@ These steps use `image-builder` to create a Ubuntu-based image for Nutanix AHV a 9.2 9 - Nutanix only + CloudStack and Nutanix only -Currently, Ubuntu is the only operating system that supports multiple `os-version` values. 
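As an illustration of the `--os-version` selection described above (a sketch only; the flag values shown are examples and should be matched to the OS and hypervisor combinations listed in the table):

```bash
# Example: build an Ubuntu 22.04 vSphere OVA instead of the default 20.04,
# reusing the same vsphere.json configuration file referenced earlier.
image-builder build --os ubuntu --os-version 22.04 --hypervisor vsphere \
  --release-channel 1-29 --vsphere-config vsphere.json
```

Omitting `--os-version` falls back to the default noted for that OS in the option lists above (`20.04` for Ubuntu, `8` for RHEL).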
- ### Building images for a specific EKS Anywhere version This section provides information about the relationship between `image-builder` and EKS Anywhere CLI version, and provides instructions on building images pertaining to a specific EKS Anywhere version. @@ -1154,7 +1054,7 @@ The table below shows the possible firmware options for the hypervisor and OS co | | vSphere | Baremetal | CloudStack | Nutanix | Snow | |:----------:|:-------------------:|:-------------------:|:----------:|:-------:|:----:| -| **Ubuntu** | bios (default), efi | bios, efi (default) | bios | bios | bios | +| **Ubuntu** | bios (default), efi | efi | bios | bios | bios | | **RHEL** | bios | bios | bios | bios | bios | ### Mounting additional files @@ -1250,7 +1150,7 @@ In order to use Red Hat Satellite in the image build process follow the steps be #### Building node images in an air-gapped environment 1. Identify the EKS-D release channel (generally aligning with Kubernetes version) to build. For example, 1.27 or 1.28 -2. Identify the latest release of EKS-A from [changelog]({{< ref "/docs/whatsnew/changelog" >}}). For example, v0.18.0 +2. Identify the latest release of EKS-A from [changelog]({{< ref "/docs/whatsnew/changelog" >}}). For example, 3. Run `image-builder` CLI to download manifests in an environment with internet connectivity ```bash image-builder download manifests @@ -1280,7 +1180,7 @@ In order to use Red Hat Satellite in the image build process follow the steps be fi if [ -z "${RELEASE_CHANNEL}" ]; then - echo "RELEASE_CHANNEL not set. Supported EKS Distro releases include 1-24, 1-25, 1-26, 1-27 and 1-28" + echo "RELEASE_CHANNEL not set. Supported EKS Distro releases include 1-25, 1-26, 1-27, 1-28 and 1-29" exit 1 fi @@ -1349,7 +1249,7 @@ In order to use Red Hat Satellite in the image build process follow the steps be ``` 5. Set EKS-A release version and EKS-D release channel as environment variables and execute the script ```bash - EKSA_RELEASE_VERSION=v0.18.0 RELEASE_CHANNEL=1-28 ./download-airgapped-artifacts.sh + EKSA_RELEASE_VERSION= RELEASE_CHANNEL=1-28 ./download-airgapped-artifacts.sh ``` Executing this script will create a local directory `eks-a-d-artifacts` and download the required EKS-A and EKS-D artifacts. 6. Create two repositories, one for EKS-A and one for EKS-D on the private artifacts server. @@ -1367,7 +1267,7 @@ In order to use Red Hat Satellite in the image build process follow the steps be deb [trusted=yes] http:///debian focal-backports main restricted universe multiverse deb [trusted=yes] http:///debian focal-security main restricted universe multiverse ``` - `focal` in the above file refers to the name of the Ubuntu OS for version 20.04. If using Ubuntu version 22.04 replace `focal` with `jammy`. + `focal` in the above file refers to the code name for the Ubuntu 20.04 release. If using Ubuntu version 22.04, replace `focal` with `jammy`. 11. Create a provider or hypervisor configuration file and add the following fields ```json { @@ -1378,7 +1278,7 @@ In order to use Red Hat Satellite in the image build process follow the steps be "extra_repos": "", "disable_public_repos": "true", "iso_url": "http:///ubuntu-20.04.1-legacy-server-amd64.iso", - "iso_checksum": "", "iso_checksum_type": "sha256" } ``` @@ -1388,7 +1288,7 @@ In order to use Red Hat Satellite in the image build process follow the steps be ``` -## Images +## Container Images -The various images for EKS Anywhere can be found [in the EKS Anywhere ECR repository](https://gallery.ecr.aws/eks-anywhere/). 
-The various images for EKS Distro can be found [in the EKS Distro ECR repository](https://gallery.ecr.aws/eks-distro/). +* The container images distributed by EKS Anywhere can be found in the [EKS Anywhere ECR Public Gallery](https://gallery.ecr.aws/eks-anywhere). +* The container images distributed by EKS Distro can be found in the [EKS Distro ECR Public Gallery](https://gallery.ecr.aws/eks-distro). diff --git a/docs/content/en/docs/osmgmt/overview.md b/docs/content/en/docs/osmgmt/overview.md index e17a172cb014..16af2b435e1d 100644 --- a/docs/content/en/docs/osmgmt/overview.md +++ b/docs/content/en/docs/osmgmt/overview.md @@ -12,7 +12,7 @@ Reference the table below for the operating systems supported per deployment opt || vSphere | Bare metal | Snow | CloudStack | Nutanix | | --- | :---: | :---: | :---: | :---: | :---: | -| Bottlerocket | ✔ | ✔ | — | — | — | +| Bottlerocket | ✔ | — | — | — | — | | Ubuntu | ✔ | ✔ | ✔ | — | ✔ | | RHEL | ✔ | ✔ | — | ✔ | ✔ | @@ -22,7 +22,7 @@ Reference the table below for the operating systems supported per deployment opt | Ubuntu | 20.04.x, 22.04.x | | RHEL | 8.x, 9.x* | -*Nutanix only +*Nutanix and CloudStack only With the vSphere, bare metal, Snow, CloudStack and Nutanix deployment options, EKS Anywhere provisions the operating system when new machines are deployed during cluster creation, upgrade, and scaling operations. You can configure the operating system to use through the EKS Anywhere cluster spec, which varies by deployment option. See the deployment option sections below for an overview of how the operating system configuration works per deployment option. @@ -30,7 +30,7 @@ With the vSphere, bare metal, Snow, CloudStack and Nutanix deployment options, E To configure the operating system to use for EKS Anywhere clusters on vSphere, use the [`VSphereMachingConfig` `spec.template` field]({{< ref "/docs/getting-started/vsphere/vsphere-spec#template-optional" >}}). The template name corresponds to the template you imported into your vSphere environment. See the [Customize OVAs]({{< ref "/docs/getting-started/vsphere/customize/customize-ovas" >}}) and [Import OVAs]({{< ref "/docs/getting-started/vsphere/customize/vsphere-ovas" >}}) documentation pages for more information. Changing the template after cluster creation will result in the deployment of new machines. ## Bare metal -To configure the operating system to use for EKS Anywhere clusters on bare metal, use the [`TinkerbellDatacenterConfig` `spec.osImageURL` field]({{< ref "/docs/getting-started/baremetal/bare-spec#osimageurl" >}}). This field can be used to stream the operating system from a custom location and is required to use Ubuntu or RHEL. You cannot change the `osImageURL` after creating your cluster. To upgrade the operating system, you must replace the image at the existing `osImageURL` location with a new image. Operating system changes are only deployed when an action that triggers a deployment of new machines is triggered, which includes Kubernetes version upgrades only at this time. +To configure the operating system to use for EKS Anywhere clusters on bare metal, use the [`TinkerbellDatacenterConfig` `spec.osImageURL` field]({{< ref "/docs/getting-started/baremetal/bare-spec#osimageurl-optional" >}}). This field can be used to stream the operating system from a custom location and is required to use Ubuntu or RHEL. You cannot change the `osImageURL` after creating your cluster. 
To upgrade the operating system, you must replace the image at the existing `osImageURL` location with a new image. Operating system changes are only deployed when an action that triggers a deployment of new machines is triggered, which includes Kubernetes version upgrades only at this time. ## Snow To configure the operating to use for EKS Anywhere clusters on Snow, use the [`SnowMachineConfig` `spec.osFamily` field]({{< ref "/docs/getting-started/snow/snow-spec#osfamily" >}}). At this time, only Ubuntu is supported for use with EKS Anywhere clusters on Snow. You can customize the instance image with the [`SnowMachineConfig` `spec.amiID` field]({{< ref "/docs/getting-started/snow/snow-spec#amiid-optional" >}}) and the instance type with the [`SnowMachineConfig` `spec.instanceType` field]({{< ref "/docs/getting-started/snow/snow-spec#instancetype-optional" >}}). Changes to these fields after cluster creation will result in the deployment of new machines. @@ -39,4 +39,4 @@ To configure the operating to use for EKS Anywhere clusters on Snow, use the [`S To configure the operating system to use for EKS Anywhere clusters on CloudStack, use the [`CloudStackMachineConfig` `spec.template.name` field]({{< ref "/docs/getting-started/cloudstack/cloud-spec#templateidname-required" >}}). At this time, only RHEL is supported for use with EKS Anywhere clusters on CloudStack. Changing the template name field after cluster creation will result in the deployment of new machines. ## Nutanix -To configure the operating system to use for EKS Anywhere clusters on Nutanix, use the [`NutanixMachineConfig` `spec.image.name` field]({{< ref "/docs/getting-started/nutanix/nutanix-spec#imagename-name-or-uuid-required" >}}) or the image uuid field. At this time, only Ubuntu is supported for use with EKS Anywhere clusters on Nutanix. Changing the image name or uuid field after cluster creation will result in the deployment of new machines. +To configure the operating system to use for EKS Anywhere clusters on Nutanix, use the [`NutanixMachineConfig` `spec.image.name` field]({{< ref "/docs/getting-started/nutanix/nutanix-spec#imagename-name-or-uuid-required" >}}) or the image uuid field. At this time, only Ubuntu and RHEL are supported for use with EKS Anywhere clusters on Nutanix. Changing the image name or uuid field after cluster creation will result in the deployment of new machines. diff --git a/docs/content/en/docs/overview/faq/_index.md b/docs/content/en/docs/overview/faq/_index.md index a13b4691493c..222d99206ebe 100644 --- a/docs/content/en/docs/overview/faq/_index.md +++ b/docs/content/en/docs/overview/faq/_index.md @@ -103,4 +103,4 @@ There would need to be a change to the upstream project to support ESXi. ### Can I deploy EKS Anywhere on a single node? -Yes. Single node cluster deployment is supported for Bare Metal. See [workerNodeGroupConfigurations]({{< relref "../../getting-started/baremetal/bare-spec/#workernodegroupconfigurations">}}) +Yes. Single node cluster deployment is supported for Bare Metal. 
See [workerNodeGroupConfigurations]({{< relref "../../getting-started/baremetal/bare-spec/#workernodegroupconfigurations-optional">}}) diff --git a/docs/content/en/docs/overview/partner/_index.md b/docs/content/en/docs/overview/partner/_index.md index 6fe0a9bb2262..184a3b5366f1 100644 --- a/docs/content/en/docs/overview/partner/_index.md +++ b/docs/content/en/docs/overview/partner/_index.md @@ -16,60 +16,65 @@ The following shows validated EKS Anywhere partners whose products have passed c ``` Kubernetes Version : 1.27 -Date of Conformance Test : 2023-09-21 +Date of Conformance Test : 2024-05-02 Following ISV Partners have Validated their Conformance : -VENDOR_PRODUCT VENDOR_PRODUCT_TYPE -dynatrace dynatrace -komodor k8s-watcher -kong kong-enterprise -accuknox kubearmor -kubecost cost-analyzer -nirmata enterprise-kyverno -lacework polygraph -suse neuvector -newrelic newrelic-bundle -perfectscale perfectscale -pulumi pulumi-kubernetes-operator -sysdig sysdig-agent -hashicorp vault +VENDOR_PRODUCT VENDOR_PRODUCT_TYPE VENDOR_PRODUCT_VERSION +aqua aqua-enforcer 2022.4.20 +dynatrace dynatrace 0.10.1 +komodor k8s-watcher 1.15.5 +kong kong-enterprise 2.27.0 +accuknox kubearmor v1.3.2 +kubecost cost-analyzer 2.1.0 +nirmata enterprise-kyverno 1.6.10 +lacework polygraph 6.11.0 +newrelic nri-bundle 5.0.64 +perfectscale perfectscale v0.0.38 +pulumi pulumi-kubernetes-operator 0.3.0 +solo.io solo-istiod 1.18.3-eks-a +sysdig sysdig-agent 1.6.3 +tetrate.io tetrate-istio-distribution 1.18.1 +hashicorp vault 0.25.0 ``` ## vSphere provider validated partners ``` -Kubernetes Version : 1.27 -Date of Conformance Test : 2023-09-21 +Kubernetes Version : 1.28 +Date of Conformance Test : 2024-05-02 Following ISV Partners have Validated their Conformance : -VENDOR_PRODUCT VENDOR_PRODUCT_TYPE -dynatrace dynatrace -komodor k8s-watcher -kong kong-enterprise -accuknox kubearmor -kubecost cost-analyzer -nirmata enterprise-kyverno -lacework polygraph -suse neuvector -newrelic newrelic-bundle -perfectscale perfectscale -pulumi pulumi-kubernetes-operator -sysdig sysdig-agent -hashicorp vault +VENDOR_PRODUCT VENDOR_PRODUCT_TYPE VENDOR_PRODUCT_VERSION +aqua aqua-enforcer 2022.4.20 +dynatrace dynatrace 0.10.1 +komodor k8s-watcher 1.15.5 +kong kong-enterprise 2.27.0 +accuknox kubearmor v1.3.2 +kubecost cost-analyzer 2.1.0 +nirmata enterprise-kyverno 1.6.10 +lacework polygraph 6.11.0 +newrelic nri-bundle 5.0.64 +perfectscale perfectscale v0.0.38 +pulumi pulumi-kubernetes-operator 0.3.0 +solo.io solo-istiod 1.18.3-eks-a +sysdig sysdig-agent 1.6.3 +tetrate.io tetrate-istio-distribution 1.18.1 +hashicorp vault 0.25.0 ``` ## AWS Snow provider validated partners ``` -Kubernetes Version : 1.27 -Date of Conformance Test : 2023-09-21 +Kubernetes Version : 1.28 +Date of Conformance Test : 2023-11-10 Following ISV Partners have Validated their Conformance : VENDOR_PRODUCT VENDOR_PRODUCT_TYPE dynatrace dynatrace +solo.io solo-istiod komodor k8s-watcher kong kong-enterprise accuknox kubearmor @@ -86,21 +91,23 @@ hashicorp vault ## AWS Outpost provider validated partners ``` Kubernetes Version : 1.27 -Date of Conformance Test : 2023-09-21 +Date of Conformance Test : 2024-05-02 Following ISV Partners have Validated their Conformance : -VENDOR_PRODUCT VENDOR_PRODUCT_TYPE -dynatrace dynatrace -komodor k8s-watcher -kong kong-enterprise -accuknox kubearmor -kubecost cost-analyzer -nirmata enterprise-kyverno -lacework polygraph -suse neuvector -perfectscale perfectscale -pulumi pulumi-kubernetes-operator -sysdig sysdig-agent -hashicorp vault 
+VENDOR_PRODUCT VENDOR_PRODUCT_TYPE VENDOR_PRODUCT_VERSION +aqua aqua-enforcer 2022.4.20 +dynatrace dynatrace 0.10.1 +komodor k8s-watcher 1.15.5 +kong kong-enterprise 2.27.0 +accuknox kubearmor v1.3.2 +kubecost cost-analyzer 2.1.0 +nirmata enterprise-kyverno 1.6.10 +lacework polygraph 6.11.0 +perfectscale perfectscale v0.0.38 +pulumi pulumi-kubernetes-operator 0.3.0 +solo.io solo-istiod 1.18.3-eks-a +sysdig sysdig-agent 1.6.3 +tetrate.io tetrate-istio-distribution 1.18.1 +hashicorp vault 0.25.0 ``` diff --git a/docs/content/en/docs/troubleshooting/troubleshooting.md b/docs/content/en/docs/troubleshooting/troubleshooting.md index 54f07ce0ad73..982573d78fa4 100755 --- a/docs/content/en/docs/troubleshooting/troubleshooting.md +++ b/docs/content/en/docs/troubleshooting/troubleshooting.md @@ -248,6 +248,45 @@ kubectl logs -n --kubeconfig= .... ``` +### Kubectl commands return dial tcp: i/o timeout + +If you are unable to run kubectl commands on a cluster due to timeout errors, then it is possible that the server endpoint in the kubeconfig does not match the control plane's endpoint in the infrastructure provider due to kube-vip failing to allocate a virtual IP address to the cluster. If the endpoints do not match, you can ssh into the control plane node to gather logs instead. The kubelet logs can be obtained by running `journalctl -u kubelet.service --no-pager`. It may also be helpful to look at kube-vip logs, which can be found in the `/var/log/pods/kube-system_kube-vip-*` directory. + +#### Verify Cluster Certificates are valid + +If kubectl commands are not working due to timeout issues, then it may also be helpful to verify that certificates on the etcd and control plane nodes have not expired. + +SSH to one of your etcd nodes and view the etcd container logs in `/var/log/containers`. + +View the control plane certificates by SSHing into one of your control plane nodes and run the following commands to view the validity of the /var/lib/kubeadm certificates and see their expiration dates. +{{< tabpane >}} +{{< tab header="Ubuntu or RHEL" lang="bash" >}} +sudo kubeadm certs check-expiration +{{< /tab >}} +{{< tab header="Bottlerocket" lang="bash" >}} +# you would be in the admin container when you ssh to the Bottlerocket machine +# open a root shell +sudo sheltie + +# pull the image +IMAGE_ID=$(apiclient get | apiclient exec admin jq -r '.settings["host-containers"]["kubeadm-bootstrap"].source') + +# ctr is the containerd cli. +# For more information, see https://github.com/projectatomic/containerd/blob/master/docs/cli.md +ctr image pull ${IMAGE_ID} + +# you may see missing etcd certs error, which is expected if you have external etcd nodes +ctr run \ +--mount type=bind,src=/var/lib/kubeadm,dst=/var/lib/kubeadm,options=rbind:rw \ +--mount type=bind,src=/var/lib/kubeadm,dst=/etc/kubernetes,options=rbind:rw \ +--rm \ +${IMAGE_ID} tmp-certs-check \ +/opt/bin/kubeadm certs check-expiration +{{< /tab >}} +{{< /tabpane >}} + +EKS Anywhere typically renews certificates when upgrading a cluster. However, if a cluster has not been upgraded for over a year, then it is necessary to manually renew these certificates. Please see [Certificate rotation]({{< relref "../clustermgmt/security/manually-renew-certs.md" >}}) to manually rotate expired certificates. + ### Bootstrap cluster fails to come up If your bootstrap cluster has problems you may get detailed logs by looking at the files created under the `${CLUSTER_NAME}/logs` folder. 
The capv-controller-manager log file will surface issues with vsphere specific configuration while the capi-controller-manager log file might surface other generic issues with the cluster configuration passed in. @@ -371,10 +410,10 @@ If the bootstrap log indicates that the etcadm join operation fail, this can mea #### New etcd machine cannot find the existing etcd cluster members -The edcdadm log shows error that the new etcd machine cannot connect to the existing etcd cluster memebers. This means the `etcdadm-init` secret is outdated. To update it, run +The edcdadm log shows error that the new etcd machine cannot connect to the existing etcd cluster members. This means the `etcdadm-init` secret is outdated. To update it, run ```sh -kubectl edit -etcd-init -n eksa-system +kubectl edit secrets -etcd-init -n eksa-system ``` and make sure the new etcd machine IP is included in the secret. diff --git a/docs/content/en/docs/whatsnew/changelog.md b/docs/content/en/docs/whatsnew/changelog.md index 3b3011a3fd17..ee9542bd1824 100644 --- a/docs/content/en/docs/whatsnew/changelog.md +++ b/docs/content/en/docs/whatsnew/changelog.md @@ -9,20 +9,142 @@ description: > --- {{% alert title="Announcements" color="warning" %}} -* EKS Anywhere release `v0.19.0` introduces support for creating Kubernetes version v1.29 clusters. A conformance test was [promoted](https://github.com/kubernetes/kubernetes/pull/120069) in Kubernetes v1.29 that verifies that `Service`s serving different L4 protocols with the same port number can co-exist in a Kubernetes cluster. This is not supported in Cilium, the CNI deployed on EKS Anywhere clusters, because Cilium currently does not differentiate between TCP and UDP protocols for Kubernetes `Service`s. Hence EKS Anywhere v1.29 clusters will not pass this specific conformance test. This service protocol differentiation is being tracked in an upstream [issue](https://github.com/cilium/cilium/issues/9207) and will be supported in a future Cilium release. A future release of EKS Anywhere will include the patched Cilium version when it is available. -* The Bottlerocket project [will not be releasing](https://github.com/bottlerocket-os/bottlerocket/issues/3794) bare metal variants for Kubernetes versions v1.29 and beyond. Hence Bottlerocket is not a supported operating system for creating EKS Anywhere bare metal clusters with Kubernetes versions v1.29 and above. However, Bottlerocket is still supported for bare metal clusters running Kubernetes versions v1.28 and below. Please refer to [this](https://github.com/aws/eks-anywhere/issues/7754) pinned issue for more information regarding the deprecation. +* EKS Anywhere release `v0.19.0` introduces support for creating Kubernetes version v1.29 clusters. A conformance test was promoted in Kubernetes v1.29 that verifies that `Service`s serving different L4 protocols with the same port number can co-exist in a Kubernetes cluster. This is not supported in Cilium, the CNI deployed on EKS Anywhere clusters, because Cilium currently does not differentiate between TCP and UDP protocols for Kubernetes `Service`s. Hence EKS Anywhere v1.29 clusters will not pass this specific conformance test. This service protocol differentiation is being tracked in an upstream Cilium issue and will be supported in a future Cilium release. A future release of EKS Anywhere will include the patched Cilium version when it is available.
+ Refer to the following links for more information regarding the conformance test: + * [PR promoting multi-protocol `Service` test in Kubernetes v1.29](https://github.com/kubernetes/kubernetes/pull/120069) + * [Cilium issue for the multi-protocol `Service` feature](https://github.com/cilium/cilium/issues/9207) + * [Cilium issue for the Kubernetes v1.29 conformance failures](https://github.com/cilium/cilium/issues/29913) +* The Bottlerocket project will not be releasing bare metal variants for Kubernetes versions v1.29 and beyond. Hence Bottlerocket is not a supported operating system for creating EKS Anywhere bare metal clusters with Kubernetes versions v1.29 and above. However, Bottlerocket is still supported for bare metal clusters running Kubernetes versions v1.28 and below.
+ Refer to the following links for more information regarding the deprecation: + * [Bottlerocket announcement regarding deprecation of bare metal variants](https://github.com/bottlerocket-os/bottlerocket/issues/3794) + * [EKS Anywhere issue regarding deprecation of Bottlerocket bare metal variants](https://github.com/aws/eks-anywhere/issues/7754) * On January 31, 2024, a **High**-severity vulnerability CVE-2024-21626 was published affecting all `runc` versions <= `v1.1.11`. This CVE has been fixed in runc version `v1.1.12`, which has been included in EKS Anywhere release `v0.18.6`. In order to fix this CVE in your new/existing EKS-A cluster, you **MUST** build or download new OS images pertaining to version `v0.18.6` and create/upgrade your cluster with these images.
Refer to the following links for more information on the steps to mitigate the CVE. * [AWS Security bulletin for the `runc` issue](https://aws.amazon.com/security/security-bulletins/AWS-2024-001) * [Building Ubuntu and Red Hat node images]({{< relref "../osmgmt/artifacts/#building-node-images" >}}) * [Downloading Bottlerocket node images]({{< relref "../osmgmt/artifacts/#download-bottlerocket-node-images" >}}) * [Upgrading an EKS Anywhere cluster]({{< relref "../clustermgmt/cluster-upgrades" >}}) +* EKS Anywhere version `v0.19.4` introduced a regression in the Curated Packages workflow due to a bug in the associated Packages controller version (`v0.4.2`). This will be fixed in the next patch release. {{% /alert %}} {{% alert title="General Information" color="info" %}} * When upgrading to a new minor version, a new OS image must be created using the new image-builder CLI pertaining to that release. {{% /alert %}} +## [v0.19.6](https://github.com/aws/eks-anywhere/releases/tag/v0.19.6) +### Supported OS version details +| | vSphere | Bare Metal | Nutanix | CloudStack | Snow | +|:-------------------:|:-------:|:----------:|:-------:|:----------:|:----:| +| Ubuntu 20.04 | ✔ | ✔ | ✔ | — | ✔ | +| Ubuntu 22.04 | ✔ | ✔ | ✔ | — | — | +| Bottlerocket 1.19.2 | ✔ | \* | — | — | — | +| RHEL 8.x | ✔ | ✔ | ✔ | ✔ | — | +| RHEL 9.x | — | — | ✔ | ✔ | — | + +\* [EKS Anywhere issue regarding deprecation of Bottlerocket bare metal variants](https://github.com/aws/eks-anywhere/issues/7754) + +### Changed +- Backporting dependency bumps to fix vulnerabilities [#8118](https://github.com/aws/eks-anywhere/pull/8118) +- Upgraded EKS-D: + - `v1-25-eks-37` to [`v1-25-eks-39`](https://distro.eks.amazonaws.com/releases/1-25/39/) + - `v1-26-eks-33` to [`v1-26-eks-35`](https://distro.eks.amazonaws.com/releases/1-26/35/) + - `v1-27-eks-27` to [`v1-27-eks-29`](https://distro.eks.amazonaws.com/releases/1-27/29/) + - `v1-28-eks-20` to [`v1-28-eks-22`](https://distro.eks.amazonaws.com/releases/1-28/22/) + - `v1-29-eks-9` to [`v1-29-eks-11`](https://distro.eks.amazonaws.com/releases/1-29/11/) + +### Fixed +- Fixed cluster directory being created with root ownership [#8120](https://github.com/aws/eks-anywhere/pull/8120) + +## [v0.19.5](https://github.com/aws/eks-anywhere/releases/tag/v0.19.5) +### Supported OS version details +| | vSphere | Bare Metal | Nutanix | CloudStack | Snow | +|:-------------------:|:-------:|:----------:|:-------:|:----------:|:----:| +| Ubuntu 20.04 | ✔ | ✔ | ✔ | — | ✔ | +| Ubuntu 22.04 | ✔ | ✔ | ✔ | — | — | +| Bottlerocket 1.19.2 | ✔ | \* | — | — | — | +| RHEL 8.x | ✔ | ✔ | ✔ | ✔ | — | +| RHEL 9.x | — | — | ✔ | ✔ | — | + +\* [EKS Anywhere issue regarding deprecation of Bottlerocket bare metal variants](https://github.com/aws/eks-anywhere/issues/7754) + +### Changed +- Upgraded EKS-Anywhere Packages from `v0.4.2` to [`v0.4.3`](https://github.com/aws/eks-anywhere-packages/releases/tag/v0.4.3) + +### Fixed +- Fixed registry mirror with authentication for EKS Anywhere packages + +## [v0.19.4](https://github.com/aws/eks-anywhere/releases/tag/v0.19.4) +### Supported OS version details +| | vSphere | Bare Metal | Nutanix | CloudStack | Snow | +|:-------------------:|:-------:|:----------:|:-------:|:----------:|:----:| +| Ubuntu 20.04 | ✔ | ✔ | ✔ | — | ✔ | +| Ubuntu 22.04 | ✔ | ✔ | ✔ | — | — | +| Bottlerocket 1.19.2 | ✔ | \* | — | — | — | +| RHEL 8.x | ✔ | ✔ | ✔ | ✔ | — | +| RHEL 9.x | — | — | ✔ | ✔ | — | + +\* [EKS Anywhere issue regarding deprecation of Bottlerocket bare metal 
variants](https://github.com/aws/eks-anywhere/issues/7754) + +### Changed +- Support Docs site for penultime EKS-A version [#8010](https://github.com/aws/eks-anywhere/pull/8010) +- Update Ubuntu 22.04 ISO URLs to latest stable release [#3114](https://github.com/aws/eks-anywhere-build-tooling/pull/3114) +- Upgraded EKS-D: + - `v1-25-eks-35` to [`v1-25-eks-37`](https://distro.eks.amazonaws.com/releases/1-25/37/) + - `v1-26-eks-31` to [`v1-26-eks-33`](https://distro.eks.amazonaws.com/releases/1-26/33/) + - `v1-27-eks-25` to [`v1-27-eks-27`](https://distro.eks.amazonaws.com/releases/1-27/27/) + - `v1-28-eks-18` to [`v1-28-eks-20`](https://distro.eks.amazonaws.com/releases/1-28/20/) + - `v1-29-eks-7` to [`v1-29-eks-9`](https://distro.eks.amazonaws.com/releases/1-29/9/) + +### Fixed +- Added processor for Tinkerbell Template Config [#7816](https://github.com/aws/eks-anywhere/issues/7816) +- Added nil check for eksa-version when setting etcd url [#8018](https://github.com/aws/eks-anywhere/pull/8018) +- Fixed registry mirror secret credentials set to empty [#7933](https://github.com/aws/eks-anywhere/pull/7933) + +## [v0.19.3](https://github.com/aws/eks-anywhere/releases/tag/v0.19.3) + +### Supported OS version details +| | vSphere | Bare Metal | Nutanix | CloudStack | Snow | +|:-------------------:|:-------:|:----------:|:-------:|:----------:|:----:| +| Ubuntu 20.04 | ✔ | ✔ | ✔ | — | ✔ | +| Ubuntu 22.04 | ✔ | ✔ | ✔ | — | — | +| Bottlerocket 1.19.2 | ✔ | \* | — | — | — | +| RHEL 8.x | ✔ | ✔ | ✔ | ✔ | — | +| RHEL 9.x | — | — | ✔ | ✔ | — | + +\* [EKS Anywhere issue regarding deprecation of Bottlerocket bare metal variants](https://github.com/aws/eks-anywhere/issues/7754) + +### Changed +- Updated helm to v3.14.3 [#3050](https://github.com/aws/eks-anywhere-build-tooling/pull/3050) + +### Fixed +- Bumped golang.org/x/net that has a fix for [vulnerability GO-2024-2687](https://pkg.go.dev/vuln/GO-2024-2687) +- Fixed proxy configurations for airgapped environments [#7913](https://github.com/aws/eks-anywhere/pull/7913) + +## [v0.19.2](https://github.com/aws/eks-anywhere/releases/tag/v0.19.2) + +### Supported OS version details +| | vSphere | Bare Metal | Nutanix | CloudStack | Snow | +|:-------------------:|:-------:|:----------:|:-------:|:----------:|:----:| +| Ubuntu 20.04 | ✔ | ✔ | ✔ | — | ✔ | +| Ubuntu 22.04 | ✔ | ✔ | ✔ | — | — | +| Bottlerocket 1.19.2 | ✔ | \* | — | — | — | +| RHEL 8.x | ✔ | ✔ | ✔ | ✔ | — | +| RHEL 9.x | — | — | ✔ | ✔ | — | + +\* [EKS Anywhere issue regarding deprecation of Bottlerocket bare metal variants](https://github.com/aws/eks-anywhere/issues/7754) + +### Changed +- Update CAPC to 0.4.10-rc1 [#3105](https://github.com/aws/eks-anywhere-build-tooling/pull/3015) +- Upgraded EKS-D: + - `v1-25-eks-34` to [`v1-25-eks-35`](https://distro.eks.amazonaws.com/releases/1-25/35/) + - `v1-26-eks-30` to [`v1-26-eks-31`](https://distro.eks.amazonaws.com/releases/1-26/31/) + - `v1-27-eks-24` to [`v1-27-eks-25`](https://distro.eks.amazonaws.com/releases/1-27/25/) + - `v1-28-eks-17` to [`v1-28-eks-18`](https://distro.eks.amazonaws.com/releases/1-28/18/) + - `v1-29-eks-6` to [`v1-29-eks-7`](https://distro.eks.amazonaws.com/releases/1-29/7/) + +### Fixed +- Fixing tinkerbell action image URIs while using registry mirror with proxy cache. 
+ ## [v0.19.1](https://github.com/aws/eks-anywhere/releases/tag/v0.19.1) ### Supported OS version details @@ -30,10 +152,12 @@ description: > |:-------------------:|:-------:|:----------:|:-------:|:----------:|:----:| | Ubuntu 20.04 | ✔ | ✔ | ✔ | — | ✔ | | Ubuntu 22.04 | ✔ | ✔ | ✔ | — | — | -| Bottlerocket 1.19.2 | ✔ | ✔ | — | — | — | +| Bottlerocket 1.19.2 | ✔ | \* | — | — | — | | RHEL 8.x | ✔ | ✔ | ✔ | ✔ | — | | RHEL 9.x | — | — | ✔ | ✔ | — | +\* [EKS Anywhere issue regarding deprecation of Bottlerocket bare metal variants](https://github.com/aws/eks-anywhere/issues/7754) + ### Changed - Upgraded EKS-D: - `v1-25-eks-32` to [`v1-25-eks-34`](https://distro.eks.amazonaws.com/releases/1-25/34/) @@ -58,14 +182,17 @@ description: > |:-------------------:|:-------:|:----------:|:-------:|:----------:|:----:| | Ubuntu 20.04 | ✔ | ✔ | ✔ | — | ✔ | | Ubuntu 22.04 | ✔ | ✔ | ✔ | — | — | -| Bottlerocket 1.19.0 | ✔ | ✔ | — | — | — | +| Bottlerocket 1.19.0 | ✔ | \* | — | — | — | | RHEL 8.x | ✔ | ✔ | ✔ | ✔ | — | | RHEL 9.x | — | — | ✔ | ✔ | — | +\* [EKS Anywhere issue regarding deprecation of Bottlerocket bare metal variants](https://github.com/aws/eks-anywhere/issues/7754) + ### Added - Support for Kubernetes v1.29 - Support for in-place EKS Anywhere and Kubernetes version upgrades on Bare Metal clusters - Support for horizontally scaling `etcd` count in clusters with external `etcd` deployments ([#7127](https://github.com/aws/eks-anywhere/pull/7127)) +- External `etcd` support for Nutanix ([#7550](https://github.com/aws/eks-anywhere/pull/7550)) - Etcd encryption for Nutanix ([#7565](https://github.com/aws/eks-anywhere/pull/7565)) - Nutanix Cloud Controller Manager integration ([#7534](https://github.com/aws/eks-anywhere/pull/7534)) - Enable image signing for all images used in cluster operations diff --git a/docs/layouts/partials/navbar-version-selector.html b/docs/layouts/partials/navbar-version-selector.html new file mode 100644 index 000000000000..6475f87141c0 --- /dev/null +++ b/docs/layouts/partials/navbar-version-selector.html @@ -0,0 +1,9 @@ + + \ No newline at end of file diff --git a/go.mod b/go.mod index 71dc1b541c50..0c9a3399e8b1 100644 --- a/go.mod +++ b/go.mod @@ -5,18 +5,18 @@ go 1.21 require ( github.com/Masterminds/sprig v2.22.0+incompatible github.com/aws/aws-sdk-go v1.50.36 - github.com/aws/aws-sdk-go-v2 v1.25.3 + github.com/aws/aws-sdk-go-v2 v1.26.1 github.com/aws/aws-sdk-go-v2/config v1.26.6 github.com/aws/aws-sdk-go-v2/credentials v1.17.7 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.3 github.com/aws/aws-sdk-go-v2/service/ec2 v1.146.0 - github.com/aws/aws-sdk-go-v2/service/ecr v1.24.7 + github.com/aws/aws-sdk-go-v2/service/ecr v1.27.4 github.com/aws/eks-anywhere-packages v0.3.9 github.com/aws/eks-anywhere/internal/aws-sdk-go-v2/service/snowballdevice v0.0.0-00010101000000-000000000000 github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e github.com/aws/etcdadm-bootstrap-provider v1.0.12 github.com/aws/etcdadm-controller v1.0.19 - github.com/aws/smithy-go v1.20.1 + github.com/aws/smithy-go v1.20.2 github.com/bmc-toolbox/bmclib/v2 v2.1.1-0.20231206130132-1063371b9ed6 github.com/docker/cli v25.0.3+incompatible github.com/ghodss/yaml v1.0.0 @@ -28,8 +28,8 @@ require ( github.com/golang/mock v1.6.0 github.com/google/go-cmp v0.6.0 github.com/google/go-github/v35 v35.3.0 - github.com/google/uuid v1.5.0 - github.com/nutanix-cloud-native/cluster-api-provider-nutanix v1.3.1 + github.com/google/uuid v1.6.0 + 
github.com/nutanix-cloud-native/cluster-api-provider-nutanix v1.3.2 github.com/nutanix-cloud-native/prism-go-client v0.3.4 github.com/onsi/gomega v1.30.0 github.com/opencontainers/image-spec v1.1.0 @@ -37,26 +37,25 @@ require ( github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.18.2 - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 github.com/tinkerbell/cluster-api-provider-tinkerbell v0.1.1-0.20220615214617-9e9c2a397288 github.com/tinkerbell/tink v0.8.0 - github.com/vmware/govmomi v0.34.2 + github.com/vmware/govmomi v0.37.2 go.uber.org/zap v1.26.0 - golang.org/x/crypto v0.19.0 + golang.org/x/crypto v0.21.0 golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa - golang.org/x/net v0.20.0 + golang.org/x/net v0.23.0 golang.org/x/oauth2 v0.15.0 - golang.org/x/sys v0.17.0 golang.org/x/text v0.14.0 gopkg.in/ini.v1 v1.67.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 - helm.sh/helm/v3 v3.14.2 - k8s.io/api v0.29.3 - k8s.io/apimachinery v0.29.3 - k8s.io/apiserver v0.29.3 - k8s.io/client-go v0.29.3 - k8s.io/component-base v0.29.3 + helm.sh/helm/v3 v3.14.4 + k8s.io/api v0.29.5 + k8s.io/apimachinery v0.29.5 + k8s.io/apiserver v0.29.5 + k8s.io/client-go v0.29.5 + k8s.io/component-base v0.29.5 k8s.io/klog/v2 v2.110.1 k8s.io/utils v0.0.0-20240102154912-e7106e64919e oras.land/oras-go v1.2.5 @@ -84,8 +83,8 @@ require ( github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect github.com/VictorLowther/simplexml v0.0.0-20180716164440-0bff93621230 // indirect github.com/VictorLowther/soap v0.0.0-20150314151524-8e36fca84b22 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.3 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.5 // indirect @@ -99,7 +98,7 @@ require ( github.com/bmc-toolbox/common v0.0.0-20230717121556-5eb9915a8a5a // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cloudflare/circl v1.3.7 // indirect - github.com/containerd/containerd v1.7.11 // indirect + github.com/containerd/containerd v1.7.12 // indirect github.com/containerd/log v0.1.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect @@ -183,7 +182,8 @@ require ( go.uber.org/multierr v1.11.0 // indirect golang.org/x/mod v0.14.0 // indirect golang.org/x/sync v0.6.0 // indirect - golang.org/x/term v0.17.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 // indirect golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.17.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect @@ -196,6 +196,7 @@ require ( k8s.io/apiextensions-apiserver v0.29.1 // indirect k8s.io/cluster-bootstrap v0.28.5 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/kubelet v0.29.5 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index 710947e25135..d6e077f4e876 100644 --- a/go.sum +++ b/go.sum @@ -124,24 +124,24 @@ github.com/aws/aws-sdk-go v1.38.40/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2z github.com/aws/aws-sdk-go 
v1.42.23/go.mod h1:gyRszuZ/icHmHAVE4gc/r+cfCmhA1AD+vqfWbgI+eHs= github.com/aws/aws-sdk-go v1.50.36 h1:PjWXHwZPuTLMR1NIb8nEjLucZBMzmf84TLoLbD8BZqk= github.com/aws/aws-sdk-go v1.50.36/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= -github.com/aws/aws-sdk-go-v2 v1.25.3 h1:xYiLpZTQs1mzvz5PaI6uR0Wh57ippuEthxS4iK5v0n0= -github.com/aws/aws-sdk-go-v2 v1.25.3/go.mod h1:35hUlJVYd+M++iLI3ALmVwMOyRYMmRqUXpTtRGW+K9I= +github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA= +github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= github.com/aws/aws-sdk-go-v2/config v1.26.6 h1:Z/7w9bUqlRI0FFQpetVuFYEsjzE3h7fpU6HuGmfPL/o= github.com/aws/aws-sdk-go-v2/config v1.26.6/go.mod h1:uKU6cnDmYCvJ+pxO9S4cWDb2yWWIH5hra+32hVh1MI4= github.com/aws/aws-sdk-go-v2/credentials v1.17.7 h1:WJd+ubWKoBeRh7A5iNMnxEOs982SyVKOJD+K8HIezu4= github.com/aws/aws-sdk-go-v2/credentials v1.17.7/go.mod h1:UQi7LMR0Vhvs+44w5ec8Q+VS+cd10cjwgHwiVkE0YGU= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.3 h1:p+y7FvkK2dxS+FEwRIDHDe//ZX+jDhP8HHE50ppj4iI= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.3/go.mod h1:/fYB+FZbDlwlAiynK9KDXlzZl3ANI9JkD0Uhz5FjNT4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.3 h1:ifbIbHZyGl1alsAhPIYsHOg5MuApgqOvVeI8wIugXfs= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.3/go.mod h1:oQZXg3c6SNeY6OZrDY+xHcF4VGIEoNotX2B4PrDeoJI= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.3 h1:Qvodo9gHG9F3E8SfYOspPeBt0bjSbsevK8WhRAUHcoY= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.3/go.mod h1:vCKrdLXtybdf/uQd/YfVR2r5pcbNuEYKzMQpcxmeSJw= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 h1:aw39xVGeRWlWx9EzGVnhOR4yOjQDHPQ6o6NmBlscyQg= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 h1:PG1F3OD1szkuQPzDw3CIQsRIrtTlUC3lP84taWzHlq0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc= github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 h1:n3GDfwqF2tzEkXlv5cuy4iy7LpKDtqDMcNLfZDu9rls= github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= github.com/aws/aws-sdk-go-v2/service/ec2 v1.146.0 h1:d6pYx/CKADORpxqBINY7DuD4V1fjcj3IoeTPQilCw4Q= github.com/aws/aws-sdk-go-v2/service/ec2 v1.146.0/go.mod h1:hIsHE0PaWAQakLCshKS7VKWMGXaqrAFp4m95s2W9E6c= -github.com/aws/aws-sdk-go-v2/service/ecr v1.24.7 h1:3iaT/LnGV6jNtbBkvHZDlzz7Ky3wMHDJAyFtGd5GUJI= -github.com/aws/aws-sdk-go-v2/service/ecr v1.24.7/go.mod h1:mtzCLxk6M+KZbkJdq3cUH9GCrudw8qCy5C3EHO+5vLc= +github.com/aws/aws-sdk-go-v2/service/ecr v1.27.4 h1:Qr9W21mzWT3RhfYn9iAux7CeRIdbnTAqmiOlASqQgZI= +github.com/aws/aws-sdk-go-v2/service/ecr v1.27.4/go.mod h1:if7ybzzjOmDB8pat9FE35AHTY6ZxlYSy3YviSmFZv8c= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 h1:EyBZibRTVAs6ECHZOw5/wlylS9OcTzwyjeQMudmREjE= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1/go.mod h1:JKpmtYhhPs7D97NL/ltqz7yCkERFW5dOlHyVl66ZYF8= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.5 h1:K/NXvIftOlX+oGgWGIa3jDyYLDNsdVhsjHmsBH2GLAQ= @@ -160,8 +160,8 @@ github.com/aws/etcdadm-bootstrap-provider v1.0.12 h1:jSUKR+2wETNpjmYmtEC2a/SBbul github.com/aws/etcdadm-bootstrap-provider v1.0.12/go.mod h1:6hc4wSlAkioU7EAGCW8fg2F+w42OTgLxjs4/nVzxPQw= github.com/aws/etcdadm-controller v1.0.19 h1:AC6LLHb6hb02Fus3RanUvzJeRoiORGZQ3/d/UjKbsHY= 
github.com/aws/etcdadm-controller v1.0.19/go.mod h1:L710y0if8mrJhCmOQSUJF+9QcEOiemd4jXkKIc5Oeok= -github.com/aws/smithy-go v1.20.1 h1:4SZlSlMr36UEqC7XOyRVb27XMeZubNcBNN+9IgEPIQw= -github.com/aws/smithy-go v1.20.1/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= +github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q= +github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -209,8 +209,8 @@ github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWH github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= -github.com/containerd/containerd v1.7.11 h1:lfGKw3eU35sjV0aG2eYZTiwFEY1pCzxdzicHP3SZILw= -github.com/containerd/containerd v1.7.11/go.mod h1:5UluHxHTX2rdvYuZ5OJTC5m/KJNs0Zs9wVoJm9zf5ZE= +github.com/containerd/containerd v1.7.12 h1:+KQsnv4VnzyxWcfO9mlxxELaoztsDEjOuCMPAuPqgU0= +github.com/containerd/containerd v1.7.12/go.mod h1:/5OMpE1p0ylxtEUGY8kuCYkDRzJm9NO1TFMWjUpdevk= github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM= github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= @@ -538,8 +538,8 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= @@ -708,8 +708,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nutanix-cloud-native/cluster-api-provider-nutanix v1.3.1 h1:ugJfylfF06dnL/yi7GF1tC3S2CJrkQFDPjv5qYrrQGM= -github.com/nutanix-cloud-native/cluster-api-provider-nutanix v1.3.1/go.mod h1:wphe4ijJBkkMdg2ZScO/l7K/5RBAjhBGm3RsMbVjkow= +github.com/nutanix-cloud-native/cluster-api-provider-nutanix v1.3.2 
h1:iFKeVVqMoz2VMAngWMvq89guMmhNUAfyw/cKRrFqD+c= +github.com/nutanix-cloud-native/cluster-api-provider-nutanix v1.3.2/go.mod h1:wphe4ijJBkkMdg2ZScO/l7K/5RBAjhBGm3RsMbVjkow= github.com/nutanix-cloud-native/prism-go-client v0.3.4 h1:bHY3VPrHHYnbRtkpGaKK+2ZmvUjNVRC55CYZbXIfnOk= github.com/nutanix-cloud-native/prism-go-client v0.3.4/go.mod h1:tTIH02E6o6AWSShr98QChoxuZl+jBhkXFixom9+fd1Y= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= @@ -856,8 +856,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -881,8 +882,8 @@ github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv github.com/vektah/gqlparser/v2 v2.2.0/go.mod h1:i3mQIGIrbK2PD1RrCeMTlVbkF2FJ6WkU1KJlJlC+3F4= github.com/vektah/gqlparser/v2 v2.4.5 h1:C02NsyEsL4TXJB7ndonqTfuQOL4XPIu0aAWugdmTgmc= github.com/vektah/gqlparser/v2 v2.4.5/go.mod h1:flJWIR04IMQPGz+BXLrORkrARBxv/rtyIAFvd/MceW0= -github.com/vmware/govmomi v0.34.2 h1:o6ydkTVITOkpQU6HAf6tP5GvHFCNJlNUNlMsvFK77X4= -github.com/vmware/govmomi v0.34.2/go.mod h1:qWWT6n9mdCr/T9vySsoUqcI04sSEj4CqHXxtk/Y+Los= +github.com/vmware/govmomi v0.37.2 h1:5ANLoaTxWv600ZnoosJ2zXbM3A+EaxqGheEZbRN8YVE= +github.com/vmware/govmomi v0.37.2/go.mod h1:mtGWtM+YhTADHlCgJBiskSRPOZRsN9MSjPzaZLte/oQ= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= @@ -974,8 +975,9 @@ golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1076,8 +1078,8 @@ golang.org/x/net v0.2.0/go.mod 
h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1184,8 +1186,9 @@ golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1193,8 +1196,9 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1454,8 +1458,8 @@ gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= -helm.sh/helm/v3 v3.14.2 h1:V71fv+NGZv0icBlr+in1MJXuUIHCiPG1hW9gEBISTIA= -helm.sh/helm/v3 v3.14.2/go.mod h1:2itvvDv2WSZXTllknfQo6j7u3VVgMAvm8POCDgYH424= +helm.sh/helm/v3 v3.14.4 h1:6FSpEfqyDalHq3kUr4gOMThhgY55kXUEjdQoyODYnrM= +helm.sh/helm/v3 v3.14.4/go.mod h1:Tje7LL4gprZpuBNTbG34d1Xn5NmRT3OWfBRwpOSer9I= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1465,29 +1469,29 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.17.2/go.mod h1:BS9fjjLc4CMuqfSO8vgbHPKMt5+SF0ET6u/RVDihTo4= k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg= -k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= -k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= +k8s.io/api v0.29.5 h1:levS+umUigHCfI3riD36pMY1vQEbrzh4r1ivVWAhHaI= +k8s.io/api v0.29.5/go.mod h1:7b18TtPcJzdjk7w5zWyIHgoAtpGeRvGGASxlS7UZXdQ= k8s.io/apiextensions-apiserver v0.17.2/go.mod h1:4KdMpjkEjjDI2pPfBA15OscyNldHWdBCfsWMDWAmSTs= k8s.io/apiextensions-apiserver v0.29.1 h1:S9xOtyk9M3Sk1tIpQMu9wXHm5O2MX6Y1kIpPMimZBZw= k8s.io/apiextensions-apiserver v0.29.1/go.mod h1:zZECpujY5yTW58co8V2EQR4BD6A9pktVgHhvc0uLfeU= k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= k8s.io/apimachinery v0.24.3/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= -k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= -k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= +k8s.io/apimachinery v0.29.5 h1:Hofa2BmPfpoT+IyDTlcPdCHSnHtEQMoJYGVoQpRTfv4= +k8s.io/apimachinery v0.29.5/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y= k8s.io/apiserver v0.17.2/go.mod h1:lBmw/TtQdtxvrTk0e2cgtOxHizXI+d0mmGQURIHQZlo= -k8s.io/apiserver v0.29.3 h1:xR7ELlJ/BZSr2n4CnD3lfA4gzFivh0wwfNfz9L0WZcE= -k8s.io/apiserver v0.29.3/go.mod h1:hrvXlwfRulbMbBgmWRQlFru2b/JySDpmzvQwwk4GUOs= +k8s.io/apiserver v0.29.5 h1:223C+JkRnGmudEU00GfpX6quDSrzwwP0DuXOYTyUYb0= +k8s.io/apiserver v0.29.5/go.mod h1:zN9xdatz5g7XwL1Xoz9hD4QQON1GN0c+1kV5e/NHejM= k8s.io/client-go v0.17.2/go.mod h1:QAzRgsa0C2xl4/eVpeVAZMvikCn8Nm81yqVx3Kk9XYI= k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30= -k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= -k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= +k8s.io/client-go v0.29.5 h1:nlASXmPQy190qTteaVP31g3c/wi2kycznkTP7Sv1zPc= +k8s.io/client-go v0.29.5/go.mod h1:aY5CnqUUvXYccJhm47XHoPcRyX6vouHdIBHaKZGTbK4= k8s.io/cluster-bootstrap v0.28.5 h1:KyFY6l5xK5oxRjjGotgivlbQ0AReRctMMoNpxSJaJxM= k8s.io/cluster-bootstrap v0.28.5/go.mod h1:nJzrDb8AWtUm1RSoXx+lDb2f7i54Ndfx4v8x3s4kZ2Y= k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= k8s.io/component-base v0.17.2/go.mod h1:zMPW3g5aH7cHJpKYQ/ZsGMcgbsA/VyhEugF3QT1awLs= -k8s.io/component-base v0.29.3 h1:Oq9/nddUxlnrCuuR2K/jp6aflVvc0uDvxMzAWxnGzAo= -k8s.io/component-base v0.29.3/go.mod h1:Yuj33XXjuOk2BAaHsIGHhCKZQAgYKhqIxIjIr2UXYio= +k8s.io/component-base v0.29.5 h1:Ptj8AzG+p8c2a839XriHwxakDpZH9uvIgYz+o1agjg8= +k8s.io/component-base v0.29.5/go.mod h1:9nBUoPxW/yimISIgAG7sJDrUGJlu7t8HnDafIrOdU8Q= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod 
h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= @@ -1503,6 +1507,8 @@ k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKf k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/kubelet v0.29.5 h1:tYYyc2JcrDt8jFYTsKpgcIpp+S5a/nm85CY4liosprw= +k8s.io/kubelet v0.29.5/go.mod h1:eWJR0OtRRkLwKEYjsQXcTyTZlSfgR3Py1xJVFa0ISTk= k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= diff --git a/internal/pkg/api/cluster.go b/internal/pkg/api/cluster.go index 07cb59fe0660..a2fd6f03b58a 100644 --- a/internal/pkg/api/cluster.go +++ b/internal/pkg/api/cluster.go @@ -271,7 +271,7 @@ func WithProxyConfig(httpProxy, httpsProxy string, noProxy []string) ClusterFill } // WithRegistryMirror adds a registry mirror configuration. -func WithRegistryMirror(endpoint, port string, caCert string, authenticate bool, insecureSkipVerify bool) ClusterFiller { +func WithRegistryMirror(endpoint, port string, caCert string, authenticate bool, insecureSkipVerify bool, ociNamespaces ...anywherev1.OCINamespace) ClusterFiller { return func(c *anywherev1.Cluster) { if c.Spec.RegistryMirrorConfiguration == nil { c.Spec.RegistryMirrorConfiguration = &anywherev1.RegistryMirrorConfiguration{} @@ -281,6 +281,10 @@ func WithRegistryMirror(endpoint, port string, caCert string, authenticate bool, c.Spec.RegistryMirrorConfiguration.CACertContent = caCert c.Spec.RegistryMirrorConfiguration.Authenticate = authenticate c.Spec.RegistryMirrorConfiguration.InsecureSkipVerify = insecureSkipVerify + + if len(ociNamespaces) != 0 { + c.Spec.RegistryMirrorConfiguration.OCINamespaces = ociNamespaces + } } } diff --git a/internal/pkg/conformance/download.go b/internal/pkg/conformance/download.go index e51028c27f52..d6e85957ba19 100644 --- a/internal/pkg/conformance/download.go +++ b/internal/pkg/conformance/download.go @@ -1,37 +1,57 @@ package conformance import ( - "bytes" + "encoding/json" "fmt" - - "golang.org/x/sys/unix" + "io" + "net/http" + "runtime" + "strings" "github.com/aws/eks-anywhere/internal/pkg/files" ) const ( - destinationFile = "sonobuoy" - sonobuoyDarwin = "https://github.com/vmware-tanzu/sonobuoy/releases/download/v0.53.2/sonobuoy_0.53.2_darwin_amd64.tar.gz" - sonobuoyLinux = "https://github.com/vmware-tanzu/sonobuoy/releases/download/v0.53.2/sonobuoy_0.53.2_linux_amd64.tar.gz" + destinationFile = "sonobuoy" + sonobouyGitHubAPI = "https://api.github.com/repos/vmware-tanzu/sonobuoy/releases/latest" ) +type githubRelease struct { + Assets []asset `json:"assets"` +} + +type asset struct { + BrowserDownloadURL string `json:"browser_download_url"` +} + func Download() error { - var err error - var utsname unix.Utsname - err = unix.Uname(&utsname) + resp, err := http.Get(sonobouyGitHubAPI) + if err != nil { + return fmt.Errorf("getting latest sonobouy version from GitHub: %v", err) + } + body, err := io.ReadAll(resp.Body) if err != nil { - return fmt.Errorf("uname call failure: %v", err) + return fmt.Errorf("reading the response body for sonobouy release: %v", err) + } + + 
sonobouyRelease := githubRelease{} + if err := json.Unmarshal(body, &sonobouyRelease); err != nil { + return fmt.Errorf("unmarshalling the response body for sonobouy release: %v", err) } - var downloadFile string - sysname := string(bytes.Trim(utsname.Sysname[:], "\x00")) - if sysname == "Darwin" { - downloadFile = sonobuoyDarwin - } else { - downloadFile = sonobuoyLinux + downloadURL := "" + for _, asset := range sonobouyRelease.Assets { + if strings.Contains(asset.BrowserDownloadURL, runtime.GOOS) && strings.Contains(asset.BrowserDownloadURL, runtime.GOARCH) { + downloadURL = asset.BrowserDownloadURL + } } - fmt.Println("Downloading sonobuoy for " + sysname + ": " + downloadFile) - err = files.GzipFileDownloadExtract(downloadFile, destinationFile, "") + + if downloadURL == "" { + return fmt.Errorf("no binaries found for sonobouy for OS %s and ARCH %s", runtime.GOOS, runtime.GOARCH) + } + + fmt.Printf("Downloading sonobuoy from %s\n", downloadURL) + err = files.GzipFileDownloadExtract(downloadURL, destinationFile, "") if err != nil { return fmt.Errorf("failed to download sonobouy: %v", err) } diff --git a/internal/pkg/ssm/command.go b/internal/pkg/ssm/command.go index bd25e7abb910..60d50880e6d0 100644 --- a/internal/pkg/ssm/command.go +++ b/internal/pkg/ssm/command.go @@ -24,7 +24,7 @@ var initE2EDirCommand = "mkdir -p /home/e2e/bin && cd /home/e2e" // WaitForSSMReady waits for the SSM command to be ready. func WaitForSSMReady(session *session.Session, instanceID string, timeout time.Duration) error { - err := retrier.Retry(10, 20*time.Second, func() error { + err := retrier.Retry(20, 20*time.Second, func() error { return Run(session, logr.Discard(), instanceID, "ls", timeout) }) if err != nil { diff --git a/internal/test/cleanup/cleanup.go b/internal/test/cleanup/cleanup.go index fb2a42db833d..67c7ec16e334 100644 --- a/internal/test/cleanup/cleanup.go +++ b/internal/test/cleanup/cleanup.go @@ -3,27 +3,35 @@ package cleanup import ( "context" "fmt" + "os" "strconv" + "strings" "time" "github.com/aws/aws-sdk-go/aws/session" + "github.com/bmc-toolbox/bmclib/v2" + "github.com/go-logr/logr" prismgoclient "github.com/nutanix-cloud-native/prism-go-client" v3 "github.com/nutanix-cloud-native/prism-go-client/v3" + "github.com/aws/eks-anywhere/internal/pkg/api" "github.com/aws/eks-anywhere/internal/pkg/ec2" "github.com/aws/eks-anywhere/internal/pkg/s3" + "github.com/aws/eks-anywhere/pkg/errors" "github.com/aws/eks-anywhere/pkg/executables" "github.com/aws/eks-anywhere/pkg/filewriter" "github.com/aws/eks-anywhere/pkg/logger" "github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder" "github.com/aws/eks-anywhere/pkg/providers/nutanix" + "github.com/aws/eks-anywhere/pkg/providers/tinkerbell/hardware" "github.com/aws/eks-anywhere/pkg/retrier" "github.com/aws/eks-anywhere/pkg/validations" ) const ( - cleanupRetries = 5 - retryBackoff = 10 * time.Second + cleanupRetries = 5 + retryBackoff = 10 * time.Second + cloudstackNetworkVar = "T_CLOUDSTACK_NETWORK" ) func CleanUpAwsTestResources(storageBucket string, maxAge string, tag string) error { @@ -95,7 +103,9 @@ func VsphereRmVms(ctx context.Context, clusterName string, opts ...executables.G return govc.CleanupVms(ctx, clusterName, false) } -func CleanUpCloudstackTestResources(ctx context.Context, clusterName string, dryRun bool) error { +// CloudstackTestResources cleans up resources on the CloudStack environment. +// This can include VMs as well as duplicate networks. 
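+// When deleteDuplicateNetworks is true, duplicate CloudStack networks matching the T_CLOUDSTACK_NETWORK environment variable are also removed.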
+func CloudstackTestResources(ctx context.Context, clusterName string, dryRun bool, deleteDuplicateNetworks bool) error { executableBuilder, close, err := executables.InitInDockerExecutablesBuilder(ctx, executables.DefaultEksaImage()) if err != nil { return fmt.Errorf("unable to initialize executables: %v", err) @@ -128,11 +138,30 @@ func CleanUpCloudstackTestResources(ctx context.Context, clusterName string, dry if len(errorsMap) > 0 { return fmt.Errorf("cleaning up VMs: %+v", errorsMap) } + + return cleanupCloudstackDuplicateNetworks(ctx, cmk, execConfig, deleteDuplicateNetworks) +} + +func cleanupCloudstackDuplicateNetworks(ctx context.Context, cmk *executables.Cmk, execConfig *decoder.CloudStackExecConfig, deleteDuplicateNetworks bool) error { + if !deleteDuplicateNetworks { + return nil + } + + networkName, set := os.LookupEnv(cloudstackNetworkVar) + if !set { + return fmt.Errorf("ensuring no duplicate networks, %s is not set", cloudstackNetworkVar) + } + + for _, profile := range execConfig.Profiles { + if err := cmk.EnsureNoDuplicateNetwork(ctx, profile.Name, networkName); err != nil { + return err + } + } return nil } -// NutanixTestResourcesCleanup cleans up any leftover VMs in Nutanix after a test run. -func NutanixTestResourcesCleanup(ctx context.Context, clusterName, endpoint, port string, insecure, ignoreErrors bool) error { +// NutanixTestResources cleans up any leftover VMs in Nutanix after a test run. +func NutanixTestResources(clusterName, endpoint, port string, insecure, ignoreErrors bool) error { creds := nutanix.GetCredsFromEnv() nutanixCreds := prismgoclient.Credentials{ URL: fmt.Sprintf("%s:%s", endpoint, port), @@ -166,3 +195,86 @@ func NutanixTestResourcesCleanup(ctx context.Context, clusterName, endpoint, por } return nil } + +// TinkerbellTestResources cleans up machines by powering them down. 
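+// Hardware is read from the provided inventory CSV file and each machine is powered off through its BMC.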
+func TinkerbellTestResources(inventoryCSVFilePath string, ignoreErrors bool) error { + hardwarePool, err := api.NewHardwareMapFromFile(inventoryCSVFilePath) + if err != nil { + return fmt.Errorf("failed to create hardware map from inventory csv: %v", err) + } + + logger.Info("Powering off hardware: %+v", hardwarePool) + return powerOffHardwarePool(hardwarePool, ignoreErrors) +} + +func powerOffHardwarePool(hardware map[string]*hardware.Machine, ignoreErrors bool) error { + errList := []error{} + for _, h := range hardware { + if err := powerOffHardware(h, ignoreErrors); err != nil { + errList = append(errList, err) + } + } + + if len(errList) > 0 { + return fmt.Errorf("failed to power off %d hardware: %+v", len(errList), errors.NewAggregate(errList)) + } + + return nil +} + +func powerOffHardware(h *hardware.Machine, ignoreErrors bool) (reterror error) { + ctx, done := context.WithTimeout(context.Background(), 2*time.Minute) + defer done() + bmcClient := newBmclibClient(logr.Discard(), h.BMCIPAddress, h.BMCUsername, h.BMCPassword) + + if err := bmcClient.Open(ctx); err != nil { + md := bmcClient.GetMetadata() + logger.Info("Warning: Failed to open connection to BMC: %v, hardware: %v, providersAttempted: %v, failedProviderDetail: %v", err, h.BMCIPAddress, md.ProvidersAttempted, md.SuccessfulOpenConns) + return handlePowerOffHardwareError(err, ignoreErrors) + } + + md := bmcClient.GetMetadata() + logger.Info("Connected to BMC: hardware: %v, providersAttempted: %v, successfulProvider: %v", h.BMCIPAddress, md.ProvidersAttempted, md.SuccessfulOpenConns) + + defer func() { + if err := bmcClient.Close(ctx); err != nil { + md := bmcClient.GetMetadata() + logger.Info("Warning: BMC close connection failed: %v, hardware: %v, providersAttempted: %v, failedProviderDetail: %v", err, h.BMCIPAddress, md.ProvidersAttempted, md.FailedProviderDetail) + reterror = handlePowerOffHardwareError(err, ignoreErrors) + } + }() + + state, err := bmcClient.GetPowerState(ctx) + if err != nil { + state = "unknown" + } + if strings.Contains(strings.ToLower(state), "off") { + return nil + } + + if _, err := bmcClient.SetPowerState(ctx, "off"); err != nil { + md := bmcClient.GetMetadata() + logger.Info("Warning: failed to power off hardware: %v, hardware: %v, providersAttempted: %v, failedProviderDetail: %v", err, h.BMCIPAddress, md.ProvidersAttempted, md.SuccessfulOpenConns) + return handlePowerOffHardwareError(err, ignoreErrors) + } + + return nil +} + +func handlePowerOffHardwareError(err error, ignoreErrors bool) error { + if err != nil && !ignoreErrors { + return err + } + return nil +} + +// newBmclibClient creates a new BMClib client. +func newBmclibClient(log logr.Logger, hostIP, username, password string) *bmclib.Client { + o := []bmclib.Option{} + log = log.WithValues("host", hostIP, "username", username) + o = append(o, bmclib.WithLogger(log)) + client := bmclib.NewClient(hostIP, username, password, o...) 
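+	// Prefer the Redfish provider when opening connections to the BMC.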
+ client.Registry.Drivers = client.Registry.PreferProtocol("redfish") + + return client +} diff --git a/internal/test/e2e/cloudwatch.go b/internal/test/e2e/cloudwatch.go new file mode 100644 index 000000000000..461e8ab2f972 --- /dev/null +++ b/internal/test/e2e/cloudwatch.go @@ -0,0 +1,96 @@ +package e2e + +import ( + "fmt" + "os" + "regexp" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudwatch" + + "github.com/aws/eks-anywhere/pkg/logger" +) + +var svc *cloudwatch.CloudWatch + +const integrationTestCloudWatchNamespaceOverrideEnvVar = "INTEGRATION_TEST_CLOUDWATCH_NAMESPACE_OVERRIDE" + +func init() { + if s, err := session.NewSession(); err == nil { + svc = cloudwatch.New(s) + } else { + fmt.Println("Cannot create CloudWatch service", err) + } +} + +func putInstanceTestResultMetrics(r instanceTestsResults) { + if svc == nil { + logger.Info("Cannot publish metrics as cloudwatch service was not initialized") + return + } + + logger.Info("Publishing instance test result metrics") + // Note 0 metrics are emitted for the purpose of aggregation. For example, when the succeededCount metrics are [0, 1, 0, 1], we can calculate the success rate as 2 / 4 = 50%. However, when 0 are excluded, the metrics becomes [1, 1], and you would not be able to calculate the success rate from that series. + erroredCount, failedCount, succeededCount := 0, 0, 0 + if r.err != nil { + erroredCount = 1 + } else if !r.testCommandResult.Successful() { + failedCount = 1 + } else { + succeededCount = 1 + } + + data := &cloudwatch.MetricDatum{ + Unit: aws.String("Count"), + Dimensions: []*cloudwatch.Dimension{ + { + Name: aws.String("Provider"), + Value: aws.String(getProviderName(r.conf.Regex)), + }, + { + Name: aws.String("BranchName"), + Value: aws.String(r.conf.BranchName), + }, + }, + Timestamp: aws.Time(time.Now()), + } + putMetric(data, "ErroredInstanceTests", erroredCount) + putMetric(data, "FailedInstanceTests", failedCount) + putMetric(data, "SucceededInstanceTests", succeededCount) + + // TODO: publish time metrics + logger.Info("Test instance metrics published") +} + +func getProviderName(testRe string) string { + providerRe := regexp.MustCompile(`Test((?i:vsphere)|(?i:cloudstack)|(?i:snow)|(?i:docker)|(?i:nutanix)|(?i:tinkerbell))`) + provider := []byte("Unknown") + t := providerRe.FindSubmatch([]byte(testRe)) + if len(t) > 1 { + provider = t[1] + } + return strings.ToLower(string(provider)) +} + +func putMetric(data *cloudwatch.MetricDatum, metricName string, value int) { + data.MetricName = aws.String(metricName) + data.Value = aws.Float64(float64(value)) + + namespace := "EksaE2ETests" + namespaceOverride := os.Getenv(integrationTestCloudWatchNamespaceOverrideEnvVar) + if namespaceOverride != "" { + namespace = namespaceOverride + } + + if _, err := svc.PutMetricData(&cloudwatch.PutMetricDataInput{ + Namespace: aws.String(namespace), + MetricData: []*cloudwatch.MetricDatum{data}, + }); err != nil { + logger.Error(err, "Cannot put metrics to cloudwatch", "metricName", metricName, "value", value) + } else { + logger.Info("Instance test result metrics published", "metricName", metricName, "value", value) + } +} diff --git a/internal/test/e2e/registryMirror.go b/internal/test/e2e/registryMirror.go index cc8c7fdf49fc..c09b1d65c1d2 100644 --- a/internal/test/e2e/registryMirror.go +++ b/internal/test/e2e/registryMirror.go @@ -56,7 +56,9 @@ func (e *E2ESession) setupRegistryMirrorEnv(testRegex string) error { } if endpoint != 
"" && port != "" && caCert != "" { - return e.mountRegistryCert(caCert, net.JoinHostPort(endpoint, port)) + if err := e.mountRegistryCert(caCert, net.JoinHostPort(endpoint, port)); err != nil { + return err + } } re = regexp.MustCompile(`^.*Docker.*Airgapped.*$`) @@ -71,6 +73,16 @@ func (e *E2ESession) setupRegistryMirrorEnv(testRegex string) error { } } + re = regexp.MustCompile(`^.*OciNamespaces.*$`) + if re.MatchString(testRegex) { + ociNamespacesEnvVar := e2etests.RequiredOciNamespacesEnvVars() + for _, eVar := range ociNamespacesEnvVar { + if val, ok := os.LookupEnv(eVar); ok { + e.testEnvVars[eVar] = val + } + } + } + return nil } diff --git a/internal/test/e2e/run.go b/internal/test/e2e/run.go index fa4c61b4899b..e5b97abaa5b9 100644 --- a/internal/test/e2e/run.go +++ b/internal/test/e2e/run.go @@ -47,7 +47,7 @@ type ParallelRunConf struct { Regex string TestsToSkip []string BundlesOverride bool - CleanupVms bool + CleanupResources bool TestReportFolder string BranchName string BaremetalBranchName string @@ -125,7 +125,7 @@ func RunTestsInParallel(conf ParallelRunConf) error { for c := range work { r := instanceTestsResults{conf: c} - r.conf.instanceId, r.testCommandResult, err = RunTests(c, invCatalogue) + r.conf.InstanceID, r.testCommandResult, err = RunTests(c, invCatalogue) if err != nil { r.err = err } @@ -146,29 +146,34 @@ func RunTestsInParallel(conf ParallelRunConf) error { completedInstances := 0 for r := range results { var result string + // This variable can be used in cloudwatch log insights query for e2e test success rate + succeeded := 0 // TODO: keeping the old logs temporarily for compatibility with the test tool // Once the tool is updated to support the unified message, remove them if r.err != nil { result = testResultError - conf.Logger.Error(r.err, "Failed running e2e tests for instance", "jobId", r.conf.jobId, "instanceId", r.conf.instanceId, "tests", r.conf.regex, "status", testResultFail) + conf.Logger.Error(r.err, "Failed running e2e tests for instance", "jobId", r.conf.JobID, "instanceId", r.conf.InstanceID, "tests", r.conf.Regex, "status", testResultFail) failedInstances++ } else if !r.testCommandResult.Successful() { result = testResultFail - conf.Logger.Info("An e2e instance run has failed", "jobId", r.conf.jobId, "instanceId", r.conf.instanceId, "commandId", r.testCommandResult.CommandId, "tests", r.conf.regex, "status", testResultFail) + conf.Logger.Info("An e2e instance run has failed", "jobId", r.conf.JobID, "instanceId", r.conf.InstanceID, "commandId", r.testCommandResult.CommandId, "tests", r.conf.Regex, "status", testResultFail) failedInstances++ } else { result = testResultPass - conf.Logger.Info("Instance tests completed successfully", "jobId", r.conf.jobId, "instanceId", r.conf.instanceId, "commandId", r.testCommandResult.CommandId, "tests", r.conf.regex, "status", testResultPass) + succeeded = 1 + conf.Logger.Info("Instance tests completed successfully", "jobId", r.conf.JobID, "instanceId", r.conf.InstanceID, "commandId", r.testCommandResult.CommandId, "tests", r.conf.Regex, "status", testResultPass) } completedInstances++ conf.Logger.Info("Instance tests run finished", "result", result, - "tests", r.conf.regex, - "jobId", r.conf.jobId, - "instanceId", r.conf.instanceId, + "tests", r.conf.Regex, + "jobId", r.conf.JobID, + "instanceId", r.conf.InstanceID, "completedInstances", completedInstances, "totalInstances", totalInstances, + "succeeded", succeeded, ) + putInstanceTestResultMetrics(r) } if failedInstances > 0 { @@ -179,29 +184,35 @@ func 
RunTestsInParallel(conf ParallelRunConf) error { } type instanceRunConf struct { - session *session.Session - instanceProfileName, storageBucket, jobId, parentJobId, regex, instanceId string - testReportFolder, branchName string - ipPool networkutils.IPPool - hardware []*api.Hardware - hardwareCount int - tinkerbellAirgappedTest bool - bundlesOverride bool - testRunnerType TestRunnerType - testRunnerConfig TestInfraConfig - cleanupVms bool - logger logr.Logger + InstanceProfileName string + StorageBucket string + JobID string + ParentJobID string + Regex string + InstanceID string + TestReportFolder string + BranchName string + IPPool networkutils.IPPool + Hardware []*api.Hardware + HardwareCount int + TinkerbellAirgappedTest bool + BundlesOverride bool + TestRunnerType TestRunnerType + TestRunnerConfig TestInfraConfig + CleanupResources bool + Logger logr.Logger + Session *session.Session } //nolint:gocyclo, revive // RunTests responsible launching test runner to run tests is complex. func RunTests(conf instanceRunConf, inventoryCatalogue map[string]*hardwareCatalogue) (testInstanceID string, testCommandResult *testCommandResult, err error) { - testRunner, err := newTestRunner(conf.testRunnerType, conf.testRunnerConfig) + testRunner, err := newTestRunner(conf.TestRunnerType, conf.TestRunnerConfig) if err != nil { return "", nil, err } - if conf.hardwareCount > 0 { + if conf.HardwareCount > 0 { var hardwareCatalogue *hardwareCatalogue - if conf.tinkerbellAirgappedTest { + if conf.TinkerbellAirgappedTest { hardwareCatalogue = inventoryCatalogue[airgappedHardware] } else { hardwareCatalogue = inventoryCatalogue[nonAirgappedHardware] @@ -214,16 +225,25 @@ func RunTests(conf instanceRunConf, inventoryCatalogue map[string]*hardwareCatal defer releaseTinkerbellHardware(&conf, hardwareCatalogue) } + conf.Logger.Info("Creating runner instance", + "instance_profile_name", conf.InstanceProfileName, "storage_bucket", conf.StorageBucket, + "parent_job_id", conf.ParentJobID, "regex", conf.Regex, "test_report_folder", conf.TestReportFolder, + "branch_name", conf.BranchName, "ip_pool", conf.IPPool.ToString(), + "hardware_count", conf.HardwareCount, "tinkerbell_airgapped_test", conf.TinkerbellAirgappedTest, + "bundles_override", conf.BundlesOverride, "test_runner_type", conf.TestRunnerType, + "cleanup_resources", conf.CleanupResources) + instanceId, err := testRunner.createInstance(conf) if err != nil { return "", nil, err } - conf.logger.V(1).Info("TestRunner instance has been created", "instanceId", instanceId) + conf.Logger = conf.Logger.WithValues("instance_id", instanceId) + conf.Logger.Info("TestRunner instance has been created") defer func() { err := testRunner.decommInstance(conf) if err != nil { - conf.logger.V(1).Info("WARN: Failed to decomm e2e test runner instance", "error", err) + conf.Logger.V(1).Info("WARN: Failed to decomm e2e test runner instance", "error", err) } }() @@ -232,12 +252,12 @@ func RunTests(conf instanceRunConf, inventoryCatalogue map[string]*hardwareCatal return "", nil, err } - err = session.setup(conf.regex) + err = session.setup(conf.Regex) if err != nil { return session.instanceId, nil, err } - testCommandResult, err = session.runTests(conf.regex) + testCommandResult, err = session.runTests(conf.Regex) if err != nil { return session.instanceId, nil, err } @@ -288,13 +308,13 @@ func (e *E2ESession) runTests(regex string) (testCommandResult *testCommandResul } func (c instanceRunConf) runPostTestsProcessing(e *E2ESession, testCommandResult *testCommandResult) error { - 
regex := strings.Trim(c.regex, "\"") + regex := strings.Trim(c.Regex, "\"") tests := strings.Split(regex, "|") for _, testName := range tests { e.uploadJUnitReportFromInstance(testName) - if c.testReportFolder != "" { - e.downloadJUnitReportToLocalDisk(testName, c.testReportFolder) + if c.TestReportFolder != "" { + e.downloadJUnitReportToLocalDisk(testName, c.TestReportFolder) } if !testCommandResult.Successful() { @@ -486,30 +506,30 @@ func getTinkerbellTestsWithCount(tinkerbellTests []string, conf ParallelRunConf) func newInstanceRunConf(awsSession *session.Session, conf ParallelRunConf, jobNumber int, testRegex string, ipPool networkutils.IPPool, hardware []*api.Hardware, hardwareCount int, tinkerbellAirgappedTest bool, testRunnerType TestRunnerType, testRunnerConfig *TestInfraConfig) instanceRunConf { jobID := fmt.Sprintf("%s-%d", conf.JobId, jobNumber) return instanceRunConf{ - session: awsSession, - instanceProfileName: conf.InstanceProfileName, - storageBucket: conf.StorageBucket, - jobId: jobID, - parentJobId: conf.JobId, - regex: testRegex, - ipPool: ipPool, - hardware: hardware, - hardwareCount: hardwareCount, - tinkerbellAirgappedTest: tinkerbellAirgappedTest, - bundlesOverride: conf.BundlesOverride, - testReportFolder: conf.TestReportFolder, - branchName: conf.BranchName, - cleanupVms: conf.CleanupVms, - testRunnerType: testRunnerType, - testRunnerConfig: *testRunnerConfig, - logger: conf.Logger.WithValues("jobID", jobID, "test", testRegex), + Session: awsSession, + InstanceProfileName: conf.InstanceProfileName, + StorageBucket: conf.StorageBucket, + JobID: jobID, + ParentJobID: conf.JobId, + Regex: testRegex, + IPPool: ipPool, + Hardware: hardware, + HardwareCount: hardwareCount, + TinkerbellAirgappedTest: tinkerbellAirgappedTest, + BundlesOverride: conf.BundlesOverride, + TestReportFolder: conf.TestReportFolder, + BranchName: conf.BranchName, + CleanupResources: conf.CleanupResources, + TestRunnerType: testRunnerType, + TestRunnerConfig: *testRunnerConfig, + Logger: conf.Logger.WithValues("jobID", jobID, "test", testRegex), } } func logTestGroups(logger logr.Logger, instancesConf []instanceRunConf) { testGroups := make([]string, 0, len(instancesConf)) for _, i := range instancesConf { - testGroups = append(testGroups, i.regex) + testGroups = append(testGroups, i.Regex) } logger.V(1).Info("Running tests in parallel", "testsGroups", testGroups) } @@ -551,24 +571,24 @@ func getAirgappedHardwarePool(storageBucket string) ([]*api.Hardware, error) { } func reserveTinkerbellHardware(conf *instanceRunConf, invCatalogue *hardwareCatalogue) error { - reservedTinkerbellHardware, err := invCatalogue.reserveHardware(conf.hardwareCount) + reservedTinkerbellHardware, err := invCatalogue.reserveHardware(conf.HardwareCount) if err != nil { return fmt.Errorf("timed out waiting for hardware") } - conf.hardware = reservedTinkerbellHardware + conf.Hardware = reservedTinkerbellHardware logTinkerbellTestHardwareInfo(conf, "Reserved") return nil } func releaseTinkerbellHardware(conf *instanceRunConf, invCatalogue *hardwareCatalogue) { logTinkerbellTestHardwareInfo(conf, "Releasing") - invCatalogue.releaseHardware(conf.hardware) + invCatalogue.releaseHardware(conf.Hardware) } func logTinkerbellTestHardwareInfo(conf *instanceRunConf, action string) { var hardwareInfo []string - for _, hardware := range conf.hardware { + for _, hardware := range conf.Hardware { hardwareInfo = append(hardwareInfo, hardware.Hostname) } - conf.logger.V(1).Info(action+" hardware for TestRunner", "hardwarePool", 
strings.Join(hardwareInfo, ", ")) + conf.Logger.V(1).Info(action+" hardware for TestRunner", "hardwarePool", strings.Join(hardwareInfo, ", ")) } diff --git a/internal/test/e2e/setup.go b/internal/test/e2e/setup.go index 554d4c98f995..ee66c8bcd3c8 100644 --- a/internal/test/e2e/setup.go +++ b/internal/test/e2e/setup.go @@ -40,7 +40,7 @@ type E2ESession struct { ipPool networkutils.IPPool testEnvVars map[string]string bundlesOverride bool - cleanupVms bool + cleanup bool requiredFiles []string branchName string hardware []*api.Hardware @@ -49,19 +49,19 @@ type E2ESession struct { func newE2ESession(instanceId string, conf instanceRunConf) (*E2ESession, error) { e := &E2ESession{ - session: conf.session, + session: conf.Session, instanceId: instanceId, - instanceProfileName: conf.instanceProfileName, - storageBucket: conf.storageBucket, - jobId: conf.jobId, - ipPool: conf.ipPool, + instanceProfileName: conf.InstanceProfileName, + storageBucket: conf.StorageBucket, + jobId: conf.JobID, + ipPool: conf.IPPool, testEnvVars: make(map[string]string), - bundlesOverride: conf.bundlesOverride, - cleanupVms: conf.cleanupVms, + bundlesOverride: conf.BundlesOverride, + cleanup: conf.CleanupResources, requiredFiles: requiredFiles, - branchName: conf.branchName, - hardware: conf.hardware, - logger: conf.logger, + branchName: conf.BranchName, + hardware: conf.Hardware, + logger: conf.Logger, } return e, nil @@ -179,6 +179,7 @@ func (e *E2ESession) setup(regex string) error { } ipPool := e.ipPool.ToString() + if ipPool != "" { e.testEnvVars[e2etests.ClusterIPPoolEnvVar] = ipPool } @@ -186,7 +187,7 @@ func (e *E2ESession) setup(regex string) error { // Adding JobId to Test Env variables e.testEnvVars[e2etests.JobIdVar] = e.jobId e.testEnvVars[e2etests.BundlesOverrideVar] = strconv.FormatBool(e.bundlesOverride) - e.testEnvVars[e2etests.CleanupVmsVar] = strconv.FormatBool(e.cleanupVms) + e.testEnvVars[e2etests.CleanupResourcesVar] = strconv.FormatBool(e.cleanup) if e.branchName != "" { e.testEnvVars[e2etests.BranchNameEnvVar] = e.branchName diff --git a/internal/test/e2e/testRunner.go b/internal/test/e2e/testRunner.go index 0ce0e08b957e..9f9e9421afbb 100644 --- a/internal/test/e2e/testRunner.go +++ b/internal/test/e2e/testRunner.go @@ -2,6 +2,7 @@ package e2e import ( "context" + "encoding/json" "fmt" "os" "strconv" @@ -132,10 +133,9 @@ func (v *VSphereTestRunner) setEnvironment() (map[string]string, error) { } func (v *VSphereTestRunner) createInstance(c instanceRunConf) (string, error) { - name := getTestRunnerName(v.logger, c.jobId) - v.logger.V(1).Info("Creating vSphere Test Runner instance", "name", name) + name := getTestRunnerName(v.logger, c.JobID) - ssmActivationInfo, err := ssm.CreateActivation(c.session, name, c.instanceProfileName) + ssmActivationInfo, err := ssm.CreateActivation(c.Session, name, c.InstanceProfileName) if err != nil { return "", fmt.Errorf("unable to create ssm activation: %v", err) } @@ -152,9 +152,14 @@ func (v *VSphereTestRunner) createInstance(c instanceRunConf) (string, error) { PropertyMapping: []vsphere.OVFProperty{ {Key: ssmActivationCodeKey, Value: ssmActivationInfo.ActivationCode}, {Key: ssmActivationIdKey, Value: ssmActivationInfo.ActivationID}, - {Key: ssmActivationRegionKey, Value: *c.session.Config.Region}, + {Key: ssmActivationRegionKey, Value: *c.Session.Config.Region}, }, } + optsJSON, err := json.Marshal(opts) + if err != nil { + return "", err + } + v.logger.V(1).Info("Creating vSphere Test Runner instance", "name", name, "ovf_deployment_opts", optsJSON) // deploy 
template if err := vsphere.DeployTemplate(v.envMap, v.Library, v.Template, name, v.Folder, v.Datacenter, v.Datastore, v.ResourcePool, opts); err != nil { @@ -163,7 +168,7 @@ func (v *VSphereTestRunner) createInstance(c instanceRunConf) (string, error) { var ssmInstance *aws_ssm.InstanceInformation err = retrier.Retry(10, 5*time.Second, func() error { - ssmInstance, err = ssm.GetInstanceByActivationId(c.session, ssmActivationInfo.ActivationID) + ssmInstance, err = ssm.GetInstanceByActivationId(c.Session, ssmActivationInfo.ActivationID) if err != nil { return fmt.Errorf("failed to get ssm instance info post ovf deployment: %v", err) } @@ -180,19 +185,19 @@ func (v *VSphereTestRunner) createInstance(c instanceRunConf) (string, error) { } func (e *Ec2TestRunner) createInstance(c instanceRunConf) (string, error) { - name := getTestRunnerName(e.logger, c.jobId) + name := getTestRunnerName(e.logger, c.JobID) e.logger.V(1).Info("Creating ec2 Test Runner instance", "name", name) - instanceId, err := ec2.CreateInstance(c.session, e.AmiID, key, tag, c.instanceProfileName, e.SubnetID, name) + instanceID, err := ec2.CreateInstance(c.Session, e.AmiID, key, tag, c.InstanceProfileName, e.SubnetID, name) if err != nil { return "", fmt.Errorf("creating instance for e2e tests: %v", err) } - e.logger.V(1).Info("Instance created", "instance-id", instanceId) - e.InstanceID = instanceId - return instanceId, nil + e.logger.V(1).Info("Instance created", "instance-id", instanceID) + e.InstanceID = instanceID + return instanceID, nil } func (v *VSphereTestRunner) tagInstance(c instanceRunConf, key, value string) error { - vmName := getTestRunnerName(v.logger, c.jobId) + vmName := getTestRunnerName(v.logger, c.JobID) vmPath := fmt.Sprintf("/%s/vm/%s/%s", v.Datacenter, v.Folder, vmName) tag := fmt.Sprintf("%s:%s", key, value) @@ -203,7 +208,7 @@ func (v *VSphereTestRunner) tagInstance(c instanceRunConf, key, value string) er } func (e *Ec2TestRunner) tagInstance(c instanceRunConf, key, value string) error { - err := ec2.TagInstance(c.session, e.InstanceID, key, value) + err := ec2.TagInstance(c.Session, e.InstanceID, key, value) if err != nil { return fmt.Errorf("failed to tag Ec2 test runner: %v", err) } @@ -211,9 +216,9 @@ func (e *Ec2TestRunner) tagInstance(c instanceRunConf, key, value string) error } func (v *VSphereTestRunner) decommInstance(c instanceRunConf) error { - _, deregisterError := ssm.DeregisterInstance(c.session, v.InstanceID) - _, deactivateError := ssm.DeleteActivation(c.session, v.ActivationId) - deleteError := cleanup.VsphereRmVms(context.Background(), getTestRunnerName(v.logger, c.jobId), executables.WithGovcEnvMap(v.envMap)) + _, deregisterError := ssm.DeregisterInstance(c.Session, v.InstanceID) + _, deactivateError := ssm.DeleteActivation(c.Session, v.ActivationId) + deleteError := cleanup.VsphereRmVms(context.Background(), getTestRunnerName(v.logger, c.JobID), executables.WithGovcEnvMap(v.envMap)) if deregisterError != nil { return fmt.Errorf("failed to decommission vsphere test runner ssm instance: %v", deregisterError) @@ -231,9 +236,9 @@ func (v *VSphereTestRunner) decommInstance(c instanceRunConf) error { } func (e *Ec2TestRunner) decommInstance(c instanceRunConf) error { - runnerName := getTestRunnerName(e.logger, c.jobId) + runnerName := getTestRunnerName(e.logger, c.JobID) e.logger.V(1).Info("Terminating ec2 Test Runner instance", "instanceID", e.InstanceID, "runner", runnerName) - if err := ec2.TerminateEc2Instances(c.session, aws.StringSlice([]string{e.InstanceID})); err != nil { + 
if err := ec2.TerminateEc2Instances(c.Session, aws.StringSlice([]string{e.InstanceID})); err != nil { return fmt.Errorf("terminating instance %s for runner %s: %w", e.InstanceID, runnerName, err) } diff --git a/internal/test/e2e/tinkerbell.go b/internal/test/e2e/tinkerbell.go index a9ca4532662d..796cf449841b 100644 --- a/internal/test/e2e/tinkerbell.go +++ b/internal/test/e2e/tinkerbell.go @@ -23,6 +23,7 @@ const ( maxHardwarePerE2ETestEnvVar = "T_TINKERBELL_MAX_HARDWARE_PER_TEST" tinkerbellDefaultMaxHardwarePerE2ETest = 4 tinkerbellBootstrapInterfaceEnvVar = "T_TINKERBELL_BOOTSTRAP_INTERFACE" + tinkerbellCIEnvironmentEnvVar = "T_TINKERBELL_CI_ENVIRONMENT" ) // TinkerbellTest maps each Tinkbell test with the hardware count needed for the test. @@ -80,7 +81,7 @@ func (e *E2ESession) setupTinkerbellEnv(testRegex string) error { } e.testEnvVars[tinkerbellInventoryCsvFilePathEnvVar] = inventoryFilePath - e.testEnvVars[e2etests.TinkerbellCIEnvironment] = "true" + e.testEnvVars[tinkerbellCIEnvironmentEnvVar] = "true" return nil } diff --git a/manager/EKS_DISTRO_MINIMAL_BASE_TAG_FILE b/manager/EKS_DISTRO_MINIMAL_BASE_TAG_FILE index 865670682200..b42771e96142 100644 --- a/manager/EKS_DISTRO_MINIMAL_BASE_TAG_FILE +++ b/manager/EKS_DISTRO_MINIMAL_BASE_TAG_FILE @@ -1 +1 @@ -2023-09-06-1694026927.2 +2024-04-01-1711929684.2 diff --git a/pkg/api/v1alpha1/cluster.go b/pkg/api/v1alpha1/cluster.go index 0d5d0d1ca6e4..aa750a011da3 100644 --- a/pkg/api/v1alpha1/cluster.go +++ b/pkg/api/v1alpha1/cluster.go @@ -15,10 +15,12 @@ import ( "strings" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/apis/meta/v1/validation" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/validation/field" yamlutil "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/kubelet/config/v1beta1" "sigs.k8s.io/yaml" "github.com/aws/eks-anywhere/pkg/constants" @@ -192,6 +194,8 @@ var clusterConfigValidations = []func(*Cluster) error{ validateControlPlaneCertSANs, validateControlPlaneAPIServerExtraArgs, validateControlPlaneAPIServerOIDCExtraArgs, + validateControlPlaneKubeletConfiguration, + validateWorkerNodeKubeletConfiguration, } // GetClusterConfig parses a Cluster object from a multiobject yaml file in disk @@ -225,7 +229,7 @@ func GetAndValidateClusterConfig(fileName string) (*Cluster, error) { // GetClusterDefaultKubernetesVersion returns the default kubernetes version for a Cluster. func GetClusterDefaultKubernetesVersion() KubernetesVersion { - return Kube129 + return Kube130 } // ValidateClusterConfigContent validates a Cluster object without modifying it @@ -396,9 +400,12 @@ func ValidateClusterName(clusterName string) error { } func ValidateClusterNameLength(clusterName string) error { - // vSphere has the maximum length for clusters to be 80 chars - if len(clusterName) > 80 { - return fmt.Errorf("number of characters in %v should be less than 81", clusterName) + // docker container hostname can have a maximum length of 64 characters. we append "-eks-a-cluster" + // to get the KinD cluster's name and on top of this, KinD also adds a "-control-plane suffix" to + // the cluster name to arrive at the name for the control plane node (container), which makes the + // control plane node name 64 characters in length. 
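+	// Capping the cluster name at 35 characters keeps the generated control plane node (container) name within that hostname limit.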
+ if len(clusterName) > 35 { + return fmt.Errorf("number of characters in %v should be less than 36", clusterName) } return nil } @@ -527,6 +534,51 @@ func validateControlPlaneAPIServerOIDCExtraArgs(clusterConfig *Cluster) error { return nil } +func validateControlPlaneKubeletConfiguration(clusterConfig *Cluster) error { + cpKubeletConfig := clusterConfig.Spec.ControlPlaneConfiguration.KubeletConfiguration + + return validateKubeletConfiguration(cpKubeletConfig) +} + +func validateWorkerNodeKubeletConfiguration(clusterConfig *Cluster) error { + workerNodeGroupConfigs := clusterConfig.Spec.WorkerNodeGroupConfigurations + + for _, workerNodeGroupConfig := range workerNodeGroupConfigs { + wnKubeletConfig := workerNodeGroupConfig.KubeletConfiguration + + if err := validateKubeletConfiguration(wnKubeletConfig); err != nil { + return err + } + } + + return nil +} + +func validateKubeletConfiguration(eksakubeconfig *unstructured.Unstructured) error { + if eksakubeconfig == nil { + return nil + } + + var kubeletConfig v1beta1.KubeletConfiguration + + kcString, err := yaml.Marshal(eksakubeconfig) + if err != nil { + return err + } + + _, err = yaml.YAMLToJSONStrict([]byte(kcString)) + if err != nil { + return fmt.Errorf("unmarshaling the yaml, malformed yaml %v", err) + } + + err = yaml.UnmarshalStrict(kcString, &kubeletConfig) + if err != nil { + return fmt.Errorf("unmarshaling KubeletConfiguration for %v", err) + } + + return nil +} + func validateWorkerNodeGroups(clusterConfig *Cluster) error { workerNodeGroupConfigs := clusterConfig.Spec.WorkerNodeGroupConfigurations if len(workerNodeGroupConfigs) <= 0 { diff --git a/pkg/api/v1alpha1/cluster_test.go b/pkg/api/v1alpha1/cluster_test.go index 39146ef1e1ee..941484bb41cb 100644 --- a/pkg/api/v1alpha1/cluster_test.go +++ b/pkg/api/v1alpha1/cluster_test.go @@ -14,6 +14,7 @@ import ( "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "sigs.k8s.io/yaml" "github.com/aws/eks-anywhere/pkg/features" @@ -79,13 +80,13 @@ func TestClusterNameLength(t *testing.T) { }{ { name: "SuccessClusterNameLength", - clusterName: "qwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnm", + clusterName: "cluster-name-less-than-36-chars", wantErr: nil, }, { name: "FailureClusterNameLength", - clusterName: "qwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnm12345", - wantErr: errors.New("number of characters in qwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnm12345 should be less than 81"), + clusterName: "cluster-name-equals-to-36-characters", + wantErr: errors.New("number of characters in cluster-name-equals-to-36-characters should be less than 36"), }, } @@ -1096,6 +1097,138 @@ func TestGetAndValidateClusterConfig(t *testing.T) { } } +type clusterOpt func(c *Cluster) + +func baseCluster(opts ...clusterOpt) *Cluster { + c := &Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + Spec: ClusterSpec{ + ControlPlaneConfiguration: ControlPlaneConfiguration{ + Count: 1, + Endpoint: &Endpoint{ + Host: "1.1.1.1", + }, + MachineGroupRef: &Ref{}, + }, + WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{ + { + Count: ptr.Int(3), + MachineGroupRef: &Ref{ + Kind: VSphereMachineConfigKind, + Name: "eksa-unit-test-1", + }, + Name: "wn-1", + }, + }, + KubernetesVersion: Kube129, + ExternalEtcdConfiguration: &ExternalEtcdConfiguration{ + MachineGroupRef: &Ref{ + Kind: VSphereMachineConfigKind, + 
Name: "eksa-unit-test-etcd", + }, + Count: 1, + }, + DatacenterRef: Ref{ + Kind: VSphereDatacenterKind, + Name: "eksa-unit-test", + }, + ClusterNetwork: ClusterNetwork{ + CNIConfig: &CNIConfig{Cilium: &CiliumConfig{}}, + Pods: Pods{ + CidrBlocks: []string{"192.168.0.0/16"}, + }, + Services: Services{ + CidrBlocks: []string{"10.96.0.0/12"}, + }, + }, + }, + } + + for _, opt := range opts { + opt(c) + } + + return c +} + +func TestValidateClusterConfigContent(t *testing.T) { + tests := []struct { + testName string + cluster *Cluster + wantErr bool + err string + }{ + { + testName: "valid cluster without kubelet", + cluster: baseCluster(), + wantErr: false, + }, + { + testName: "valid cluster with kubelet config for cp and wn", + cluster: baseCluster(func(c *Cluster) { + c.Spec.ControlPlaneConfiguration.KubeletConfiguration = &unstructured.Unstructured{ + Object: map[string]interface{}{ + "maxPods": 20, + "apiVersion": "kubelet.config.k8s.io/v1beta1", + "kind": "KubeletConfiguration", + }, + } + c.Spec.WorkerNodeGroupConfigurations[0].KubeletConfiguration = &unstructured.Unstructured{ + Object: map[string]interface{}{ + "maxPods": 20, + "apiVersion": "kubelet.config.k8s.io/v1beta1", + "kind": "KubeletConfiguration", + }, + } + }), + wantErr: false, + }, + { + testName: "invalid cluster with kubelet config for cp", + cluster: baseCluster(func(c *Cluster) { + c.Spec.ControlPlaneConfiguration.KubeletConfiguration = &unstructured.Unstructured{ + Object: map[string]interface{}{ + "maxPodss": 20, + "apiVersion": "kubelet.config.k8s.io/v1beta1", + "kind": "KubeletConfiguration", + }, + } + }), + wantErr: true, + err: "unknown field", + }, + { + testName: "invalid cluster with kubelet config for wn", + cluster: baseCluster(func(c *Cluster) { + c.Spec.WorkerNodeGroupConfigurations[0].KubeletConfiguration = &unstructured.Unstructured{ + Object: map[string]interface{}{ + "maxPodss": 20, + "apiVersion": "kubelet.config.k8s.io/v1beta1", + "kind": "KubeletConfiguration", + }, + } + }), + wantErr: true, + err: "unknown field", + }, + } + + for _, tt := range tests { + t.Run(tt.testName, func(t *testing.T) { + err := ValidateClusterConfigContent(tt.cluster) + if (err != nil) != tt.wantErr { + t.Fatalf("ValidateClusterConfigContent() error = %v, wantErr %v", err, tt.wantErr) + } + + if len(tt.err) > 0 && !strings.Contains(err.Error(), tt.err) { + t.Fatalf("ValidateClusterConfigContent() error = %s, wantErr %s", err.Error(), tt.err) + } + }) + } +} + func TestGetClusterConfig(t *testing.T) { tests := []struct { testName string @@ -3847,7 +3980,7 @@ func TestValidateEksaVersion(t *testing.T) { func TestGetClusterDefaultKubernetesVersion(t *testing.T) { g := NewWithT(t) - g.Expect(GetClusterDefaultKubernetesVersion()).To(Equal(Kube129)) + g.Expect(GetClusterDefaultKubernetesVersion()).To(Equal(Kube130)) } func TestClusterWorkerNodeConfigCount(t *testing.T) { diff --git a/pkg/api/v1alpha1/cluster_types.go b/pkg/api/v1alpha1/cluster_types.go index 4173209b3cfe..12628f72e938 100644 --- a/pkg/api/v1alpha1/cluster_types.go +++ b/pkg/api/v1alpha1/cluster_types.go @@ -8,6 +8,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/intstr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -241,7 +242,7 @@ type RegistryMirrorConfiguration struct { // OCINamespace represents an entity in a local reigstry to group related images. 
type OCINamespace struct { - // Name refers to the name of the upstream registry + // Registry refers to the name of the upstream registry Registry string `json:"registry"` // Namespace refers to the name of a namespace in the local registry Namespace string `json:"namespace"` @@ -309,6 +310,9 @@ type ControlPlaneConfiguration struct { MachineHealthCheck *MachineHealthCheck `json:"machineHealthCheck,omitempty"` // APIServerExtraArgs defines the flags to configure for the API server. APIServerExtraArgs map[string]string `json:"apiServerExtraArgs,omitempty"` + // KubeletConfiguration is a struct that exposes the Kubelet settings for the user to set on control plane nodes. + // +kubebuilder:pruning:PreserveUnknownFields + KubeletConfiguration *unstructured.Unstructured `json:"kubeletConfiguration,omitempty"` } // MachineHealthCheck allows to configure timeouts for machine health checks. Machine Health Checks are responsible for remediating unhealthy Machines. @@ -453,10 +457,13 @@ type WorkerNodeGroupConfiguration struct { // UpgradeRolloutStrategy determines the rollout strategy to use for rolling upgrades // and related parameters/knobs UpgradeRolloutStrategy *WorkerNodesUpgradeRolloutStrategy `json:"upgradeRolloutStrategy,omitempty"` - // KuberenetesVersion defines the version for worker nodes. If not set, the top level spec kubernetesVersion will be used. + // KubernetesVersion defines the version for worker nodes. If not set, the top level spec kubernetesVersion will be used. KubernetesVersion *KubernetesVersion `json:"kubernetesVersion,omitempty"` // MachineHealthCheck is a worker node level override for the timeouts and maxUnhealthy specified in the top-level MHC configuration. If not configured, the defaults in the top-level MHC configuration are used. MachineHealthCheck *MachineHealthCheck `json:"machineHealthCheck,omitempty"` + // KubeletConfiguration is a struct that exposes the Kubelet settings for the user to set on worker nodes. + // +kubebuilder:pruning:PreserveUnknownFields + KubeletConfiguration *unstructured.Unstructured `json:"kubeletConfiguration,omitempty"` } // Equal compares two WorkerNodeGroupConfigurations. @@ -824,6 +831,7 @@ const ( Kube127 KubernetesVersion = "1.27" Kube128 KubernetesVersion = "1.28" Kube129 KubernetesVersion = "1.29" + Kube130 KubernetesVersion = "1.30" ) // KubeVersionToSemver converts kube version to semver for comparisons. @@ -1045,6 +1053,14 @@ func (n *Ref) Equal(o *Ref) bool { return n.Kind == o.Kind && n.Name == o.Name } +// IsEmpty checks if the given ref object is empty. +func (n Ref) IsEmpty() bool { + if n.Kind == "" && n.Name == "" { + return true + } + return false +} + // +kubebuilder:object:generate=false // Interface for getting DatacenterRef fields for Cluster type. 
type ProviderRefAccessor interface { diff --git a/pkg/api/v1alpha1/cluster_types_test.go b/pkg/api/v1alpha1/cluster_types_test.go index 036cd117223a..c479ec9f6acd 100644 --- a/pkg/api/v1alpha1/cluster_types_test.go +++ b/pkg/api/v1alpha1/cluster_types_test.go @@ -854,6 +854,48 @@ func TestClusterEqualGitOpsRef(t *testing.T) { } } +func TestClusterIsEmptyRef(t *testing.T) { + testCases := []struct { + testName string + templateRef v1alpha1.Ref + want bool + }{ + { + testName: "kind not empty", + templateRef: v1alpha1.Ref{ + Kind: "k1", + }, + want: false, + }, + { + testName: "name not empty", + templateRef: v1alpha1.Ref{ + Name: "n1", + }, + want: false, + }, + { + testName: "both not empty", + templateRef: v1alpha1.Ref{ + Kind: "k", + Name: "n", + }, + want: false, + }, + { + testName: "both not empty", + templateRef: v1alpha1.Ref{}, + want: true, + }, + } + for _, tt := range testCases { + t.Run(tt.testName, func(t *testing.T) { + g := NewWithT(t) + g.Expect(tt.templateRef.IsEmpty()).To(Equal(tt.want)) + }) + } +} + func TestClusterEqualClusterNetwork(t *testing.T) { testCases := []struct { testName string diff --git a/pkg/api/v1alpha1/cluster_webhook.go b/pkg/api/v1alpha1/cluster_webhook.go index 97845612add8..7cc812afbdff 100644 --- a/pkg/api/v1alpha1/cluster_webhook.go +++ b/pkg/api/v1alpha1/cluster_webhook.go @@ -66,7 +66,7 @@ func (r *Cluster) ValidateCreate() (admission.Warnings, error) { return nil, apierrors.NewBadRequest("creating new cluster on existing cluster is not supported for self managed clusters") } - if r.Spec.EtcdEncryption != nil { + if !r.IsReconcilePaused() && r.Spec.EtcdEncryption != nil { allErrs = append(allErrs, field.Invalid(field.NewPath("spec.etcdEncryption"), r.Spec.EtcdEncryption, "etcdEncryption is not supported during cluster creation")) } diff --git a/pkg/api/v1alpha1/nutanixdatacenterconfig_test.go b/pkg/api/v1alpha1/nutanixdatacenterconfig_test.go index 1eaf40e16525..1055bb8e05c6 100644 --- a/pkg/api/v1alpha1/nutanixdatacenterconfig_test.go +++ b/pkg/api/v1alpha1/nutanixdatacenterconfig_test.go @@ -175,6 +175,22 @@ func TestGetNutanixDatacenterConfigValidConfig(t *testing.T) { assert.Contains(t, err.Error(), "NutanixDatacenterConfig credentialRef name is not set or is empty") }, }, + { + name: "datacenterconfig-valid-failure-domains", + fileName: "testdata/nutanix/datacenterconfig-valid-failuredomains.yaml", + assertions: func(t *testing.T, dcConf *v1alpha1.NutanixDatacenterConfig) { + assert.NoError(t, dcConf.Validate()) + }, + }, + { + name: "datecenterconfig-invalid-failure-domains", + fileName: "testdata/nutanix/datacenterconfig-invalid-failuredomains.yaml", + assertions: func(t *testing.T, dcConf *v1alpha1.NutanixDatacenterConfig) { + err := dcConf.Validate() + assert.Error(t, err) + assert.Contains(t, err.Error(), "NutanixDatacenterConfig.Spec.FailureDomains.Subnets: missing subnet UUID: default/eksa-unit-test") + }, + }, } for _, test := range tests { diff --git a/pkg/api/v1alpha1/nutanixdatacenterconfig_types.go b/pkg/api/v1alpha1/nutanixdatacenterconfig_types.go index 4fb6fc522d75..2ab1b77467f3 100644 --- a/pkg/api/v1alpha1/nutanixdatacenterconfig_types.go +++ b/pkg/api/v1alpha1/nutanixdatacenterconfig_types.go @@ -43,6 +43,31 @@ type NutanixDatacenterConfigSpec struct { // for the Nutanix Prism Central. The namespace for the secret is assumed to be a constant i.e. eksa-system. // +optional CredentialRef *Ref `json:"credentialRef,omitempty"` + + // FailureDomains is the optional list of failure domains for the Nutanix Datacenter. 
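+	// Each failure domain identifies a Prism Element cluster and the subnets to use on it.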
+ // +optional + FailureDomains []NutanixDatacenterFailureDomain `json:"failureDomains,omitempty"` +} + +// NutanixDatacenterFailureDomain defines the failure domain for the Nutanix Datacenter. +type NutanixDatacenterFailureDomain struct { + // Name is the unique name of the failure domain. + // Name must be between 1 and 64 characters long. + // It must consist of only lower case alphanumeric characters and hyphens (-). + // It must start and end with an alphanumeric character. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=64 + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + Name string `json:"name"` + + // Cluster is the Prism Element cluster name or uuid that is connected to the Prism Central. + // +kubebuilder:validation:Required + Cluster NutanixResourceIdentifier `json:"cluster,omitempty"` + + // Subnets holds the list of subnets identifiers cluster's network subnets. + // +kubebuilder:validation:Required + Subnets []NutanixResourceIdentifier `json:"subnets,omitempty"` } // NutanixDatacenterConfigStatus defines the observed state of NutanixDatacenterConfig. @@ -140,9 +165,42 @@ func (in *NutanixDatacenterConfig) Validate() error { } } + if in.Spec.FailureDomains != nil && len(in.Spec.FailureDomains) != 0 { + dccName := in.Namespace + "/" + in.Name + validateClusterResourceIdentifier := createValidateNutanixResourceFunc("NutanixDatacenterConfig.Spec.FailureDomains.Cluster", "cluster", dccName) + validateSubnetResourceIdentifier := createValidateNutanixResourceFunc("NutanixDatacenterConfig.Spec.FailureDomains.Subnets", "subnet", dccName) + for _, fd := range in.Spec.FailureDomains { + if err := validateClusterResourceIdentifier(&fd.Cluster); err != nil { + return err + } + + for _, subnet := range fd.Subnets { + if err := validateSubnetResourceIdentifier(&subnet); err != nil { + return err + } + } + } + } + return nil } +func createValidateNutanixResourceFunc(msgPrefix, entityName, mfstName string) func(*NutanixResourceIdentifier) error { + return func(ntnxRId *NutanixResourceIdentifier) error { + if ntnxRId.Type != NutanixIdentifierName && ntnxRId.Type != NutanixIdentifierUUID { + return fmt.Errorf("%s: invalid identifier type for %s: %s", msgPrefix, entityName, ntnxRId.Type) + } + + if ntnxRId.Type == NutanixIdentifierName && (ntnxRId.Name == nil || *ntnxRId.Name == "") { + return fmt.Errorf("%s: missing %s name: %s", msgPrefix, entityName, mfstName) + } else if ntnxRId.Type == NutanixIdentifierUUID && (ntnxRId.UUID == nil || *ntnxRId.UUID == "") { + return fmt.Errorf("%s: missing %s UUID: %s", msgPrefix, entityName, mfstName) + } + + return nil + } +} + // SetDefaults sets default values for the NutanixDatacenterConfig object. 
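As a usage sketch of the failure-domain validation just added (placed here, before the SetDefaults function that follows): the construction below assumes the existing NutanixResourceIdentifier shape (a Type plus pointer Name/UUID fields) and mirrors the invalid testdata, so Validate is expected to return the "missing subnet UUID" error. Treat it as an illustration rather than a definitive call sequence.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)

func main() {
	clusterName := "prism-cluster-1"
	emptyUUID := ""

	dc := &anywherev1.NutanixDatacenterConfig{
		ObjectMeta: metav1.ObjectMeta{Name: "eksa-unit-test", Namespace: "default"},
		Spec: anywherev1.NutanixDatacenterConfigSpec{
			Endpoint:      "prism.nutanix.com",
			Port:          9440,
			CredentialRef: &anywherev1.Ref{Kind: "Secret", Name: "eksa-unit-test"},
			FailureDomains: []anywherev1.NutanixDatacenterFailureDomain{
				{
					Name: "pe1",
					Cluster: anywherev1.NutanixResourceIdentifier{
						Type: anywherev1.NutanixIdentifierName,
						Name: &clusterName,
					},
					Subnets: []anywherev1.NutanixResourceIdentifier{
						// A UUID-typed identifier with an empty UUID trips the new check.
						{Type: anywherev1.NutanixIdentifierUUID, UUID: &emptyUUID},
					},
				},
			},
		},
	}

	// Expected: "NutanixDatacenterConfig.Spec.FailureDomains.Subnets: missing subnet UUID: default/eksa-unit-test"
	fmt.Println(dc.Validate())
}
```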
func (in *NutanixDatacenterConfig) SetDefaults() { if in.Spec.CredentialRef == nil { diff --git a/pkg/api/v1alpha1/testdata/cluster_in_place_upgrade.yaml b/pkg/api/v1alpha1/testdata/cluster_in_place_upgrade.yaml index dd4f10c4bb77..069c9a667452 100644 --- a/pkg/api/v1alpha1/testdata/cluster_in_place_upgrade.yaml +++ b/pkg/api/v1alpha1/testdata/cluster_in_place_upgrade.yaml @@ -17,7 +17,7 @@ spec: upgradeRolloutStrategy: type: InPlace datacenterRef: {} - kubernetesVersion: "1.29" + kubernetesVersion: "1.30" managementCluster: name: test-cluster workerNodeGroupConfigurations: diff --git a/pkg/api/v1alpha1/testdata/nutanix/datacenterconfig-invalid-failuredomains.yaml b/pkg/api/v1alpha1/testdata/nutanix/datacenterconfig-invalid-failuredomains.yaml new file mode 100644 index 000000000000..b25f74bc958c --- /dev/null +++ b/pkg/api/v1alpha1/testdata/nutanix/datacenterconfig-invalid-failuredomains.yaml @@ -0,0 +1,30 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + name: eksa-unit-test + kind: Secret + failureDomains: + - name: "pe1" + cluster: + type: name + name: "prism-cluster-1" + subnets: + - name: "prism-subnet-1" + type: "name" + - uuid: "" + type: "uuid" + - name: "pe2" + cluster: + type: "uuid" + uuid: "468b7b36-d15b-406a-90f7-46d1560c4f4e" + subnets: + - name: "prism-subnet-1" + type: "name" + - uuid: "3e716c09-0613-46f3-b46a-beb89aa02295" + type: "uuid" diff --git a/pkg/api/v1alpha1/testdata/nutanix/datacenterconfig-valid-failuredomains.yaml b/pkg/api/v1alpha1/testdata/nutanix/datacenterconfig-valid-failuredomains.yaml new file mode 100644 index 000000000000..02f806ff343e --- /dev/null +++ b/pkg/api/v1alpha1/testdata/nutanix/datacenterconfig-valid-failuredomains.yaml @@ -0,0 +1,30 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + name: eksa-unit-test + kind: Secret + failureDomains: + - name: "pe1" + cluster: + type: name + name: "prism-cluster-1" + subnets: + - name: "prism-subnet-1" + type: "name" + - uuid: "3e716c09-0613-46f3-b46a-beb89aa02295" + type: "uuid" + - name: "pe2" + cluster: + type: "uuid" + uuid: "468b7b36-d15b-406a-90f7-46d1560c4f4e" + subnets: + - name: "prism-subnet-1" + type: "name" + - uuid: "3e716c09-0613-46f3-b46a-beb89aa02295" + type: "uuid" diff --git a/pkg/api/v1alpha1/tinkerbelldatacenterconfig.go b/pkg/api/v1alpha1/tinkerbelldatacenterconfig.go index 1a2cc8fcf062..817eaca2dc8a 100644 --- a/pkg/api/v1alpha1/tinkerbelldatacenterconfig.go +++ b/pkg/api/v1alpha1/tinkerbelldatacenterconfig.go @@ -22,7 +22,9 @@ func NewTinkerbellDatacenterConfigGenerate(clusterName string) *TinkerbellDatace ObjectMeta: ObjectMeta{ Name: clusterName, }, - Spec: TinkerbellDatacenterConfigSpec{}, + Spec: TinkerbellDatacenterConfigSpec{ + TinkerbellIP: "", + }, } } diff --git a/pkg/api/v1alpha1/tinkerbellmachineconfig.go b/pkg/api/v1alpha1/tinkerbellmachineconfig.go index 6caf77e7ea0b..c952aa7e9df2 100644 --- a/pkg/api/v1alpha1/tinkerbellmachineconfig.go +++ b/pkg/api/v1alpha1/tinkerbellmachineconfig.go @@ -27,6 +27,7 @@ func NewTinkerbellMachineConfigGenerate(name string, opts ...TinkerbellMachineCo Spec: TinkerbellMachineConfigSpec{ HardwareSelector: HardwareSelector{}, OSFamily: Ubuntu, + OSImageURL: "", Users: []UserConfiguration{ { Name: "ec2-user", diff --git 
a/pkg/api/v1alpha1/tinkerbellmachineconfig_types.go b/pkg/api/v1alpha1/tinkerbellmachineconfig_types.go index 8de153fbeef8..68304aba1568 100644 --- a/pkg/api/v1alpha1/tinkerbellmachineconfig_types.go +++ b/pkg/api/v1alpha1/tinkerbellmachineconfig_types.go @@ -18,7 +18,7 @@ type TinkerbellMachineConfigSpec struct { // OSImageURL is a URL to the OS image used during provisioning. It must include // the Kubernetes version(s). For example, a URL used for Kubernetes 1.27 could // be http://localhost:8080/ubuntu-2204-1.27.tgz - OSImageURL string `json:"osImageURL,omitempty"` + OSImageURL string `json:"osImageURL"` Users []UserConfiguration `json:"users,omitempty"` HostOSConfiguration *HostOSConfiguration `json:"hostOSConfiguration,omitempty"` } diff --git a/pkg/api/v1alpha1/zz_generated.deepcopy.go b/pkg/api/v1alpha1/zz_generated.deepcopy.go index e77f5f744048..b595f09e020a 100644 --- a/pkg/api/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/api/v1alpha1/zz_generated.deepcopy.go @@ -885,6 +885,10 @@ func (in *ControlPlaneConfiguration) DeepCopyInto(out *ControlPlaneConfiguration (*out)[key] = val } } + if in.KubeletConfiguration != nil { + in, out := &in.KubeletConfiguration, &out.KubeletConfiguration + *out = (*in).DeepCopy() + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneConfiguration. @@ -2008,6 +2012,13 @@ func (in *NutanixDatacenterConfigSpec) DeepCopyInto(out *NutanixDatacenterConfig *out = new(Ref) **out = **in } + if in.FailureDomains != nil { + in, out := &in.FailureDomains, &out.FailureDomains + *out = make([]NutanixDatacenterFailureDomain, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixDatacenterConfigSpec. @@ -2035,6 +2046,29 @@ func (in *NutanixDatacenterConfigStatus) DeepCopy() *NutanixDatacenterConfigStat return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixDatacenterFailureDomain) DeepCopyInto(out *NutanixDatacenterFailureDomain) { + *out = *in + in.Cluster.DeepCopyInto(&out.Cluster) + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]NutanixResourceIdentifier, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixDatacenterFailureDomain. +func (in *NutanixDatacenterFailureDomain) DeepCopy() *NutanixDatacenterFailureDomain { + if in == nil { + return nil + } + out := new(NutanixDatacenterFailureDomain) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NutanixMachineConfig) DeepCopyInto(out *NutanixMachineConfig) { *out = *in @@ -3470,6 +3504,10 @@ func (in *WorkerNodeGroupConfiguration) DeepCopyInto(out *WorkerNodeGroupConfigu *out = new(MachineHealthCheck) (*in).DeepCopyInto(*out) } + if in.KubeletConfiguration != nil { + in, out := &in.KubeletConfiguration, &out.KubeletConfiguration + *out = (*in).DeepCopy() + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerNodeGroupConfiguration. 
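The zz_generated.deepcopy.go additions above are mechanical, but the reason they exist is worth one standalone illustration: without an element-by-element copy, slice and pointer fields of a copied struct still alias the original's backing data. The toy example below is not the generated code, just the aliasing behaviour DeepCopyInto guards against.

```go
package main

import "fmt"

type failureDomain struct{ subnets []string }

// shallowCopy copies the struct value; the slice header still points at the same backing array.
func shallowCopy(in failureDomain) failureDomain { return in }

// deepCopy clones the slice so the copy is independent, which is what DeepCopyInto does field by field.
func deepCopy(in failureDomain) failureDomain {
	return failureDomain{subnets: append([]string(nil), in.subnets...)}
}

func main() {
	a := failureDomain{subnets: []string{"subnet-1"}}
	b := shallowCopy(a)
	b.subnets[0] = "mutated"
	fmt.Println(a.subnets[0]) // "mutated": the shallow copy aliased the original

	c := failureDomain{subnets: []string{"subnet-1"}}
	d := deepCopy(c)
	d.subnets[0] = "mutated"
	fmt.Println(c.subnets[0]) // "subnet-1": the deep copy is independent
}
```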
diff --git a/pkg/cluster/build.go b/pkg/cluster/build.go index 23621d4931b8..f83d9949515f 100644 --- a/pkg/cluster/build.go +++ b/pkg/cluster/build.go @@ -6,7 +6,7 @@ func NewDefaultConfigClientBuilder() *ConfigClientBuilder { return NewConfigClientBuilder().Register( getCloudStackMachineConfigs, getCloudStackDatacenter, - getTinkerbellMachineConfigs, + getTinkerbellMachineAndTemplateConfigs, getTinkerbellDatacenter, getDockerDatacenter, getVSphereDatacenter, diff --git a/pkg/cluster/spec_test.go b/pkg/cluster/spec_test.go index 2d36875e9885..9c7b90fd4337 100644 --- a/pkg/cluster/spec_test.go +++ b/pkg/cluster/spec_test.go @@ -1,7 +1,6 @@ package cluster_test import ( - "embed" "testing" eksdv1 "github.com/aws/eks-distro-build-tooling/release/api/v1alpha1" @@ -12,13 +11,9 @@ import ( anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/files" - "github.com/aws/eks-anywhere/pkg/manifests/eksd" releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1" ) -//go:embed testdata -var testdataFS embed.FS - func TestNewSpecError(t *testing.T) { version := test.DevEksaVersion() tests := []struct { @@ -375,14 +370,3 @@ func validateVersionedRepo(t *testing.T, gotImage cluster.VersionedRepository, w t.Errorf("GetNewSpec() = Spec: Invalid kubernetes repo, got %s, want %s", gotImage.Tag, wantTag) } } - -func readEksdRelease(tb testing.TB, url string) *eksdv1.Release { - tb.Helper() - r := files.NewReader() - release, err := eksd.ReadManifest(r, url) - if err != nil { - tb.Fatalf("Failed reading eks-d manifest: %s", err) - } - - return release -} diff --git a/pkg/cluster/tinkerbell.go b/pkg/cluster/tinkerbell.go index 04d1d87d1d99..5b83adf17d3f 100644 --- a/pkg/cluster/tinkerbell.go +++ b/pkg/cluster/tinkerbell.go @@ -106,7 +106,7 @@ func getTinkerbellDatacenter(ctx context.Context, client Client, c *Config) erro return nil } -func getTinkerbellMachineConfigs(ctx context.Context, client Client, c *Config) error { +func getTinkerbellMachineAndTemplateConfigs(ctx context.Context, client Client, c *Config) error { if c.Cluster.Spec.DatacenterRef.Kind != anywherev1.TinkerbellDatacenterKind { return nil } @@ -125,6 +125,19 @@ func getTinkerbellMachineConfigs(ctx context.Context, client Client, c *Config) } c.TinkerbellMachineConfigs[machineConfig.Name] = machineConfig + + if !machineConfig.Spec.TemplateRef.IsEmpty() { + if c.TinkerbellTemplateConfigs == nil { + c.TinkerbellTemplateConfigs = map[string]*anywherev1.TinkerbellTemplateConfig{} + } + + templateRefName := machineConfig.Spec.TemplateRef.Name + templateConfig := &anywherev1.TinkerbellTemplateConfig{} + if err := client.Get(ctx, templateRefName, c.Cluster.Namespace, templateConfig); err != nil { + return err + } + c.TinkerbellTemplateConfigs[templateRefName] = templateConfig + } } return nil } diff --git a/pkg/cluster/tinkerbell_test.go b/pkg/cluster/tinkerbell_test.go index 8f2ce1dbfcf2..a3251eb6694b 100644 --- a/pkg/cluster/tinkerbell_test.go +++ b/pkg/cluster/tinkerbell_test.go @@ -251,3 +251,105 @@ func TestDefaultConfigClientBuilderTinkerbellCluster(t *testing.T) { g.Expect(config.TinkerbellMachineConfigs["machine-1"]).To(Equal(machineControlPlane)) g.Expect(config.TinkerbellMachineConfigs["machine-2"]).To(Equal(machineWorker)) } + +func TestDefaultConfigClientBuilderTinkerbellClusterWithTemplateConfig(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + b := cluster.NewDefaultConfigClientBuilder() + ctrl := gomock.NewController(t) + client := 
mocks.NewMockClient(ctrl) + cluster := &anywherev1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "default", + }, + Spec: anywherev1.ClusterSpec{ + DatacenterRef: anywherev1.Ref{ + Kind: anywherev1.TinkerbellDatacenterKind, + Name: "datacenter", + }, + ControlPlaneConfiguration: anywherev1.ControlPlaneConfiguration{ + MachineGroupRef: &anywherev1.Ref{ + Kind: anywherev1.TinkerbellMachineConfigKind, + Name: "machine-1", + }, + }, + WorkerNodeGroupConfigurations: []anywherev1.WorkerNodeGroupConfiguration{ + { + MachineGroupRef: &anywherev1.Ref{ + Kind: anywherev1.TinkerbellMachineConfigKind, + Name: "machine-2", + }, + }, + { + MachineGroupRef: &anywherev1.Ref{ + Kind: anywherev1.CloudStackMachineConfigKind, // Should not process this one + Name: "machine-3", + }, + }, + }, + }, + } + datacenter := &anywherev1.TinkerbellDatacenterConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "datacenter", + Namespace: "default", + }, + } + machineControlPlane := &anywherev1.TinkerbellMachineConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-1", + Namespace: "default", + }, + Spec: anywherev1.TinkerbellMachineConfigSpec{ + TemplateRef: anywherev1.Ref{ + Name: "template-name", + }, + }, + } + machineWorker := &anywherev1.TinkerbellMachineConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-2", + Namespace: "default", + }, + } + client.EXPECT().Get(ctx, "datacenter", "default", &anywherev1.TinkerbellDatacenterConfig{}).Return(nil).DoAndReturn( + func(_ context.Context, _, _ string, obj runtime.Object) error { + d := obj.(*anywherev1.TinkerbellDatacenterConfig) + d.ObjectMeta = datacenter.ObjectMeta + d.Spec = datacenter.Spec + return nil + }, + ) + client.EXPECT().Get(ctx, "machine-1", "default", &anywherev1.TinkerbellMachineConfig{}).Return(nil).DoAndReturn( + func(_ context.Context, _, _ string, obj runtime.Object) error { + m := obj.(*anywherev1.TinkerbellMachineConfig) + m.ObjectMeta = machineControlPlane.ObjectMeta + m.Spec = machineControlPlane.Spec + return nil + }, + ) + client.EXPECT().Get(ctx, "machine-2", "default", &anywherev1.TinkerbellMachineConfig{}).Return(nil).DoAndReturn( + func(_ context.Context, _, _ string, obj runtime.Object) error { + m := obj.(*anywherev1.TinkerbellMachineConfig) + m.ObjectMeta = machineWorker.ObjectMeta + return nil + }, + ) + client.EXPECT().Get(ctx, "template-name", "default", &anywherev1.TinkerbellTemplateConfig{}).Return(nil).DoAndReturn( + func(_ context.Context, _, _ string, obj runtime.Object) error { + m := obj.(*anywherev1.TinkerbellTemplateConfig) + m.ObjectMeta = machineWorker.ObjectMeta + return nil + }, + ) + + config, err := b.Build(ctx, client, cluster) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(config).NotTo(BeNil()) + g.Expect(config.Cluster).To(Equal(cluster)) + g.Expect(config.TinkerbellDatacenter).To(Equal(datacenter)) + g.Expect(len(config.TinkerbellMachineConfigs)).To(Equal(2)) + g.Expect(config.TinkerbellMachineConfigs["machine-1"]).To(Equal(machineControlPlane)) + g.Expect(config.TinkerbellMachineConfigs["machine-2"]).To(Equal(machineWorker)) +} diff --git a/pkg/clusterapi/etcd.go b/pkg/clusterapi/etcd.go index 33edaa06914f..94f58f0bce80 100644 --- a/pkg/clusterapi/etcd.go +++ b/pkg/clusterapi/etcd.go @@ -15,7 +15,7 @@ import ( ) // SetUbuntuConfigInEtcdCluster sets up the etcd config in EtcdadmCluster. 
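Tying this test back to the builder change in pkg/cluster/tinkerbell.go above: machine configs whose TemplateRef is set now also pull the referenced TinkerbellTemplateConfig, and the new Ref.IsEmpty helper is what keeps configs without an explicit template out of that path (presumably so a default template can still be generated for them). A simplified, self-contained sketch of that guard, using stand-in types rather than the real API:

```go
package main

import "fmt"

type ref struct{ Kind, Name string }

// isEmpty mirrors the new Ref.IsEmpty helper: a ref with neither kind nor name set is "unset".
func (r ref) isEmpty() bool { return r.Kind == "" && r.Name == "" }

type machineConfig struct{ templateRef ref }

func main() {
	templates := map[string]string{}
	machines := []machineConfig{
		{templateRef: ref{Name: "template-name"}}, // explicit template: fetched and cached
		{},                                        // no template ref: skipped
	}

	for _, m := range machines {
		if m.templateRef.isEmpty() {
			continue
		}
		// In the real builder this is a client.Get for the TinkerbellTemplateConfig.
		templates[m.templateRef.Name] = "fetched"
	}
	fmt.Println(templates) // map[template-name:fetched]
}
```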
-func SetUbuntuConfigInEtcdCluster(etcd *etcdv1.EtcdadmCluster, versionsBundle *cluster.VersionsBundle, eksaVersion string) { +func SetUbuntuConfigInEtcdCluster(etcd *etcdv1.EtcdadmCluster, versionsBundle *cluster.VersionsBundle, eksaVersion *v1alpha1.EksaVersion) { etcd.Spec.EtcdadmConfigSpec.Format = etcdbootstrapv1.Format("cloud-config") etcd.Spec.EtcdadmConfigSpec.CloudInitConfig = &etcdbootstrapv1.CloudInitConfig{ Version: versionsBundle.KubeDistro.EtcdVersion, diff --git a/pkg/clusterapi/etcd_test.go b/pkg/clusterapi/etcd_test.go index 2b7c5dd48241..72f4a87c31ea 100644 --- a/pkg/clusterapi/etcd_test.go +++ b/pkg/clusterapi/etcd_test.go @@ -15,7 +15,6 @@ import ( func TestSetUbuntuConfigInEtcdCluster(t *testing.T) { g := newApiBuilerTest(t) eksaVersion := anywherev1.EksaVersion("v0.19.2") - g.clusterSpec.Cluster.Spec.EksaVersion = &eksaVersion got := wantEtcdCluster() versionBundle := g.clusterSpec.VersionsBundles["1.21"] @@ -26,14 +25,13 @@ func TestSetUbuntuConfigInEtcdCluster(t *testing.T) { InstallDir: "/usr/bin", EtcdReleaseURL: versionBundle.KubeDistro.EtcdURL, } - clusterapi.SetUbuntuConfigInEtcdCluster(got, versionBundle, string(eksaVersion)) + clusterapi.SetUbuntuConfigInEtcdCluster(got, versionBundle, &eksaVersion) g.Expect(got).To(Equal(want)) } func TestSetUbuntuConfigInEtcdClusterNoEtcdUrl(t *testing.T) { g := newApiBuilerTest(t) eksaVersion := anywherev1.EksaVersion("v0.18.2") - g.clusterSpec.Cluster.Spec.EksaVersion = &eksaVersion got := wantEtcdCluster() versionBundle := g.clusterSpec.VersionsBundles["1.21"] @@ -43,7 +41,7 @@ func TestSetUbuntuConfigInEtcdClusterNoEtcdUrl(t *testing.T) { Version: versionBundle.KubeDistro.EtcdVersion, InstallDir: "/usr/bin", } - clusterapi.SetUbuntuConfigInEtcdCluster(got, versionBundle, string(eksaVersion)) + clusterapi.SetUbuntuConfigInEtcdCluster(got, versionBundle, &eksaVersion) g.Expect(got).To(Equal(want)) } diff --git a/pkg/clustermanager/cluster_manager.go b/pkg/clustermanager/cluster_manager.go index d14dbe3aea7d..a0ec614c7b5d 100644 --- a/pkg/clustermanager/cluster_manager.go +++ b/pkg/clustermanager/cluster_manager.go @@ -674,6 +674,73 @@ func (c *ClusterManager) PauseCAPIWorkloadClusters(ctx context.Context, manageme return nil } +func (c *ClusterManager) resumeEksaReconcileForManagementAndWorkloadClusters(ctx context.Context, managementCluster *types.Cluster, clusterSpec *cluster.Spec, provider providers.Provider) error { + clusters := &v1alpha1.ClusterList{} + err := c.clusterClient.ListObjects(ctx, eksaClusterResourceType, clusterSpec.Cluster.Namespace, managementCluster.KubeconfigFile, clusters) + if err != nil { + return err + } + + for _, w := range clusters.Items { + if w.ManagedBy() != clusterSpec.Cluster.Name { + continue + } + + if err := c.resumeReconcileForCluster(ctx, managementCluster, &w, provider); err != nil { + return err + } + } + + return nil +} + +// ResumeEKSAControllerReconcile resumes a paused EKS-Anywhere cluster. 
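One aside on the signature change above: SetUbuntuConfigInEtcdCluster now receives *v1alpha1.EksaVersion instead of a string, presumably so a nil pointer can represent "no version set" without overloading the empty string. A tiny standalone sketch of that distinction, using a local stand-in type:

```go
package main

import "fmt"

type eksaVersion string

// describe treats a nil pointer as "unset", which an empty string cannot express unambiguously.
func describe(v *eksaVersion) string {
	if v == nil {
		return "version not set"
	}
	return string(*v)
}

func main() {
	v := eksaVersion("v0.19.2")
	fmt.Println(describe(nil)) // version not set
	fmt.Println(describe(&v))  // v0.19.2
}
```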
+func (c *ClusterManager) ResumeEKSAControllerReconcile(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec, provider providers.Provider) error { + // clear pause annotation + clusterSpec.Cluster.ClearPauseAnnotation() + provider.DatacenterConfig(clusterSpec).ClearPauseAnnotation() + + if clusterSpec.Cluster.IsSelfManaged() { + return c.resumeEksaReconcileForManagementAndWorkloadClusters(ctx, cluster, clusterSpec, provider) + } + + return c.resumeReconcileForCluster(ctx, cluster, clusterSpec.Cluster, provider) +} + +func (c *ClusterManager) resumeReconcileForCluster(ctx context.Context, clusterCreds *types.Cluster, cluster *v1alpha1.Cluster, provider providers.Provider) error { + pausedAnnotation := cluster.PausedAnnotation() + err := c.clusterClient.RemoveAnnotationInNamespace(ctx, provider.DatacenterResourceType(), cluster.Spec.DatacenterRef.Name, pausedAnnotation, clusterCreds, cluster.Namespace) + if err != nil { + return fmt.Errorf("removing paused annotation when resuming datacenterconfig reconciliation: %v", err) + } + + if provider.MachineResourceType() != "" { + for _, machineConfigRef := range cluster.MachineConfigRefs() { + err = c.clusterClient.RemoveAnnotationInNamespace(ctx, provider.MachineResourceType(), machineConfigRef.Name, pausedAnnotation, clusterCreds, cluster.Namespace) + if err != nil { + return fmt.Errorf("removing paused annotation when resuming reconciliation for machine config %s: %v", machineConfigRef.Name, err) + } + } + } + + err = c.clusterClient.RemoveAnnotationInNamespace(ctx, cluster.ResourceType(), cluster.Name, pausedAnnotation, clusterCreds, cluster.Namespace) + if err != nil { + return fmt.Errorf("removing paused annotation when resuming cluster reconciliation: %v", err) + } + + if err = c.clusterClient.RemoveAnnotationInNamespace(ctx, + cluster.ResourceType(), + cluster.Name, + v1alpha1.ManagedByCLIAnnotation, + clusterCreds, + cluster.Namespace, + ); err != nil { + return fmt.Errorf("removing managed by CLI annotation when resuming cluster reconciliation: %v", err) + } + + return nil +} + // ResumeCAPIWorkloadClusters resumes all workload CAPI clusters except the management cluster. func (c *ClusterManager) ResumeCAPIWorkloadClusters(ctx context.Context, managementCluster *types.Cluster) error { clusters, err := c.clusterClient.GetClusters(ctx, managementCluster) @@ -693,6 +760,21 @@ func (c *ClusterManager) ResumeCAPIWorkloadClusters(ctx context.Context, managem return nil } +// AllowDeleteWhilePaused allows the deletion of paused clusters. 
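For context on the resume path above: EKS-A reconciliation is gated on a pause annotation (the tests below spell out the key, anywhere.eks.amazonaws.com/paused), so resuming a cluster amounts to removing that annotation from the datacenter config, any machine configs, and the Cluster object, along with the managed-by-CLI marker. A minimal sketch of the annotation-gate idea, not the clusterClient calls themselves:

```go
package main

import "fmt"

const pausedAnnotation = "anywhere.eks.amazonaws.com/paused"

type object struct{ annotations map[string]string }

func (o *object) pause()         { o.annotations[pausedAnnotation] = "true" }
func (o *object) resume()        { delete(o.annotations, pausedAnnotation) }
func (o *object) isPaused() bool { _, ok := o.annotations[pausedAnnotation]; return ok }

func main() {
	c := &object{annotations: map[string]string{}}
	c.pause()
	fmt.Println(c.isPaused()) // true: controllers skip reconciliation
	c.resume()
	fmt.Println(c.isPaused()) // false: reconciliation resumes
}
```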
+func (c *ClusterManager) AllowDeleteWhilePaused(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error { + return c.allowDeleteWhilePaused(ctx, cluster, clusterSpec.Cluster) +} + +func (c *ClusterManager) allowDeleteWhilePaused(ctx context.Context, clusterCreds *types.Cluster, cluster *v1alpha1.Cluster) error { + allowDelete := map[string]string{v1alpha1.AllowDeleteWhenPausedAnnotation: "true"} + + if err := c.clusterClient.UpdateAnnotationInNamespace(ctx, cluster.ResourceType(), cluster.Name, allowDelete, clusterCreds, cluster.Namespace); err != nil { + return fmt.Errorf("updating paused annotation in cluster reconciliation: %v", err) + } + + return nil +} + func (c *ClusterManager) PauseEKSAControllerReconcile(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec, provider providers.Provider) error { if clusterSpec.Cluster.IsSelfManaged() { return c.pauseEksaReconcileForManagementAndWorkloadClusters(ctx, cluster, clusterSpec, provider) diff --git a/pkg/clustermanager/cluster_manager_test.go b/pkg/clustermanager/cluster_manager_test.go index 476cfca75239..f2c00e0863ef 100644 --- a/pkg/clustermanager/cluster_manager_test.go +++ b/pkg/clustermanager/cluster_manager_test.go @@ -771,7 +771,7 @@ func TestPauseEKSAControllerReconcileWorkloadCluster(t *testing.T) { tt.Expect(tt.clusterManager.PauseEKSAControllerReconcile(tt.ctx, tt.cluster, tt.clusterSpec, tt.mocks.provider)).To(Succeed()) } -func TestPauseEKSAControllerReconcileWorkloadClusterUpdateAnnotationError(t *testing.T) { +func TestResumeEKSAControllerReconcileWorkloadClusterUpdateAnnotationError(t *testing.T) { tt := newTest(t, clustermanager.WithRetrier(retrier.NewWithMaxRetries(1, 0))) tt.clusterSpec.Cluster = &v1alpha1.Cluster{ ObjectMeta: metav1.ObjectMeta{ @@ -788,15 +788,26 @@ func TestPauseEKSAControllerReconcileWorkloadClusterUpdateAnnotationError(t *tes }, } + datacenterConfig := &v1alpha1.VSphereDatacenterConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: tt.clusterName, + }, + Spec: v1alpha1.VSphereDatacenterConfigSpec{ + Insecure: true, + }, + } + pauseAnnotation := "anywhere.eks.amazonaws.com/paused" + tt.mocks.provider.EXPECT().DatacenterResourceType().Return(eksaVSphereDatacenterResourceType) tt.mocks.provider.EXPECT().MachineResourceType().Return("") - tt.mocks.client.EXPECT().UpdateAnnotationInNamespace(tt.ctx, eksaVSphereDatacenterResourceType, tt.clusterSpec.Cluster.Spec.DatacenterRef.Name, expectedPauseAnnotation, tt.cluster, "").Return(nil) - tt.mocks.client.EXPECT().UpdateAnnotationInNamespace(tt.ctx, eksaClusterResourceType, tt.clusterSpec.Cluster.Name, expectedPauseAnnotation, tt.cluster, "").Return(errors.New("pause eksa cluster error")) + tt.mocks.provider.EXPECT().DatacenterConfig(tt.clusterSpec).Return(datacenterConfig) + tt.mocks.client.EXPECT().RemoveAnnotationInNamespace(tt.ctx, eksaVSphereDatacenterResourceType, tt.clusterSpec.Cluster.Spec.DatacenterRef.Name, pauseAnnotation, tt.cluster, "").Return(nil) + tt.mocks.client.EXPECT().RemoveAnnotationInNamespace(tt.ctx, eksaClusterResourceType, tt.clusterSpec.Cluster.Name, pauseAnnotation, tt.cluster, "").Return(errors.New("pause eksa cluster error")) - tt.Expect(tt.clusterManager.PauseEKSAControllerReconcile(tt.ctx, tt.cluster, tt.clusterSpec, tt.mocks.provider)).NotTo(Succeed()) + tt.Expect(tt.clusterManager.ResumeEKSAControllerReconcile(tt.ctx, tt.cluster, tt.clusterSpec, tt.mocks.provider)).NotTo(Succeed()) } -func TestPauseEKSAControllerReconcileManagementCluster(t *testing.T) { +func 
TestResumeEKSAControllerReconcileManagementCluster(t *testing.T) { tt := newTest(t) tt.clusterSpec.Cluster = &v1alpha1.Cluster{ ObjectMeta: metav1.ObjectMeta{ @@ -813,6 +824,18 @@ func TestPauseEKSAControllerReconcileManagementCluster(t *testing.T) { }, } + tt.clusterSpec.Cluster.PauseReconcile() + + datacenterConfig := &v1alpha1.VSphereDatacenterConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: tt.clusterName, + }, + Spec: v1alpha1.VSphereDatacenterConfigSpec{ + Insecure: true, + }, + } + pauseAnnotation := "anywhere.eks.amazonaws.com/paused" + tt.mocks.client.EXPECT(). ListObjects(tt.ctx, eksaClusterResourceType, "", "", &v1alpha1.ClusterList{}). DoAndReturn(func(_ context.Context, _, _, _ string, obj *v1alpha1.ClusterList) error { @@ -851,34 +874,31 @@ func TestPauseEKSAControllerReconcileManagementCluster(t *testing.T) { }) tt.mocks.provider.EXPECT().DatacenterResourceType().Return(eksaVSphereDatacenterResourceType).Times(2) tt.mocks.provider.EXPECT().MachineResourceType().Return("").Times(2) - tt.mocks.client.EXPECT().UpdateAnnotationInNamespace(tt.ctx, eksaVSphereDatacenterResourceType, tt.clusterSpec.Cluster.Spec.DatacenterRef.Name, expectedPauseAnnotation, tt.cluster, "").Return(nil).Times(2) - tt.mocks.client.EXPECT().UpdateAnnotationInNamespace(tt.ctx, eksaClusterResourceType, tt.clusterSpec.Cluster.Name, expectedPauseAnnotation, tt.cluster, "").Return(nil) - tt.mocks.client.EXPECT().UpdateAnnotationInNamespace( + tt.mocks.provider.EXPECT().DatacenterConfig(tt.clusterSpec).Return(datacenterConfig) + tt.mocks.client.EXPECT().RemoveAnnotationInNamespace(tt.ctx, eksaVSphereDatacenterResourceType, tt.clusterSpec.Cluster.Spec.DatacenterRef.Name, pauseAnnotation, tt.cluster, "").Return(nil).Times(2) + tt.mocks.client.EXPECT().RemoveAnnotationInNamespace(tt.ctx, eksaClusterResourceType, tt.clusterSpec.Cluster.Name, pauseAnnotation, tt.cluster, "").Return(nil) + tt.mocks.client.EXPECT().RemoveAnnotationInNamespace( tt.ctx, eksaClusterResourceType, tt.clusterSpec.Cluster.Name, - map[string]string{ - v1alpha1.ManagedByCLIAnnotation: "true", - }, + v1alpha1.ManagedByCLIAnnotation, tt.cluster, "", ).Return(nil) - tt.mocks.client.EXPECT().UpdateAnnotationInNamespace(tt.ctx, eksaClusterResourceType, "workload-cluster-1", expectedPauseAnnotation, tt.cluster, "").Return(nil) - tt.mocks.client.EXPECT().UpdateAnnotationInNamespace( + tt.mocks.client.EXPECT().RemoveAnnotationInNamespace(tt.ctx, eksaClusterResourceType, "workload-cluster-1", pauseAnnotation, tt.cluster, "").Return(nil) + tt.mocks.client.EXPECT().RemoveAnnotationInNamespace( tt.ctx, eksaClusterResourceType, "workload-cluster-1", - map[string]string{ - v1alpha1.ManagedByCLIAnnotation: "true", - }, + v1alpha1.ManagedByCLIAnnotation, tt.cluster, "", ).Return(nil) - tt.Expect(tt.clusterManager.PauseEKSAControllerReconcile(tt.ctx, tt.cluster, tt.clusterSpec, tt.mocks.provider)).To(Succeed()) + tt.Expect(tt.clusterManager.ResumeEKSAControllerReconcile(tt.ctx, tt.cluster, tt.clusterSpec, tt.mocks.provider)).To(Succeed()) } -func TestPauseEKSAControllerReconcileManagementClusterListObjectsError(t *testing.T) { +func TestResumeEKSAControllerReconcileManagementClusterListObjectsError(t *testing.T) { tt := newTest(t, clustermanager.WithRetrier(retrier.NewWithMaxRetries(1, 0))) tt.clusterSpec.Cluster = &v1alpha1.Cluster{ ObjectMeta: metav1.ObjectMeta{ @@ -891,9 +911,20 @@ func TestPauseEKSAControllerReconcileManagementClusterListObjectsError(t *testin }, } + datacenterConfig := &v1alpha1.VSphereDatacenterConfig{ + ObjectMeta: metav1.ObjectMeta{ + 
Name: tt.clusterName, + }, + Spec: v1alpha1.VSphereDatacenterConfigSpec{ + Insecure: true, + }, + } + + tt.mocks.provider.EXPECT().DatacenterConfig(tt.clusterSpec).Return(datacenterConfig) + tt.mocks.client.EXPECT().ListObjects(tt.ctx, eksaClusterResourceType, "", "", &v1alpha1.ClusterList{}).Return(errors.New("list error")) - tt.Expect(tt.clusterManager.PauseEKSAControllerReconcile(tt.ctx, tt.cluster, tt.clusterSpec, tt.mocks.provider)).NotTo(Succeed()) + tt.Expect(tt.clusterManager.ResumeEKSAControllerReconcile(tt.ctx, tt.cluster, tt.clusterSpec, tt.mocks.provider)).NotTo(Succeed()) } func TestPauseEKSAControllerReconcileWorkloadClusterWithMachineConfig(t *testing.T) { @@ -1084,3 +1115,31 @@ func TestCreateRegistryCredSecretSuccess(t *testing.T) { err := tt.clusterManager.CreateRegistryCredSecret(tt.ctx, tt.cluster) tt.Expect(err).To(BeNil()) } + +func TestAllowDeleteWhilePaused(t *testing.T) { + tests := []struct { + name string + err error + }{ + { + name: "success allow delete while paused", + err: nil, + }, + { + name: "fail allow delete while paused", + err: fmt.Errorf("failure"), + }, + } + allowDelete := map[string]string{v1alpha1.AllowDeleteWhenPausedAnnotation: "true"} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + tt := newTest(t) + cluster := tt.clusterSpec.Cluster + tt.mocks.client.EXPECT().UpdateAnnotationInNamespace(tt.ctx, cluster.ResourceType(), cluster.Name, allowDelete, tt.cluster, cluster.Namespace).Return(test.err) + err := tt.clusterManager.AllowDeleteWhilePaused(tt.ctx, tt.cluster, tt.clusterSpec) + expectedErr := fmt.Errorf("updating paused annotation in cluster reconciliation: %v", test.err) + tt.Expect(err).To(Or(BeNil(), MatchError(expectedErr))) + }) + } +} diff --git a/pkg/clustermanager/eksa_installer.go b/pkg/clustermanager/eksa_installer.go index 86193c334419..6e7b27fe2cc7 100644 --- a/pkg/clustermanager/eksa_installer.go +++ b/pkg/clustermanager/eksa_installer.go @@ -247,6 +247,11 @@ func setManagerEnvVars(d *appsv1.Deployment, spec *cluster.Spec) { envVars = append(envVars, v1.EnvVar{Name: features.VSphereInPlaceEnvVar, Value: "true"}) } + // TODO: remove this feature flag when we support API server flags. 
+ if features.IsActive(features.APIServerExtraArgsEnabled()) { + envVars = append(envVars, v1.EnvVar{Name: features.APIServerExtraArgsEnabledEnvVar, Value: "true"}) + } + d.Spec.Template.Spec.Containers[0].Env = envVars } diff --git a/pkg/clustermanager/eksa_installer_test.go b/pkg/clustermanager/eksa_installer_test.go index 16875979f4ab..3d17b1738367 100644 --- a/pkg/clustermanager/eksa_installer_test.go +++ b/pkg/clustermanager/eksa_installer_test.go @@ -420,6 +420,26 @@ func TestSetManagerEnvVarsVSphereInPlaceUpgrade(t *testing.T) { g.Expect(deploy).To(Equal(want)) } +func TestSetManagerEnvVarsAPIServerExtraArgs(t *testing.T) { + g := NewWithT(t) + features.ClearCache() + t.Setenv(features.APIServerExtraArgsEnabledEnvVar, "true") + + deploy := deployment() + spec := test.NewClusterSpec() + want := deployment(func(d *appsv1.Deployment) { + d.Spec.Template.Spec.Containers[0].Env = []corev1.EnvVar{ + { + Name: "API_SERVER_EXTRA_ARGS_ENABLED", + Value: "true", + }, + } + }) + + clustermanager.SetManagerEnvVars(deploy, spec) + g.Expect(deploy).To(Equal(want)) +} + func TestEKSAInstallerNewUpgraderConfigMap(t *testing.T) { tt := newInstallerTest(t) diff --git a/pkg/clustermanager/eksa_mover.go b/pkg/clustermanager/eksa_mover.go new file mode 100644 index 000000000000..f2a72f66bb94 --- /dev/null +++ b/pkg/clustermanager/eksa_mover.go @@ -0,0 +1,137 @@ +package clustermanager + +import ( + "context" + "math" + "time" + + "github.com/go-logr/logr" + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + "github.com/aws/eks-anywhere/pkg/api/v1alpha1" + "github.com/aws/eks-anywhere/pkg/clients/kubernetes" + "github.com/aws/eks-anywhere/pkg/cluster" + "github.com/aws/eks-anywhere/pkg/retrier" +) + +// MoverOpt allows to customize a Mover on construction. +type MoverOpt func(*Mover) + +// Mover applies the cluster spec to the management cluster and waits +// until the changes are fully reconciled. +type Mover struct { + log logr.Logger + clientFactory ClientFactory + moveClusterTimeout time.Duration + retryBackOff time.Duration +} + +// NewMover builds an Mover. +func NewMover(log logr.Logger, clientFactory ClientFactory, opts ...MoverOpt) *Mover { + m := &Mover{ + log: log, + clientFactory: clientFactory, + moveClusterTimeout: applyClusterSpecTimeout, + retryBackOff: retryBackOff, + } + + for _, opt := range opts { + opt(m) + } + + return m +} + +// WithMoverNoTimeouts disables the timeout for all the waits and retries in management upgrader. +func WithMoverNoTimeouts() MoverOpt { + return func(a *Mover) { + maxTime := time.Duration(math.MaxInt64) + a.moveClusterTimeout = maxTime + } +} + +// WithMoverApplyClusterTimeout allows to configure how long the mover retries +// to apply the objects in case of failure. +// Generally only used in tests. +func WithMoverApplyClusterTimeout(timeout time.Duration) MoverOpt { + return func(m *Mover) { + m.moveClusterTimeout = timeout + } +} + +// WithMoverRetryBackOff allows to configure how long the mover waits between requests +// to update the cluster spec objects and check the status of the Cluster. +// Generally only used in tests. +func WithMoverRetryBackOff(backOff time.Duration) MoverOpt { + return func(m *Mover) { + m.retryBackOff = backOff + } +} + +// Move applies the cluster's namespace and spec without checking for reconcile conditions. 
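Before the implementation below, a condensed sketch of what Move does under its retry policy: pause the cluster on the source, then recreate the Cluster and each child object on the destination with server-managed fields cleared, treating "already exists" as success so a retried or repeated move stays idempotent. The toy types here only model that idempotency; they stand in for the real kubernetes.Client.

```go
package main

import "fmt"

type object struct {
	name            string
	resourceVersion string // server-managed; must be cleared before Create on a new cluster
	uid             string // likewise
}

type fakeCluster map[string]object

// create tolerates an existing object, like apierrors.IsAlreadyExists in the real Mover.
func (c fakeCluster) create(o object) error {
	if _, ok := c[o.name]; ok {
		return nil
	}
	o.resourceVersion, o.uid = "", ""
	c[o.name] = o
	return nil
}

func main() {
	dst := fakeCluster{}
	obj := object{name: "my-cluster", resourceVersion: "42", uid: "abc"}
	fmt.Println(dst.create(obj)) // nil
	fmt.Println(dst.create(obj)) // nil again: safe to retry
	fmt.Printf("%+v\n", dst["my-cluster"])
}
```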
+func (m *Mover) Move(ctx context.Context, spec *cluster.Spec, fromClient, toClient kubernetes.Client) error { + m.log.V(3).Info("Moving the cluster object") + err := retrier.New( + m.moveClusterTimeout, + retrier.WithRetryPolicy(retrier.BackOffPolicy(m.retryBackOff)), + ).Retry(func() error { + // read the cluster from bootstrap + cluster := &v1alpha1.Cluster{} + if err := fromClient.Get(ctx, spec.Cluster.Name, spec.Cluster.Namespace, cluster); err != nil { + return errors.Wrapf(err, "reading cluster from source") + } + + // pause cluster on bootstrap + cluster.PauseReconcile() + if err := fromClient.Update(ctx, cluster); err != nil { + return errors.Wrapf(err, "updating cluster on source") + } + + if err := moveClusterResource(ctx, cluster, toClient); err != nil { + return err + } + + if err := moveChildObjects(ctx, spec, fromClient, toClient); err != nil { + return err + } + + return nil + }) + + return err +} + +func moveClusterResource(ctx context.Context, cluster *v1alpha1.Cluster, client kubernetes.Client) error { + cluster.ResourceVersion = "" + cluster.UID = "" + + // move eksa cluster + if err := client.Create(ctx, cluster); err != nil && !apierrors.IsAlreadyExists(err) { + return errors.Wrapf(err, "moving cluster %s", cluster.Name) + } + + return nil +} + +func moveChildObjects(ctx context.Context, spec *cluster.Spec, fromClient, toClient kubernetes.Client) error { + // read and move child objects + for _, child := range spec.ChildObjects() { + obj := &unstructured.Unstructured{} + obj.SetGroupVersionKind(child.GetObjectKind().GroupVersionKind()) + if err := fromClient.Get(ctx, child.GetName(), child.GetNamespace(), obj); err != nil { + return errors.Wrapf(err, "reading child object %s %s", child.GetObjectKind().GroupVersionKind().Kind, child.GetName()) + } + + obj.SetResourceVersion("") + obj.SetUID("") + obj.SetOwnerReferences(nil) + + if err := toClient.Create(ctx, obj); err != nil && !apierrors.IsAlreadyExists(err) { + return errors.Wrapf(err, "moving child object %s %s", obj.GetObjectKind().GroupVersionKind().Kind, obj.GetName()) + } + } + + return nil +} diff --git a/pkg/clustermanager/eksa_mover_test.go b/pkg/clustermanager/eksa_mover_test.go new file mode 100644 index 000000000000..e151358f9d29 --- /dev/null +++ b/pkg/clustermanager/eksa_mover_test.go @@ -0,0 +1,126 @@ +package clustermanager_test + +import ( + "context" + "testing" + "time" + + "github.com/go-logr/logr" + "github.com/golang/mock/gomock" + "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/aws/eks-anywhere/internal/test" + "github.com/aws/eks-anywhere/pkg/clients/kubernetes" + "github.com/aws/eks-anywhere/pkg/cluster" + "github.com/aws/eks-anywhere/pkg/clustermanager" + "github.com/aws/eks-anywhere/pkg/clustermanager/mocks" + "github.com/aws/eks-anywhere/pkg/controller/clientutil" + "github.com/aws/eks-anywhere/pkg/types" +) + +type moverTest struct { + gomega.Gomega + tb testing.TB + clientFactory *mocks.MockClientFactory + ctx context.Context + spec *cluster.Spec + fromClient kubernetes.Client + toClient kubernetes.Client + log logr.Logger + mgmtCluster *types.Cluster + bootstrap *types.Cluster +} + +func newMoverTest(tb testing.TB) *moverTest { + ctrl := gomock.NewController(tb) + return &moverTest{ + tb: tb, + Gomega: gomega.NewWithT(tb), + clientFactory: mocks.NewMockClientFactory(ctrl), + ctx: context.Background(), + spec: test.VSphereClusterSpec(tb, tb.Name()), + log: test.NewNullLogger(), + bootstrap: &types.Cluster{ 
+ KubeconfigFile: "bootstrap-config", + }, + mgmtCluster: &types.Cluster{ + KubeconfigFile: "my-config", + }, + } +} + +func (a *moverTest) buildClients(fromObjs, toObjs []kubernetes.Object) { + a.fromClient = test.NewFakeKubeClient(clientutil.ObjectsToClientObjects(fromObjs)...) + a.toClient = test.NewFakeKubeClient(clientutil.ObjectsToClientObjects(toObjs)...) +} + +func TestMoverSuccess(t *testing.T) { + tt := newMoverTest(t) + objs := tt.spec.ClusterAndChildren() + tt.buildClients(objs, nil) + m := clustermanager.NewMover(tt.log, tt.clientFactory, + clustermanager.WithMoverRetryBackOff(time.Millisecond), + clustermanager.WithMoverNoTimeouts(), + ) + + tt.Expect(m.Move(tt.ctx, tt.spec, tt.fromClient, tt.toClient)).To(gomega.Succeed()) + + for _, obj := range objs { + u := &unstructured.Unstructured{} + u.SetGroupVersionKind(obj.GetObjectKind().GroupVersionKind()) + tt.Expect(tt.toClient.Get(tt.ctx, obj.GetName(), obj.GetNamespace(), u)).To(gomega.Succeed()) + original, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + tt.Expect(err).To(gomega.Succeed()) + tt.Expect(u.Object["spec"]).To(gomega.BeComparableTo(original["spec"])) + } +} + +func TestMoverFailReadCluster(t *testing.T) { + tt := newMoverTest(t) + tt.buildClients(nil, nil) + m := clustermanager.NewMover(tt.log, tt.clientFactory, + clustermanager.WithMoverRetryBackOff(time.Millisecond), + clustermanager.WithMoverApplyClusterTimeout(time.Millisecond), + ) + err := m.Move(tt.ctx, tt.spec, tt.fromClient, tt.toClient) + + tt.Expect(err).To(gomega.MatchError(gomega.ContainSubstring("reading cluster from source"))) +} + +func TestMoverFailGetChildren(t *testing.T) { + tt := newMoverTest(t) + objs := []kubernetes.Object{tt.spec.Cluster} + tt.buildClients(objs, nil) + m := clustermanager.NewMover(tt.log, tt.clientFactory, + clustermanager.WithMoverRetryBackOff(time.Millisecond), + clustermanager.WithMoverApplyClusterTimeout(time.Millisecond), + ) + + err := m.Move(tt.ctx, tt.spec, tt.fromClient, tt.toClient) + tt.Expect(err).To(gomega.MatchError(gomega.ContainSubstring("reading child object"))) +} + +func TestMoverAlreadyMoved(t *testing.T) { + tt := newMoverTest(t) + objs := tt.spec.ClusterAndChildren() + tt.buildClients(objs, objs) + m := clustermanager.NewMover(tt.log, tt.clientFactory, + clustermanager.WithMoverRetryBackOff(time.Millisecond), + clustermanager.WithMoverApplyClusterTimeout(time.Millisecond), + ) + + err := m.Move(tt.ctx, tt.spec, tt.fromClient, tt.toClient) + tt.Expect(err).To(gomega.Succeed()) + + for _, obj := range objs { + u := &unstructured.Unstructured{} + u.SetGroupVersionKind(obj.GetObjectKind().GroupVersionKind()) + tt.Expect(tt.toClient.Get(tt.ctx, obj.GetName(), obj.GetNamespace(), u)).To(gomega.Succeed()) + original, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + tt.Expect(err).To(gomega.Succeed()) + // the entire object including metadata/status should be equal if the object already exists in dst + tt.Expect(u.Object).To(gomega.BeComparableTo(original)) + } +} diff --git a/pkg/constants/constants.go b/pkg/constants/constants.go index 178e2b58ff13..3eb41b91e27f 100644 --- a/pkg/constants/constants.go +++ b/pkg/constants/constants.go @@ -115,3 +115,13 @@ const ( // EKSACLIFieldManager is the owner name for fields applied by the EKS-A CLI. const EKSACLIFieldManager = "eks-a-cli" + +// SupportedProviders is the list of supported providers for generating EKS-A cluster spec. 
+var SupportedProviders = []string{ + VSphereProviderName, + CloudStackProviderName, + TinkerbellProviderName, + DockerProviderName, + NutanixProviderName, + SnowProviderName, +} diff --git a/pkg/curatedpackages/curatedpackages.go b/pkg/curatedpackages/curatedpackages.go index 44602e7ee5ee..e68542c2d4be 100644 --- a/pkg/curatedpackages/curatedpackages.go +++ b/pkg/curatedpackages/curatedpackages.go @@ -5,6 +5,7 @@ import ( "context" "fmt" "strings" + "time" "github.com/go-logr/logr" "oras.land/oras-go/pkg/content" @@ -15,12 +16,13 @@ import ( "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/logger" + "github.com/aws/eks-anywhere/pkg/retrier" "github.com/aws/eks-anywhere/pkg/types" releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1" ) const ( - license = `The Amazon EKS Anywhere Curated Packages are only available to customers with the + license = `The Amazon EKS Anywhere Curated Packages are only available to customers with the Amazon EKS Anywhere Enterprise Subscription` width = 86 ) @@ -70,7 +72,15 @@ func PrintLicense() { func PullLatestBundle(ctx context.Context, log logr.Logger, artifact string) ([]byte, error) { puller := artifacts.NewRegistryPuller(log) - data, err := puller.Pull(ctx, artifact, "") + var data []byte + err := retrier.Retry(5, 200*time.Millisecond, func() error { + d, err := puller.Pull(ctx, artifact, "") + if err != nil { + return err + } + data = d + return nil + }) if err != nil { return nil, fmt.Errorf("unable to pull artifacts %v", err) } diff --git a/pkg/curatedpackages/mocks/installer.go b/pkg/curatedpackages/mocks/installer.go index 7dbb146c16d9..fa67f8b523f1 100644 --- a/pkg/curatedpackages/mocks/installer.go +++ b/pkg/curatedpackages/mocks/installer.go @@ -176,6 +176,20 @@ func (mr *MockChartManagerMockRecorder) InstallChart(ctx, chart, ociURI, version return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstallChart", reflect.TypeOf((*MockChartManager)(nil).InstallChart), ctx, chart, ociURI, version, kubeconfigFilePath, namespace, valueFilePath, skipCRDs, values) } +// RegistryLogin mocks base method. +func (m *MockChartManager) RegistryLogin(ctx context.Context, registry, username, password string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RegistryLogin", ctx, registry, username, password) + ret0, _ := ret[0].(error) + return ret0 +} + +// RegistryLogin indicates an expected call of RegistryLogin. +func (mr *MockChartManagerMockRecorder) RegistryLogin(ctx, registry, username, password interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegistryLogin", reflect.TypeOf((*MockChartManager)(nil).RegistryLogin), ctx, registry, username, password) +} + // MockKubeDeleter is a mock of KubeDeleter interface. 
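The PullLatestBundle change above wraps the registry pull in a retrier (five attempts, 200 ms apart) and keeps the payload captured by the closure once a pull succeeds. A self-contained sketch of that capture-from-closure retry pattern, with a local helper standing in for the project's retrier package:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// retry runs fn up to attempts times, sleeping backoff between failures.
func retry(attempts int, backoff time.Duration, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		time.Sleep(backoff)
	}
	return err
}

func main() {
	var data []byte
	calls := 0
	err := retry(5, 200*time.Millisecond, func() error {
		calls++
		if calls < 3 { // simulate transient registry failures
			return errors.New("transient pull error")
		}
		data = []byte("bundle") // captured by the closure, visible after Retry returns
		return nil
	})
	fmt.Println(err, len(data), calls) // <nil> 6 3
}
```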
type MockKubeDeleter struct { ctrl *gomock.Controller diff --git a/pkg/curatedpackages/packagecontrollerclient.go b/pkg/curatedpackages/packagecontrollerclient.go index 7188e474fe3f..493b6ed107eb 100644 --- a/pkg/curatedpackages/packagecontrollerclient.go +++ b/pkg/curatedpackages/packagecontrollerclient.go @@ -34,8 +34,10 @@ import ( var secretsValueYaml string const ( - eksaDefaultRegion = "us-west-2" - valueFileName = "values.yaml" + eksaDefaultRegion = "us-west-2" + valueFileName = "values.yaml" + defaultRegistryMirrorUsername = "username" + defaultRegistryMirrorPassword = "password" ) type PackageControllerClientOpt func(client *PackageControllerClient) @@ -95,6 +97,7 @@ type ChartUninstaller interface { type ChartManager interface { ChartInstaller ChartUninstaller + RegistryLogin(ctx context.Context, registry, username, password string) error } // NewPackageControllerClientFullLifecycle creates a PackageControllerClient @@ -304,12 +307,14 @@ func (pc *PackageControllerClient) CreateHelmOverrideValuesYaml() (string, []byt func (pc *PackageControllerClient) generateHelmOverrideValues() ([]byte, error) { var err error - endpoint, username, password, caCertContent, insecureSkipVerify := "", "", "", "", "false" + endpoint, username, password, caCertContent, insecureSkipVerify := "", defaultRegistryMirrorUsername, defaultRegistryMirrorPassword, "", "false" if pc.registryMirror != nil { endpoint = pc.registryMirror.BaseRegistry - username, password, err = config.ReadCredentials() - if err != nil { - return []byte{}, err + if pc.registryMirror.Auth { + username, password, err = config.ReadCredentials() + if err != nil { + return []byte{}, err + } } caCertContent = pc.registryMirror.CACertContent if pc.registryMirror.InsecureSkipVerify { @@ -492,6 +497,17 @@ func (pc *PackageControllerClient) Reconcile(ctx context.Context, logger logr.Lo registry := registrymirror.FromCluster(cluster) + if registry != nil && registry.Auth { + rUsername, rPassword, err := config.ReadCredentialsFromSecret(ctx, client) + if err != nil { + return err + } + + if err := pc.chartManager.RegistryLogin(ctx, registry.BaseRegistry, rUsername, rPassword); err != nil { + return err + } + } + // No Kubeconfig is passed. This is intentional. The helm executable will // get that configuration from its environment. if err := pc.EnableFullLifecycle(ctx, logger, cluster.Name, "", image, registry, @@ -629,3 +645,10 @@ func WithRegistryAccessTester(registryTester RegistryAccessTester) func(client * config.registryAccessTester = registryTester } } + +// WithSkipWait sets skipWaitForPackageBundle. 
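Immediately below, WithSkipWait is added as another functional option on PackageControllerClient; that pattern is what lets a new knob like this appear without touching existing constructor call sites. A generic sketch of the pattern with illustrative names (none of these are the real API):

```go
package main

import "fmt"

type client struct {
	skipWait bool
	region   string
}

type opt func(*client)

func withSkipWait() opt       { return func(c *client) { c.skipWait = true } }
func withRegion(r string) opt { return func(c *client) { c.region = r } }

// newClient sets defaults first, then lets each option override them.
func newClient(opts ...opt) *client {
	c := &client{region: "us-west-2"}
	for _, o := range opts {
		o(c)
	}
	return c
}

func main() {
	fmt.Printf("%+v\n", *newClient(withSkipWait(), withRegion("eu-west-1")))
	// {skipWait:true region:eu-west-1}
}
```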
+func WithSkipWait() func(client *PackageControllerClient) { + return func(config *PackageControllerClient) { + config.skipWaitForPackageBundle = true + } +} diff --git a/pkg/curatedpackages/packagecontrollerclient_test.go b/pkg/curatedpackages/packagecontrollerclient_test.go index 2af94ff9bc93..dcdc9e44585d 100644 --- a/pkg/curatedpackages/packagecontrollerclient_test.go +++ b/pkg/curatedpackages/packagecontrollerclient_test.go @@ -524,6 +524,55 @@ func TestEnableWithEmptyProxy(t *testing.T) { } } +func TestEnableWithSkipWait(t *testing.T) { + for _, tt := range newPackageControllerTests(t) { + tt.command = curatedpackages.NewPackageControllerClient( + tt.chartManager, tt.kubectl, "billy", tt.kubeConfig, tt.chart, + tt.registryMirror, + curatedpackages.WithEksaSecretAccessKey(tt.eksaAccessKey), + curatedpackages.WithEksaRegion(tt.eksaRegion), + curatedpackages.WithEksaAccessKeyId(tt.eksaAccessID), + curatedpackages.WithSkipWait(), + curatedpackages.WithManagementClusterName(tt.clusterName), + curatedpackages.WithValuesFileWriter(tt.writer), + ) + clusterName := fmt.Sprintf("clusterName=%s", "billy") + valueFilePath := filepath.Join("billy", filewriter.DefaultTmpFolder, valueFileName) + ociURI := fmt.Sprintf("%s%s", "oci://", tt.registryMirror.ReplaceRegistry(tt.chart.Image())) + sourceRegistry, defaultRegistry, defaultImageRegistry := tt.command.GetCuratedPackagesRegistries(context.Background()) + sourceRegistry = fmt.Sprintf("sourceRegistry=%s", sourceRegistry) + defaultRegistry = fmt.Sprintf("defaultRegistry=%s", defaultRegistry) + defaultImageRegistry = fmt.Sprintf("defaultImageRegistry=%s", defaultImageRegistry) + if tt.registryMirror != nil { + t.Setenv("REGISTRY_USERNAME", "username") + t.Setenv("REGISTRY_PASSWORD", "password") + } else { + if tt.eksaRegion == "" { + tt.eksaRegion = "us-west-2" + } + defaultImageRegistry = strings.ReplaceAll(defaultImageRegistry, "us-west-2", tt.eksaRegion) + } + values := []string{sourceRegistry, defaultRegistry, defaultImageRegistry, clusterName} + if (tt.eksaAccessID == "" || tt.eksaAccessKey == "") && tt.registryMirror == nil { + values = append(values, "cronjob.suspend=true") + } + tt.chartManager.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, constants.EksaPackagesName, valueFilePath, false, values).Return(nil) + tt.kubectl.EXPECT(). + GetObject(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(getPBCSuccess(t)). + AnyTimes() + tt.kubectl.EXPECT(). + HasResource(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(_, _, _, _, _ interface{}) (bool, error) { return true, nil }). 
+ AnyTimes() + + err := tt.command.Enable(tt.ctx) + if err != nil { + t.Errorf("Install Controller Should succeed when installation passes") + } + } +} + func TestEnableFail(t *testing.T) { for _, tt := range newPackageControllerTests(t) { clusterName := fmt.Sprintf("clusterName=%s", "billy") @@ -1387,6 +1436,145 @@ func TestReconcile(s *testing.T) { t.Errorf("expected packages client error, got %s", err) } }) + + s.Run("golden path with registry mirror", func(t *testing.T) { + ctx := context.Background() + log := testr.New(t) + cluster := newReconcileTestCluster() + ctrl := gomock.NewController(t) + k := mocks.NewMockKubectlRunner(ctrl) + cm := mocks.NewMockChartManager(ctrl) + bundles := createBundle(cluster) + bundles.Spec.VersionsBundles[0].KubeVersion = string(cluster.Spec.KubernetesVersion) + bundles.ObjectMeta.Name = cluster.Spec.BundlesRef.Name + bundles.ObjectMeta.Namespace = cluster.Spec.BundlesRef.Namespace + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: constants.EksaSystemNamespace, + Name: cluster.Name + "-kubeconfig", + }, + } + registrySecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: constants.EksaSystemNamespace, + Name: "registry-credentials", + }, + } + eksaRelease := createEKSARelease(cluster, bundles) + cluster.Spec.BundlesRef = nil + cluster.Spec.RegistryMirrorConfiguration = &anywherev1.RegistryMirrorConfiguration{ + Endpoint: "1.2.3.4", + Port: "443", + Authenticate: true, + OCINamespaces: []anywherev1.OCINamespace{ + { + Namespace: "ecr-public", + Registry: "public.ecr.aws", + }, + }, + } + t.Setenv("REGISTRY_USERNAME", "username") + t.Setenv("REGISTRY_PASSWORD", "password") + + objs := []runtime.Object{cluster, bundles, secret, eksaRelease, registrySecret} + fakeClient := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() + cm.EXPECT().RegistryLogin(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + cm.EXPECT().InstallChart(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + + pcc := curatedpackages.NewPackageControllerClientFullLifecycle(log, cm, k, nil) + err := pcc.Reconcile(ctx, log, fakeClient, cluster) + if err != nil { + t.Errorf("expected nil error, got %s", err) + } + }) + + s.Run("registry mirror helm login fails", func(t *testing.T) { + ctx := context.Background() + log := testr.New(t) + cluster := newReconcileTestCluster() + ctrl := gomock.NewController(t) + k := mocks.NewMockKubectlRunner(ctrl) + cm := mocks.NewMockChartManager(ctrl) + bundles := createBundle(cluster) + bundles.Spec.VersionsBundles[0].KubeVersion = string(cluster.Spec.KubernetesVersion) + bundles.ObjectMeta.Name = cluster.Spec.BundlesRef.Name + bundles.ObjectMeta.Namespace = cluster.Spec.BundlesRef.Namespace + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: constants.EksaSystemNamespace, + Name: cluster.Name + "-kubeconfig", + }, + } + registrySecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: constants.EksaSystemNamespace, + Name: "registry-credentials", + }, + } + eksaRelease := createEKSARelease(cluster, bundles) + cluster.Spec.BundlesRef = nil + cluster.Spec.RegistryMirrorConfiguration = &anywherev1.RegistryMirrorConfiguration{ + Endpoint: "1.2.3.4", + Port: "443", + Authenticate: true, + OCINamespaces: []anywherev1.OCINamespace{ + { + Namespace: "ecr-public", + Registry: "public.ecr.aws", + }, + }, + } + t.Setenv("REGISTRY_USERNAME", "username") + t.Setenv("REGISTRY_PASSWORD", "password") + 
+ objs := []runtime.Object{cluster, bundles, secret, eksaRelease, registrySecret} + fakeClient := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() + cm.EXPECT().RegistryLogin(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("login error")) + pcc := curatedpackages.NewPackageControllerClientFullLifecycle(log, cm, k, nil) + err := pcc.Reconcile(ctx, log, fakeClient, cluster) + if err == nil { + t.Errorf("expected error, got %s", err) + } + }) + + s.Run("registry mirror secret not found error", func(t *testing.T) { + ctx := context.Background() + log := testr.New(t) + cluster := newReconcileTestCluster() + ctrl := gomock.NewController(t) + k := mocks.NewMockKubectlRunner(ctrl) + cm := mocks.NewMockChartManager(ctrl) + bundles := createBundle(cluster) + bundles.Spec.VersionsBundles[0].KubeVersion = string(cluster.Spec.KubernetesVersion) + bundles.ObjectMeta.Name = cluster.Spec.BundlesRef.Name + bundles.ObjectMeta.Namespace = cluster.Spec.BundlesRef.Namespace + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: constants.EksaSystemNamespace, + Name: cluster.Name + "-kubeconfig", + }, + } + eksaRelease := createEKSARelease(cluster, bundles) + cluster.Spec.BundlesRef = nil + cluster.Spec.RegistryMirrorConfiguration = &anywherev1.RegistryMirrorConfiguration{ + Endpoint: "1.2.3.4", + Port: "443", + Authenticate: true, + OCINamespaces: []anywherev1.OCINamespace{ + { + Namespace: "ecr-public", + Registry: "public.ecr.aws", + }, + }, + } + objs := []runtime.Object{cluster, bundles, secret, eksaRelease} + fakeClient := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() + pcc := curatedpackages.NewPackageControllerClientFullLifecycle(log, cm, k, nil) + err := pcc.Reconcile(ctx, log, fakeClient, cluster) + if err == nil || !strings.Contains(err.Error(), "not found") { + t.Errorf("expected error, got %s", err) + } + }) } func newReconcileTestCluster() *anywherev1.Cluster { diff --git a/pkg/curatedpackages/packageinstaller.go b/pkg/curatedpackages/packageinstaller.go index ba473be21c43..02f93cd48571 100644 --- a/pkg/curatedpackages/packageinstaller.go +++ b/pkg/curatedpackages/packageinstaller.go @@ -67,6 +67,23 @@ func (pi *Installer) InstallCuratedPackages(ctx context.Context) { } } +// UpgradeCuratedPackages upgrades curated packages as part of the cluster upgrade. 
+func (pi *Installer) UpgradeCuratedPackages(ctx context.Context) { + if IsPackageControllerDisabled(pi.spec.Cluster) { + logger.Info("Package controller disabled") + return + } + PrintLicense() + if err := pi.installPackagesController(ctx); err != nil { + logger.MarkWarning("Failed to upgrade the optional EKS-A Curated Package Controller.", "warning", err) + return + } + + if err := pi.installPackages(ctx); err != nil { + logger.MarkWarning("Failed upgrading curated packages on the cluster.", "error", err) + } +} + func (pi *Installer) installPackagesController(ctx context.Context) error { logger.Info("Enabling curated packages on the cluster") err := pi.packageController.Enable(ctx) diff --git a/pkg/curatedpackages/regional_registry.go b/pkg/curatedpackages/regional_registry.go index 077cac7188bf..c977282102ab 100644 --- a/pkg/curatedpackages/regional_registry.go +++ b/pkg/curatedpackages/regional_registry.go @@ -17,7 +17,7 @@ import ( const ( devRegionalECR string = "067575901363.dkr.ecr.us-west-2.amazonaws.com" devRegionalPublicECR string = "public.ecr.aws/x3k6m8v0" - stagingRegionalECR string = "TODO.dkr.ecr.us-west-2.amazonaws.com" + stagingRegionalECR string = "067575901363.dkr.ecr.us-west-2.amazonaws.com" ) var prodRegionalECRMap = map[string]string{ diff --git a/pkg/curatedpackages/testdata/values_empty.yaml b/pkg/curatedpackages/testdata/values_empty.yaml index 0b869c7592a8..65970c5779da 100644 --- a/pkg/curatedpackages/testdata/values_empty.yaml +++ b/pkg/curatedpackages/testdata/values_empty.yaml @@ -1,7 +1,7 @@ registryMirrorSecret: endpoint: "" - username: "" - password: "" + username: "dXNlcm5hbWU=" + password: "cGFzc3dvcmQ=" cacertcontent: "" insecure: "ZmFsc2U=" awsSecret: diff --git a/pkg/curatedpackages/testdata/values_empty_registrymirrorsecret.yaml b/pkg/curatedpackages/testdata/values_empty_registrymirrorsecret.yaml index 0e7b260c6859..4c3038dcce45 100644 --- a/pkg/curatedpackages/testdata/values_empty_registrymirrorsecret.yaml +++ b/pkg/curatedpackages/testdata/values_empty_registrymirrorsecret.yaml @@ -1,7 +1,7 @@ registryMirrorSecret: endpoint: "" - username: "" - password: "" + username: "dXNlcm5hbWU=" + password: "cGFzc3dvcmQ=" cacertcontent: "" insecure: "ZmFsc2U=" awsSecret: diff --git a/pkg/dependencies/factory.go b/pkg/dependencies/factory.go index 66966a432b0d..60f855589f86 100644 --- a/pkg/dependencies/factory.go +++ b/pkg/dependencies/factory.go @@ -94,7 +94,7 @@ type Dependencies struct { closers []types.Closer CliConfig *cliconfig.CliConfig CreateCliConfig *cliconfig.CreateClusterCLIConfig - PackageInstaller interfaces.PackageInstaller + PackageManager interfaces.PackageManager BundleRegistry curatedpackages.BundleRegistry PackageControllerClient *curatedpackages.PackageControllerClient PackageClient curatedpackages.PackageHandler @@ -115,6 +115,7 @@ type Dependencies struct { EksaInstaller *clustermanager.EKSAInstaller DeleteClusterDefaulter cli.DeleteClusterDefaulter ClusterDeleter clustermanager.Deleter + ClusterMover *clustermanager.Mover } // KubeClients defines super struct that exposes all behavior. @@ -340,6 +341,12 @@ func (f *Factory) WithDockerLogin() *Factory { } func (f *Factory) WithExecutableBuilder() *Factory { + // Ensure the file writer is created before the tools container is launched. This is necessary + // because we bind mount the cluster directory into the tools container. If the directory + // doesn't exist, dockerd (running as root) creates the hostpath for the bind mount with root + // ownership. 
This prevents further files from being written to the cluster directory. + f.WithWriter() + if f.executablesConfig.useDockerContainer { f.WithExecutableImage().WithDocker() if f.registryMirror != nil && f.registryMirror.Auth { @@ -1216,6 +1223,26 @@ func (f *Factory) WithClusterDeleter() *Factory { return f } +// WithClusterMover builds a cluster mover. +func (f *Factory) WithClusterMover() *Factory { + f.WithLogger().WithUnAuthKubeClient().WithLogger() + + f.buildSteps = append(f.buildSteps, func(_ context.Context) error { + var opts []clustermanager.MoverOpt + if f.config.noTimeouts { + opts = append(opts, clustermanager.WithMoverNoTimeouts()) + } + + f.dependencies.ClusterMover = clustermanager.NewMover( + f.dependencies.Logger, + f.dependencies.UnAuthKubeClient, + opts..., + ) + return nil + }) + return f +} + // WithValidatorClients builds KubeClients. func (f *Factory) WithValidatorClients() *Factory { f.WithKubectl().WithUnAuthKubeClient() @@ -1295,16 +1322,17 @@ func (f *Factory) WithGitOpsFlux(clusterConfig *v1alpha1.Cluster, fluxConfig *v1 return f } -func (f *Factory) WithPackageInstaller(spec *cluster.Spec, packagesLocation, kubeConfig string) *Factory { +// WithPackageManager builds a package manager. +func (f *Factory) WithPackageManager(spec *cluster.Spec, packagesLocation, kubeConfig string) *Factory { f.WithKubectl().WithPackageControllerClient(spec, kubeConfig).WithPackageClient() - f.buildSteps = append(f.buildSteps, func(ctx context.Context) error { - if f.dependencies.PackageInstaller != nil { + f.buildSteps = append(f.buildSteps, func(_ context.Context) error { + if f.dependencies.PackageManager != nil { return nil } managementClusterName := getManagementClusterName(spec) mgmtKubeConfig := kubeconfig.ResolveFilename(kubeConfig, managementClusterName) - f.dependencies.PackageInstaller = curatedpackages.NewInstaller( + f.dependencies.PackageManager = curatedpackages.NewInstaller( f.dependencies.Kubectl, f.dependencies.PackageClient, f.dependencies.PackageControllerClient, @@ -1317,10 +1345,18 @@ func (f *Factory) WithPackageInstaller(spec *cluster.Spec, packagesLocation, kub return f } -func (f *Factory) WithPackageControllerClient(spec *cluster.Spec, kubeConfig string) *Factory { +// WithPackageManagerWithoutWait builds a package manager that doesn't wait for active bundles. +func (f *Factory) WithPackageManagerWithoutWait(spec *cluster.Spec, packagesLocation, kubeConfig string) *Factory { + f.WithPackageControllerClient(spec, kubeConfig, curatedpackages.WithSkipWait()). + WithPackageManager(spec, packagesLocation, kubeConfig) + return f +} + +// WithPackageControllerClient builds a client for package controller. 
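+// Caller-supplied opts are appended after the default options, so wrappers such as WithPackageManagerWithoutWait can adjust individual settings (for example, skipping the wait for active package bundles) without rebuilding the whole option list.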
+func (f *Factory) WithPackageControllerClient(spec *cluster.Spec, kubeConfig string, opts ...curatedpackages.PackageControllerClientOpt) *Factory { f.WithHelm(helm.WithInsecure()).WithKubectl() - f.buildSteps = append(f.buildSteps, func(ctx context.Context) error { + f.buildSteps = append(f.buildSteps, func(_ context.Context) error { if f.dependencies.PackageControllerClient != nil || spec == nil { return nil } @@ -1347,13 +1383,8 @@ func (f *Factory) WithPackageControllerClient(spec *cluster.Spec, kubeConfig str if bundle == nil { return fmt.Errorf("could not find VersionsBundle") } - f.dependencies.PackageControllerClient = curatedpackages.NewPackageControllerClient( - f.dependencies.Helm, - f.dependencies.Kubectl, - spec.Cluster.Name, - mgmtKubeConfig, - &bundle.PackageController.HelmChart, - f.registryMirror, + + options := []curatedpackages.PackageControllerClientOpt{ curatedpackages.WithEksaAccessKeyId(eksaAccessKeyID), curatedpackages.WithEksaSecretAccessKey(eksaSecretKey), curatedpackages.WithEksaRegion(eksaRegion), @@ -1364,6 +1395,18 @@ func (f *Factory) WithPackageControllerClient(spec *cluster.Spec, kubeConfig str curatedpackages.WithManagementClusterName(managementClusterName), curatedpackages.WithValuesFileWriter(writer), curatedpackages.WithClusterSpec(spec), + } + + options = append(options, opts...) + + f.dependencies.PackageControllerClient = curatedpackages.NewPackageControllerClient( + f.dependencies.Helm, + f.dependencies.Kubectl, + spec.Cluster.Name, + mgmtKubeConfig, + &bundle.PackageController.HelmChart, + f.registryMirror, + options..., ) return nil }) diff --git a/pkg/dependencies/factory_test.go b/pkg/dependencies/factory_test.go index 626e412574df..7e43b4246630 100644 --- a/pkg/dependencies/factory_test.go +++ b/pkg/dependencies/factory_test.go @@ -433,10 +433,46 @@ func TestFactoryBuildWithPackageInstaller(t *testing.T) { WithLocalExecutables(). WithHelm(helm.WithInsecure()). WithKubectl(). - WithPackageInstaller(spec, "/test/packages.yaml", "kubeconfig.kubeconfig"). + WithPackageManager(spec, "/test/packages.yaml", "kubeconfig.kubeconfig"). Build(context.Background()) tt.Expect(err).To(BeNil()) - tt.Expect(deps.PackageInstaller).NotTo(BeNil()) + tt.Expect(deps.PackageManager).NotTo(BeNil()) +} + +func TestFactoryBuildWithPackageInstallerWithoutWait(t *testing.T) { + spec := &cluster.Spec{ + Config: &cluster.Config{ + Cluster: &anywherev1.Cluster{ + ObjectMeta: v1.ObjectMeta{ + Name: "test-cluster", + }, + Spec: anywherev1.ClusterSpec{ + KubernetesVersion: "1.19", + }, + }, + }, + VersionsBundles: map[anywherev1.KubernetesVersion]*cluster.VersionsBundle{ + "1.19": { + VersionsBundle: &v1alpha1.VersionsBundle{ + PackageController: v1alpha1.PackageBundle{ + HelmChart: v1alpha1.Image{ + URI: "test_registry/test/eks-anywhere-packages:v1", + Name: "test_chart", + }, + }, + }, + }, + }, + } + tt := newTest(t, vsphere) + deps, err := dependencies.NewFactory(). + WithLocalExecutables(). + WithHelm(helm.WithInsecure()). + WithKubectl(). + WithPackageManagerWithoutWait(spec, "/test/packages.yaml", "kubeconfig.kubeconfig"). + Build(context.Background()) + tt.Expect(err).To(BeNil()) + tt.Expect(deps.PackageManager).NotTo(BeNil()) } func TestFactoryBuildWithCuratedPackagesCustomRegistry(t *testing.T) { @@ -645,6 +681,18 @@ func TestFactoryBuildWithClusterDeleterNoTimeout(t *testing.T) { tt.Expect(deps.ClusterApplier).NotTo(BeNil()) } +func TestFactoryBuildWithClusterMoverNoTimeout(t *testing.T) { + tt := newTest(t, vsphere) + deps, err := dependencies.NewFactory(). 
+ WithLocalExecutables(). + WithNoTimeouts(). + WithClusterMover(). + Build(context.Background()) + + tt.Expect(err).To(BeNil()) + tt.Expect(deps.ClusterMover).NotTo(BeNil()) +} + func TestFactoryBuildWithAwsIamAuthNoTimeout(t *testing.T) { tt := newTest(t, vsphere) deps, err := dependencies.NewFactory(). diff --git a/pkg/docker/registry.go b/pkg/docker/registry.go index 585dcc7a3406..276a0dede0ad 100644 --- a/pkg/docker/registry.go +++ b/pkg/docker/registry.go @@ -5,8 +5,10 @@ import ( "fmt" "runtime" "strings" + "time" "github.com/aws/eks-anywhere/pkg/logger" + "github.com/aws/eks-anywhere/pkg/retrier" ) // These constants are temporary since currently there is a limitation on harbor @@ -64,12 +66,14 @@ func (d *ImageRegistryDestination) Write(ctx context.Context, images ...string) type ImageOriginalRegistrySource struct { client ImagePuller processor *ConcurrentImageProcessor + Retrier retrier.Retrier } func NewOriginalRegistrySource(client ImagePuller) *ImageOriginalRegistrySource { return &ImageOriginalRegistrySource{ client: client, processor: NewConcurrentImageProcessor(runtime.GOMAXPROCS(0)), + Retrier: *retrier.NewWithMaxRetries(5, 200*time.Second), } } @@ -79,7 +83,8 @@ func (s *ImageOriginalRegistrySource) Load(ctx context.Context, images ...string) logger.V(3).Info("Starting pull", "numberOfImages", len(images)) err := s.processor.Process(ctx, images, func(ctx context.Context, image string) error { - if err := s.client.PullImage(ctx, image); err != nil { + err := s.Retrier.Retry(func() error { return s.client.PullImage(ctx, image) }) + if err != nil { return err } diff --git a/pkg/docker/registry_test.go b/pkg/docker/registry_test.go index 9fd82622dbf2..61ef9816ce63 100644 --- a/pkg/docker/registry_test.go +++ b/pkg/docker/registry_test.go @@ -11,6 +11,7 @@ import ( "github.com/aws/eks-anywhere/internal/test" "github.com/aws/eks-anywhere/pkg/docker" "github.com/aws/eks-anywhere/pkg/docker/mocks" + "github.com/aws/eks-anywhere/pkg/retrier" ) func TestNewRegistryDestination(t *testing.T) { @@ -119,6 +120,7 @@ func TestNewOriginalRegistrySource(t *testing.T) { images := []string{"image1:1", "image2:2"} ctx := context.Background() dstLoader := docker.NewOriginalRegistrySource(client) + dstLoader.Retrier = *retrier.NewWithMaxRetries(1, 0) for _, i := range images { client.EXPECT().PullImage(test.AContext(), i) } @@ -134,6 +136,7 @@ func TestOriginalRegistrySourceError(t *testing.T) { images := []string{"image1:1", "image2:2"} ctx := context.Background() dstLoader := docker.NewOriginalRegistrySource(client) + dstLoader.Retrier = *retrier.NewWithMaxRetries(1, 0) client.EXPECT().PullImage(test.AContext(), images[0]).Return(errors.New("error pulling")) client.EXPECT().PullImage(test.AContext(), images[1]).MaxTimes(1) diff --git a/pkg/executables/cmk.go b/pkg/executables/cmk.go index 1cb4cc67bf83..64551202c172 100644 --- a/pkg/executables/cmk.go +++ b/pkg/executables/cmk.go @@ -291,6 +291,35 @@ func (c *Cmk) ValidateDomainAndGetId(ctx context.Context, profile string, domain return domainId, nil } +// EnsureNoDuplicateNetwork ensures that there are no duplicate networks with the name networkName. +// If it finds duplicates that are not shared networks, it deletes them. +func (c *Cmk) EnsureNoDuplicateNetwork(ctx context.Context, profile string, networkName string) error { + command := newCmkCommand(fmt.Sprintf("list networks filter=name,id,type keyword=%s", networkName)) + result, err := c.exec(ctx, profile, command...)
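+ // The filtered networks come back from cmk as JSON with a top-level "network" list; it is parsed below and any non-shared duplicates are deleted by ID.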
+ if err != nil { + return fmt.Errorf("getting network info - %s: %v", result.String(), err) + } + + response := struct { + CmkNetworks []cmkNetwork `json:"network"` + }{} + if err = json.Unmarshal(result.Bytes(), &response); err != nil { + return fmt.Errorf("parsing response into json: %v", err) + } + + for _, network := range response.CmkNetworks { + if !strings.EqualFold(network.Type, "Shared") { + command := newCmkCommand(fmt.Sprintf("delete network id=%s force=true", network.ID)) + result, err := c.exec(ctx, profile, command...) + if err != nil { + return fmt.Errorf("deleting duplicate network with ID %s - %s: %v", network, result.String(), err) + } + } + } + + return nil +} + func (c *Cmk) ValidateNetworkPresent(ctx context.Context, profile string, domainId string, network v1alpha1.CloudStackResourceIdentifier, zoneId string, account string) error { command := newCmkCommand("list networks") // account must be specified within a domainId @@ -436,10 +465,6 @@ func (c *Cmk) CleanupVms(ctx context.Context, profile string, clusterName string } func (c *Cmk) exec(ctx context.Context, profile string, args ...string) (stdout bytes.Buffer, err error) { - if err != nil { - return bytes.Buffer{}, fmt.Errorf("failed get environment map: %v", err) - } - configFile, err := c.buildCmkConfigFile(profile) if err != nil { return bytes.Buffer{}, fmt.Errorf("failed cmk validations: %v", err) @@ -490,6 +515,12 @@ type cmkServiceOffering struct { Name string `json:"name"` } +type cmkNetwork struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` +} + type cmkResourceIdentifier struct { Id string `json:"id"` Name string `json:"name"` diff --git a/pkg/executables/cmk_test.go b/pkg/executables/cmk_test.go index 01c9843b4004..7a265f02cd9d 100644 --- a/pkg/executables/cmk_test.go +++ b/pkg/executables/cmk_test.go @@ -215,6 +215,79 @@ func TestCmkCleanupVms(t *testing.T) { } } +func TestCmkEnsureNoDuplicateNetwork(t *testing.T) { + _, writer := test.NewWriter(t) + configFilePath, _ := filepath.Abs(filepath.Join(writer.Dir(), "generated", cmkConfigFileName)) + tests := []struct { + testName string + argumentsExecCalls [][]string + jsonResponseFile string + cmkFunc func(cmk executables.Cmk, ctx context.Context) error + cmkResponseError error + wantErr bool + }{ + { + testName: "EnsureNoDuplicateNetwork success on no duplicate networks", + jsonResponseFile: "testdata/cmk_list_network_multiple.json", + argumentsExecCalls: [][]string{ + { + "-c", configFilePath, + "list", "networks", "filter=name,id,type", "keyword=eksa-cloudstack-ci-net", + }, + }, + cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { + return cmk.EnsureNoDuplicateNetwork(ctx, execConfig.Profiles[0].Name, "eksa-cloudstack-ci-net") + }, + cmkResponseError: nil, + wantErr: false, + }, + { + testName: "EnsureNoDuplicateNetwork success on deleting duplicate networks", + jsonResponseFile: "testdata/cmk_list_network_duplicates.json", + argumentsExecCalls: [][]string{ + { + "-c", configFilePath, + "list", "networks", "filter=name,id,type", "keyword=eksa-cloudstack-ci-net", + }, + { + "-c", configFilePath, + "delete", "network", "id=fe1a7310-51d4-4299-b3d0-a627a57bb4b0", "force=true", + }, + { + "-c", configFilePath, + "delete", "network", "id=24fd6849-3016-4afe-948d-4ce2bb396cf5", "force=true", + }, + }, + cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { + return cmk.EnsureNoDuplicateNetwork(ctx, execConfig.Profiles[0].Name, "eksa-cloudstack-ci-net") + }, + cmkResponseError: nil, + wantErr: 
false, + }, + } + + for _, tt := range tests { + t.Run(tt.testName, func(t *testing.T) { + fileContent := test.ReadFile(t, tt.jsonResponseFile) + + ctx := context.Background() + mockCtrl := gomock.NewController(t) + + executable := mockexecutables.NewMockExecutable(mockCtrl) + for _, argsList := range tt.argumentsExecCalls { + executable.EXPECT().Execute(ctx, argsList). + Return(*bytes.NewBufferString(fileContent), tt.cmkResponseError) + } + cmk, _ := executables.NewCmk(executable, writer, execConfig) + err := tt.cmkFunc(*cmk, ctx) + if tt.wantErr && err != nil || !tt.wantErr && err == nil { + return + } + t.Fatalf("Cmk error: %v", err) + }) + } +} + func TestNewCmkNilConfig(t *testing.T) { _, err := executables.NewCmk(nil, nil, nil) if err == nil { diff --git a/pkg/executables/testdata/cmk_list_network_duplicates.json b/pkg/executables/testdata/cmk_list_network_duplicates.json new file mode 100644 index 000000000000..cd8f37087fd7 --- /dev/null +++ b/pkg/executables/testdata/cmk_list_network_duplicates.json @@ -0,0 +1,20 @@ +{ + "count": 3, + "network": [ + { + "id": "fe1a7310-51d4-4299-b3d0-a627a57bb4b0", + "name": "eksa-cloudstack-ci-net", + "type": "Isolated" + }, + { + "id": "24fd6849-3016-4afe-948d-4ce2bb396cf5", + "name": "eksa-cloudstack-ci-net", + "type": "Isolated" + }, + { + "id": "13b501c1-5629-40e1-ba1e-a31caa9aead4", + "name": "eksa-cloudstack-ci-net", + "type": "Shared" + } + ] +} diff --git a/pkg/helm/download.go b/pkg/helm/download.go index a97d373fc747..eab31434fd80 100644 --- a/pkg/helm/download.go +++ b/pkg/helm/download.go @@ -4,8 +4,10 @@ import ( "context" "fmt" "sort" + "time" "github.com/aws/eks-anywhere/pkg/logger" + "github.com/aws/eks-anywhere/pkg/retrier" "github.com/aws/eks-anywhere/pkg/types" "github.com/aws/eks-anywhere/pkg/utils/oci" ) @@ -13,12 +15,15 @@ import ( type ChartRegistryDownloader struct { client Client dstFolder string + + Retrier retrier.Retrier } func NewChartRegistryDownloader(client Client, dstFolder string) *ChartRegistryDownloader { return &ChartRegistryDownloader{ client: client, dstFolder: dstFolder, + Retrier: *retrier.NewWithMaxRetries(5, 200*time.Second), } } @@ -26,7 +31,10 @@ func (d *ChartRegistryDownloader) Download(ctx context.Context, charts ...string for _, chart := range uniqueCharts(charts) { chartURL, chartVersion := oci.ChartURLAndVersion(chart) logger.Info("Saving helm chart to disk", "chart", chart) - if err := d.client.SaveChart(ctx, chartURL, chartVersion, d.dstFolder); err != nil { + err := d.Retrier.Retry(func() error { + return d.client.SaveChart(ctx, chartURL, chartVersion, d.dstFolder) + }) + if err != nil { return fmt.Errorf("downloading chart [%s] from registry: %v", chart, err) } } diff --git a/pkg/helm/download_test.go b/pkg/helm/download_test.go index 27cf2aff14ab..a3e1d486a167 100644 --- a/pkg/helm/download_test.go +++ b/pkg/helm/download_test.go @@ -10,6 +10,7 @@ import ( "github.com/aws/eks-anywhere/pkg/helm" "github.com/aws/eks-anywhere/pkg/helm/mocks" + "github.com/aws/eks-anywhere/pkg/retrier" ) func TestChartRegistryDownloaderDownload(t *testing.T) { @@ -37,5 +38,6 @@ func TestChartRegistryDownloaderDownloadError(t *testing.T) { client.EXPECT().SaveChart(ctx, "oci://ecr.com/chart2", "v2.2.0", folder).Return(errors.New("failed downloading")) d := helm.NewChartRegistryDownloader(client, folder) + d.Retrier = *retrier.NewWithMaxRetries(1, 0) g.Expect(d.Download(ctx, charts...)).To(MatchError(ContainSubstring("downloading chart [ecr.com/chart2:v2.2.0] from registry: failed downloading"))) } diff --git 
a/pkg/helm/factory.go b/pkg/helm/factory.go index 0f56fe0a269b..802fa9b2ffcb 100644 --- a/pkg/helm/factory.go +++ b/pkg/helm/factory.go @@ -52,7 +52,8 @@ func (f *ClientFactory) Get(ctx context.Context, clus *anywherev1.Cluster) (Clie } r := registrymirror.FromCluster(managmentCluster) - helmClient := f.builder.BuildHelm(WithRegistryMirror(r), WithInsecure()) + p := managmentCluster.ProxyConfiguration() + helmClient := f.builder.BuildHelm(WithRegistryMirror(r), WithInsecure(), WithProxyConfig(p)) if r != nil && managmentCluster.RegistryAuth() { if err := helmClient.RegistryLogin(ctx, r.BaseRegistry, rUsername, rPassword); err != nil { diff --git a/pkg/nodeupgrader/testdata/expected_first_control_plane_upgrader_pod.yaml b/pkg/nodeupgrader/testdata/expected_first_control_plane_upgrader_pod.yaml index 2839ead16697..9a22c3d48ede 100644 --- a/pkg/nodeupgrader/testdata/expected_first_control_plane_upgrader_pod.yaml +++ b/pkg/nodeupgrader/testdata/expected_first_control_plane_upgrader_pod.yaml @@ -13,8 +13,9 @@ spec: - --uts - --ipc - --net - - /foo/eksa-upgrades/scripts/upgrade.sh - - print_status_and_cleanup + - /foo/eksa-upgrades/tools/upgrader + - upgrade + - status command: - nsenter image: public.ecr.aws/eks-anywhere/node-upgrader:latest @@ -46,8 +47,9 @@ spec: - --uts - --ipc - --net - - /foo/eksa-upgrades/scripts/upgrade.sh - - upgrade_containerd + - /foo/eksa-upgrades/tools/upgrader + - upgrade + - containerd command: - nsenter image: public.ecr.aws/eks-anywhere/node-upgrader:latest @@ -62,8 +64,9 @@ spec: - --uts - --ipc - --net - - /foo/eksa-upgrades/scripts/upgrade.sh - - cni_plugins + - /foo/eksa-upgrades/tools/upgrader + - upgrade + - cni-plugins command: - nsenter image: public.ecr.aws/eks-anywhere/node-upgrader:latest @@ -78,9 +81,14 @@ spec: - --uts - --ipc - --net - - /foo/eksa-upgrades/scripts/upgrade.sh - - kubeadm_in_first_cp + - /foo/eksa-upgrades/tools/upgrader + - upgrade + - node + - --type + - FirstCP + - --k8sVersion - v1.28.3-eks-1-28-9 + - --etcdVersion - v3.5.9-eks-1-28-9 command: - nsenter @@ -96,8 +104,9 @@ spec: - --uts - --ipc - --net - - /foo/eksa-upgrades/scripts/upgrade.sh - - kubelet_and_kubectl + - /foo/eksa-upgrades/tools/upgrader + - upgrade + - kubelet-kubectl command: - nsenter image: public.ecr.aws/eks-anywhere/node-upgrader:latest diff --git a/pkg/nodeupgrader/testdata/expected_rest_control_plane_upgrader_pod.yaml b/pkg/nodeupgrader/testdata/expected_rest_control_plane_upgrader_pod.yaml index 393e88afbe54..263cbbf5d99e 100755 --- a/pkg/nodeupgrader/testdata/expected_rest_control_plane_upgrader_pod.yaml +++ b/pkg/nodeupgrader/testdata/expected_rest_control_plane_upgrader_pod.yaml @@ -13,8 +13,9 @@ spec: - --uts - --ipc - --net - - /foo/eksa-upgrades/scripts/upgrade.sh - - print_status_and_cleanup + - /foo/eksa-upgrades/tools/upgrader + - upgrade + - status command: - nsenter image: public.ecr.aws/eks-anywhere/node-upgrader:latest @@ -46,8 +47,9 @@ spec: - --uts - --ipc - --net - - /foo/eksa-upgrades/scripts/upgrade.sh - - upgrade_containerd + - /foo/eksa-upgrades/tools/upgrader + - upgrade + - containerd command: - nsenter image: public.ecr.aws/eks-anywhere/node-upgrader:latest @@ -62,8 +64,9 @@ spec: - --uts - --ipc - --net - - /foo/eksa-upgrades/scripts/upgrade.sh - - cni_plugins + - /foo/eksa-upgrades/tools/upgrader + - upgrade + - cni-plugins command: - nsenter image: public.ecr.aws/eks-anywhere/node-upgrader:latest @@ -78,8 +81,11 @@ spec: - --uts - --ipc - --net - - /foo/eksa-upgrades/scripts/upgrade.sh - - kubeadm_in_rest_cp + - 
/foo/eksa-upgrades/tools/upgrader + - upgrade + - node + - --type + - RestCP command: - nsenter image: public.ecr.aws/eks-anywhere/node-upgrader:latest @@ -94,8 +100,9 @@ spec: - --uts - --ipc - --net - - /foo/eksa-upgrades/scripts/upgrade.sh - - kubelet_and_kubectl + - /foo/eksa-upgrades/tools/upgrader + - upgrade + - kubelet-kubectl command: - nsenter image: public.ecr.aws/eks-anywhere/node-upgrader:latest diff --git a/pkg/nodeupgrader/testdata/expected_worker_upgrader_pod.yaml b/pkg/nodeupgrader/testdata/expected_worker_upgrader_pod.yaml index d08637883592..8538f062281a 100755 --- a/pkg/nodeupgrader/testdata/expected_worker_upgrader_pod.yaml +++ b/pkg/nodeupgrader/testdata/expected_worker_upgrader_pod.yaml @@ -13,8 +13,9 @@ spec: - --uts - --ipc - --net - - /foo/eksa-upgrades/scripts/upgrade.sh - - print_status_and_cleanup + - /foo/eksa-upgrades/tools/upgrader + - upgrade + - status command: - nsenter image: public.ecr.aws/eks-anywhere/node-upgrader:latest @@ -43,8 +44,9 @@ spec: - --uts - --ipc - --net - - /foo/eksa-upgrades/scripts/upgrade.sh - - upgrade_containerd + - /foo/eksa-upgrades/tools/upgrader + - upgrade + - containerd command: - nsenter image: public.ecr.aws/eks-anywhere/node-upgrader:latest @@ -59,8 +61,9 @@ spec: - --uts - --ipc - --net - - /foo/eksa-upgrades/scripts/upgrade.sh - - cni_plugins + - /foo/eksa-upgrades/tools/upgrader + - upgrade + - cni-plugins command: - nsenter image: public.ecr.aws/eks-anywhere/node-upgrader:latest @@ -75,8 +78,11 @@ spec: - --uts - --ipc - --net - - /foo/eksa-upgrades/scripts/upgrade.sh - - kubeadm_in_worker + - /foo/eksa-upgrades/tools/upgrader + - upgrade + - node + - --type + - Worker command: - nsenter image: public.ecr.aws/eks-anywhere/node-upgrader:latest @@ -91,8 +97,9 @@ spec: - --uts - --ipc - --net - - /foo/eksa-upgrades/scripts/upgrade.sh - - kubelet_and_kubectl + - /foo/eksa-upgrades/tools/upgrader + - upgrade + - kubelet-kubectl command: - nsenter image: public.ecr.aws/eks-anywhere/node-upgrader:latest diff --git a/pkg/nodeupgrader/upgrader.go b/pkg/nodeupgrader/upgrader.go index 7e2f3213a41e..f8c654bdeb18 100644 --- a/pkg/nodeupgrader/upgrader.go +++ b/pkg/nodeupgrader/upgrader.go @@ -11,7 +11,7 @@ import ( ) const ( - upgradeScript = "/foo/eksa-upgrades/scripts/upgrade.sh" + upgradeBin = "/foo/eksa-upgrades/tools/upgrader" // CopierContainerName holds the name of the components copier container. CopierContainerName = "components-copier" @@ -40,21 +40,21 @@ func PodName(nodeName string) string { // UpgradeFirstControlPlanePod returns an upgrader pod that should be deployed on the first control plane node. func UpgradeFirstControlPlanePod(nodeName, image, kubernetesVersion, etcdVersion string) *corev1.Pod { p := upgraderPod(nodeName, image, true) - p.Spec.InitContainers = containersForUpgrade(true, image, nodeName, "kubeadm_in_first_cp", kubernetesVersion, etcdVersion) + p.Spec.InitContainers = containersForUpgrade(true, image, nodeName, "upgrade", "node", "--type", "FirstCP", "--k8sVersion", kubernetesVersion, "--etcdVersion", etcdVersion) return p } // UpgradeSecondaryControlPlanePod returns an upgrader pod that can be deployed on the remaining control plane nodes. 
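+// Like UpgradeFirstControlPlanePod above, the init containers now invoke the bundled upgrader binary (for example, "upgrader upgrade node --type RestCP") rather than the legacy upgrade.sh entry points.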
func UpgradeSecondaryControlPlanePod(nodeName, image string) *corev1.Pod { p := upgraderPod(nodeName, image, true) - p.Spec.InitContainers = containersForUpgrade(true, image, nodeName, "kubeadm_in_rest_cp") + p.Spec.InitContainers = containersForUpgrade(true, image, nodeName, "upgrade", "node", "--type", "RestCP") return p } // UpgradeWorkerPod returns an upgrader pod that can be deployed on worker nodes. func UpgradeWorkerPod(nodeName, image string) *corev1.Pod { p := upgraderPod(nodeName, image, false) - p.Spec.InitContainers = containersForUpgrade(false, image, nodeName, "kubeadm_in_worker") + p.Spec.InitContainers = containersForUpgrade(false, image, nodeName, "upgrade", "node", "--type", "Worker") return p } @@ -76,7 +76,7 @@ func upgraderPod(nodeName, image string, isCP bool) *corev1.Pod { HostPID: true, Volumes: volumes, Containers: []corev1.Container{ - nsenterContainer(image, PostUpgradeContainerName, upgradeScript, "print_status_and_cleanup"), + nsenterContainer(image, PostUpgradeContainerName, upgradeBin, "upgrade", "status"), }, RestartPolicy: corev1.RestartPolicyOnFailure, }, @@ -86,10 +86,10 @@ func upgraderPod(nodeName, image string, isCP bool) *corev1.Pod { func containersForUpgrade(isCP bool, image, nodeName string, kubeadmUpgradeCommand ...string) []corev1.Container { return []corev1.Container{ copierContainer(image, isCP), - nsenterContainer(image, ContainerdUpgraderContainerName, upgradeScript, "upgrade_containerd"), - nsenterContainer(image, CNIPluginsUpgraderContainerName, upgradeScript, "cni_plugins"), - nsenterContainer(image, KubeadmUpgraderContainerName, append([]string{upgradeScript}, kubeadmUpgradeCommand...)...), - nsenterContainer(image, KubeletUpgradeContainerName, upgradeScript, "kubelet_and_kubectl"), + nsenterContainer(image, ContainerdUpgraderContainerName, upgradeBin, "upgrade", "containerd"), + nsenterContainer(image, CNIPluginsUpgraderContainerName, upgradeBin, "upgrade", "cni-plugins"), + nsenterContainer(image, KubeadmUpgraderContainerName, append([]string{upgradeBin}, kubeadmUpgradeCommand...)...), + nsenterContainer(image, KubeletUpgradeContainerName, upgradeBin, "upgrade", "kubelet-kubectl"), } } diff --git a/pkg/providers/cloudstack/config/template-cp.yaml b/pkg/providers/cloudstack/config/template-cp.yaml index ed64fa9f0acd..ee9115402b30 100644 --- a/pkg/providers/cloudstack/config/template-cp.yaml +++ b/pkg/providers/cloudstack/config/template-cp.yaml @@ -156,6 +156,13 @@ spec: {{ .schedulerExtraArgs.ToYaml | indent 10 }} {{- end }} files: +{{- if .kubeletConfiguration }} + - content: | +{{ .kubeletConfiguration | indent 8}} + owner: root:root + permissions: "0644" + path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml +{{- end }} {{- if .encryptionProviderConfig }} - content: | {{ .encryptionProviderConfig | indent 8}} @@ -294,6 +301,10 @@ spec: path: /var/lib/kubeadm/aws-iam-authenticator/pki/key.pem {{- end}} initConfiguration: +{{- if .kubeletConfiguration }} + patches: + directory: /etc/kubernetes/patches +{{- end }} nodeRegistration: criSocket: /var/run/containerd/containerd.sock kubeletExtraArgs: @@ -316,6 +327,10 @@ spec: {{- end }} {{- end }} joinConfiguration: +{{- if .kubeletConfiguration }} + patches: + directory: /etc/kubernetes/patches +{{- end }} nodeRegistration: criSocket: /var/run/containerd/containerd.sock kubeletExtraArgs: diff --git a/pkg/providers/cloudstack/config/template-md.yaml b/pkg/providers/cloudstack/config/template-md.yaml index 81df17e3cc60..4229bbe63874 100644 --- 
a/pkg/providers/cloudstack/config/template-md.yaml +++ b/pkg/providers/cloudstack/config/template-md.yaml @@ -7,6 +7,10 @@ spec: template: spec: joinConfiguration: +{{- if .kubeletConfiguration }} + patches: + directory: /etc/kubernetes/patches +{{- end }} nodeRegistration: criSocket: /var/run/containerd/containerd.sock {{- if .workerNodeGroupTaints }} @@ -29,9 +33,16 @@ spec: {{ .kubeletExtraArgs.ToYaml | indent 12 }} {{- end }} name: "{{`{{ ds.meta_data.hostname }}`}}" -{{- if or .proxyConfig .registryMirrorMap }} +{{- if or (or .proxyConfig .registryMirrorMap) .kubeletConfiguration }} files: {{- end }} +{{- if .kubeletConfiguration }} + - content: | +{{ .kubeletConfiguration | indent 10 }} + owner: root:root + permissions: "0644" + path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml +{{- end }} {{- if .proxyConfig }} - content: | [Service] diff --git a/pkg/providers/cloudstack/template.go b/pkg/providers/cloudstack/template.go index 53b4cce84cb4..eb6cb5fdf081 100644 --- a/pkg/providers/cloudstack/template.go +++ b/pkg/providers/cloudstack/template.go @@ -4,6 +4,8 @@ import ( "fmt" "net" + "sigs.k8s.io/yaml" + "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/clusterapi" @@ -231,7 +233,7 @@ func buildTemplateMapCP(clusterSpec *cluster.Spec) (map[string]interface{}, erro values["externalEtcd"] = true values["externalEtcdReplicas"] = clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.Count values["etcdSshUsername"] = etcdMachineSpec.Users[0].Name - etcdURL, _ := common.GetExternalEtcdReleaseURL(string(*clusterSpec.Cluster.Spec.EksaVersion), versionsBundle) + etcdURL, _ := common.GetExternalEtcdReleaseURL(clusterSpec.Cluster.Spec.EksaVersion, versionsBundle) if etcdURL != "" { values["externalEtcdReleaseUrl"] = etcdURL } @@ -257,6 +259,17 @@ func buildTemplateMapCP(clusterSpec *cluster.Spec) (map[string]interface{}, erro values["encryptionProviderConfig"] = conf } + if clusterSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration != nil { + cpKubeletConfig := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration.Object + + kcString, err := yaml.Marshal(cpKubeletConfig) + if err != nil { + return nil, fmt.Errorf("error marshaling %v", err) + } + + values["kubeletConfiguration"] = string(kcString) + } + return values, nil } @@ -389,6 +402,16 @@ func buildTemplateMapMD(clusterSpec *cluster.Spec, workerNodeGroupConfiguration fillProxyConfigurations(values, clusterSpec, endpoint) } + if workerNodeGroupConfiguration.KubeletConfiguration != nil { + wnKubeletConfig := workerNodeGroupConfiguration.KubeletConfiguration.Object + kcString, err := yaml.Marshal(wnKubeletConfig) + if err != nil { + return nil, fmt.Errorf("error marshaling %v", err) + } + + values["kubeletConfiguration"] = string(kcString) + } + return values, nil } diff --git a/pkg/providers/cloudstack/template_test.go b/pkg/providers/cloudstack/template_test.go index 76c4043d3715..a027ae93a68e 100644 --- a/pkg/providers/cloudstack/template_test.go +++ b/pkg/providers/cloudstack/template_test.go @@ -6,6 +6,7 @@ import ( "time" . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "github.com/aws/eks-anywhere/internal/test" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" @@ -169,3 +170,32 @@ func TestTemplateBuilder_CertSANs(t *testing.T) { test.AssertContentToFile(t, string(data), tc.Output) } } + +func TestVsphereTemplateBuilderGenerateCAPISpecControlPlaneValidKubeletConfigWN(t *testing.T) { + g := NewWithT(t) + spec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) + spec.Cluster.Spec.WorkerNodeGroupConfigurations[0].KubeletConfiguration = &unstructured.Unstructured{ + Object: map[string]interface{}{ + "maxPods": 20, + }, + } + builder := cloudstack.NewTemplateBuilder(time.Now) + _, err := builder.GenerateCAPISpecWorkers(spec, nil, nil) + g.Expect(err).ToNot(HaveOccurred()) +} + +func TestVsphereTemplateBuilderGenerateCAPISpecControlPlaneValidKubeletConfigCP(t *testing.T) { + g := NewWithT(t) + spec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) + spec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration = &unstructured.Unstructured{ + Object: map[string]interface{}{ + "maxPods": 20, + }, + } + spec.Cluster.Spec.ExternalEtcdConfiguration = nil + builder := cloudstack.NewTemplateBuilder(time.Now) + _, err := builder.GenerateCAPISpecControlPlane(spec, func(values map[string]interface{}) { + values["controlPlaneTemplateName"] = clusterapi.ControlPlaneMachineTemplateName(spec.Cluster) + }) + g.Expect(err).ToNot(HaveOccurred()) +} diff --git a/pkg/providers/common/common.go b/pkg/providers/common/common.go index 70803b776535..9a1ebd6d52ef 100644 --- a/pkg/providers/common/common.go +++ b/pkg/providers/common/common.go @@ -152,8 +152,12 @@ func GetCAPIBottlerocketSettingsConfig(config *v1alpha1.BottlerocketConfiguratio // GetExternalEtcdReleaseURL returns a valid etcd URL from version bundles if the eksaVersion is greater than // MinEksAVersionWithEtcdURL. Return "" if eksaVersion < MinEksAVersionWithEtcdURL to prevent etcd node rolled out. -func GetExternalEtcdReleaseURL(clusterVersion string, versionBundle *cluster.VersionsBundle) (string, error) { - clusterVersionSemVer, err := semver.New(clusterVersion) +func GetExternalEtcdReleaseURL(clusterVersion *v1alpha1.EksaVersion, versionBundle *cluster.VersionsBundle) (string, error) { + if clusterVersion == nil { + logger.V(4).Info("Eks-a cluster version is not specified. 
Skip setting etcd url") + return "", nil + } + clusterVersionSemVer, err := semver.New(string(*clusterVersion)) if err != nil { return "", fmt.Errorf("invalid semver for clusterVersion: %v", err) } diff --git a/pkg/providers/common/common_test.go b/pkg/providers/common/common_test.go index 0b659999b203..79add9c0a595 100644 --- a/pkg/providers/common/common_test.go +++ b/pkg/providers/common/common_test.go @@ -175,7 +175,8 @@ func TestGetExternalEtcdReleaseURL(t *testing.T) { } for _, tt := range testcases { t.Run(tt.name, func(t *testing.T) { - got, err := common.GetExternalEtcdReleaseURL(tt.clusterVersion, test.VersionBundle()) + eksaVersion := v1alpha1.EksaVersion(tt.clusterVersion) + got, err := common.GetExternalEtcdReleaseURL(&eksaVersion, test.VersionBundle()) if tt.err == nil { g.Expect(err).ToNot(HaveOccurred()) } else { @@ -185,3 +186,10 @@ func TestGetExternalEtcdReleaseURL(t *testing.T) { }) } } + +func TestGetExternalEtcdReleaseURLWithNilEksaVersion(t *testing.T) { + g := NewWithT(t) + got, err := common.GetExternalEtcdReleaseURL(nil, test.VersionBundle()) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got).To(BeEmpty()) +} diff --git a/pkg/providers/docker/config/template-cp.yaml b/pkg/providers/docker/config/template-cp.yaml index 2cb630b6f527..b9f8434e7bc9 100644 --- a/pkg/providers/docker/config/template-cp.yaml +++ b/pkg/providers/docker/config/template-cp.yaml @@ -137,6 +137,13 @@ spec: {{ .schedulerExtraArgs.ToYaml | indent 10 }} {{- end }} files: +{{- if .kubeletConfiguration }} + - content: | +{{ .kubeletConfiguration | indent 8}} + owner: root:root + permissions: "0644" + path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml +{{- end }} - content: | {{ .auditPolicy | indent 8 }} owner: root:root @@ -209,6 +216,10 @@ spec: path: /var/lib/kubeadm/aws-iam-authenticator/pki/key.pem {{- end}} initConfiguration: +{{- if .kubeletConfiguration }} + patches: + directory: /etc/kubernetes/patches +{{- end }} nodeRegistration: criSocket: /var/run/containerd/containerd.sock kubeletExtraArgs: @@ -230,6 +241,10 @@ spec: {{- end }} {{- end }} joinConfiguration: +{{- if .kubeletConfiguration }} + patches: + directory: /etc/kubernetes/patches +{{- end }} nodeRegistration: criSocket: /var/run/containerd/containerd.sock kubeletExtraArgs: diff --git a/pkg/providers/docker/config/template-md.yaml b/pkg/providers/docker/config/template-md.yaml index 94d8867e7437..b0e9cc8592ce 100644 --- a/pkg/providers/docker/config/template-md.yaml +++ b/pkg/providers/docker/config/template-md.yaml @@ -7,6 +7,10 @@ spec: template: spec: joinConfiguration: +{{- if .kubeletConfiguration }} + patches: + directory: /etc/kubernetes/patches +{{- end }} nodeRegistration: criSocket: /var/run/containerd/containerd.sock {{- if .workerNodeGroupTaints }} @@ -26,9 +30,16 @@ spec: {{- if .kubeletExtraArgs }} {{ .kubeletExtraArgs.ToYaml | indent 12 }} {{- end }} -{{- if .registryMirrorMap }} +{{- if or .registryMirrorMap .kubeletConfiguration }} files: {{- end }} +{{- if .kubeletConfiguration }} + - content: | +{{ .kubeletConfiguration | indent 10 }} + owner: root:root + permissions: "0644" + path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml +{{- end }} {{- if .registryCACert }} - content: | {{ .registryCACert | indent 10 }} diff --git a/pkg/providers/docker/docker.go b/pkg/providers/docker/docker.go index b74665a028b3..77e91dd9d908 100644 --- a/pkg/providers/docker/docker.go +++ b/pkg/providers/docker/docker.go @@ -12,6 +12,7 @@ import ( etcdv1 
"github.com/aws/etcdadm-controller/api/v1beta1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + "sigs.k8s.io/yaml" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/bootstrapper" @@ -329,7 +330,7 @@ func buildTemplateMapCP(clusterSpec *cluster.Spec) (map[string]interface{}, erro if clusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil { values["externalEtcd"] = true values["externalEtcdReplicas"] = clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.Count - etcdURL, _ := common.GetExternalEtcdReleaseURL(string(*clusterSpec.Cluster.Spec.EksaVersion), versionsBundle) + etcdURL, _ := common.GetExternalEtcdReleaseURL(clusterSpec.Cluster.Spec.EksaVersion, versionsBundle) if etcdURL != "" { values["externalEtcdReleaseUrl"] = etcdURL } @@ -358,6 +359,16 @@ func buildTemplateMapCP(clusterSpec *cluster.Spec) (map[string]interface{}, erro values["maxSurge"] = clusterSpec.Cluster.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy.RollingUpdate.MaxSurge } + if clusterSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration != nil { + cpKubeletConfig := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration.Object + kcString, err := yaml.Marshal(cpKubeletConfig) + if err != nil { + return nil, fmt.Errorf("marshaling control plane node Kubelet Configuration while building CAPI template %v", err) + } + + values["kubeletConfiguration"] = string(kcString) + } + return values, nil } @@ -397,6 +408,16 @@ func buildTemplateMapMD(clusterSpec *cluster.Spec, workerNodeGroupConfiguration } } + if workerNodeGroupConfiguration.KubeletConfiguration != nil { + wnKubeletConfig := workerNodeGroupConfiguration.KubeletConfiguration.Object + kcString, err := yaml.Marshal(wnKubeletConfig) + if err != nil { + return nil, fmt.Errorf("marshaling Kubelet Configuration for worker node %s: %v", workerNodeGroupConfiguration.Name, err) + } + + values["kubeletConfiguration"] = string(kcString) + } + return values, nil } diff --git a/pkg/providers/docker/docker_test.go b/pkg/providers/docker/docker_test.go index b5df9fa043a5..774ee109f1e4 100644 --- a/pkg/providers/docker/docker_test.go +++ b/pkg/providers/docker/docker_test.go @@ -14,6 +14,7 @@ import ( . 
"github.com/onsi/gomega" "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" @@ -812,10 +813,9 @@ func TestDockerTemplateBuilderGenerateCAPISpecControlPlane(t *testing.T) { buildOptions []providers.BuildMapOption } tests := []struct { - name string - args args - wantContent []byte - wantErr error + name string + args args + wantErr error }{ { name: "kube 119 test", @@ -838,6 +838,28 @@ func TestDockerTemplateBuilderGenerateCAPISpecControlPlane(t *testing.T) { }, wantErr: fmt.Errorf("error building template map for CP "), }, + { + name: "kubelet config specified", + args: args{ + clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) { + s.Cluster.Name = "test-cluster" + s.Cluster.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{ + KubeletConfiguration: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "maxPods": 20, + "apiVersion": "kubelet.config.k8s.io/v1beta1", + "kind": "KubeletConfiguration", + }, + }, + Count: 1, + Endpoint: &v1alpha1.Endpoint{ + Host: "1.1.1.1", + }, + } + }), + }, + wantErr: nil, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -896,10 +918,9 @@ func TestDockerTemplateBuilderGenerateCAPISpecWorkers(t *testing.T) { clusterSpec *cluster.Spec } tests := []struct { - name string - args args - wantContent []byte - wantErr error + name string + args args + wantErr error }{ { name: "kube version not specified", @@ -911,6 +932,28 @@ func TestDockerTemplateBuilderGenerateCAPISpecWorkers(t *testing.T) { }, wantErr: fmt.Errorf("error building template map for MD "), }, + { + name: "kubelet config specified", + args: args{ + clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) { + s.Cluster.Name = "test-cluster" + s.Cluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{ + { + KubeletConfiguration: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "maxPods": 20, + "apiVersion": "kubelet.config.k8s.io/v1beta1", + "kind": "KubeletConfiguration", + }, + }, + Count: ptr.Int(1), + Name: "test", + }, + } + }), + }, + wantErr: nil, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/pkg/providers/nutanix/config/cp-template.yaml b/pkg/providers/nutanix/config/cp-template.yaml index c9d7694166f8..82acdc53dc66 100644 --- a/pkg/providers/nutanix/config/cp-template.yaml +++ b/pkg/providers/nutanix/config/cp-template.yaml @@ -5,7 +5,33 @@ metadata: name: "{{.clusterName}}" namespace: "{{.eksaSystemNamespace}}" spec: +{{- if .failureDomains }} + failureDomains: + {{- range $index, $value := .failureDomains}} + - name: "{{ $value.Name }}" + cluster: + {{- if (eq $value.Cluster.Type "uuid") }} + type: "uuid" + uuid: "{{ $value.Cluster.UUID }}" + {{- else if (eq $value.Cluster.Type "name") }} + type: "name" + name: "{{ $value.Cluster.Name }}" + {{- end}} + subnets: + {{- range $value.Subnets}} + {{- if (eq .Type "uuid") }} + - type: "uuid" + uuid: "{{ .UUID }}" + {{- else if (eq .Type "name") }} + - type: "name" + name: "{{ .Name }}" + {{- end}} + {{- end}} + controlPlane: true + {{- end }} +{{- else }} failureDomains: [] +{{- end}} prismCentral: {{- if .nutanixAdditionalTrustBundle }} additionalTrustBundle: @@ -147,6 +173,13 @@ spec: imageTag: {{.etcdImageTag}} {{- end }} files: +{{- if .kubeletConfiguration }} + - content: | +{{ .kubeletConfiguration | indent 8 }} + owner: 
root:root + permissions: "0644" + path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml +{{- end }} {{- if .encryptionProviderConfig }} - content: | {{ .encryptionProviderConfig | indent 8}} @@ -294,6 +327,10 @@ spec: owner: root:root path: /etc/kubernetes/audit-policy.yaml initConfiguration: +{{- if .kubeletConfiguration }} + patches: + directory: /etc/kubernetes/patches +{{- end }} nodeRegistration: kubeletExtraArgs: cloud-provider: external @@ -316,6 +353,10 @@ spec: {{- end }} {{- end }} joinConfiguration: +{{- if .kubeletConfiguration }} + patches: + directory: /etc/kubernetes/patches +{{- end }} nodeRegistration: criSocket: /var/run/containerd/containerd.sock kubeletExtraArgs: @@ -781,3 +822,31 @@ spec: name: user-ca-bundle {{- end }} strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "{{.clusterName}}-nutanix-ccm-secret" + namespace: "{{.eksaSystemNamespace}}" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "{{ .nutanixPCUsername }}", + "password": "{{ .nutanixPCPassword }}" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/config/md-template.yaml b/pkg/providers/nutanix/config/md-template.yaml index d62afebeb444..ac63a9ffff75 100644 --- a/pkg/providers/nutanix/config/md-template.yaml +++ b/pkg/providers/nutanix/config/md-template.yaml @@ -111,6 +111,10 @@ spec: {{- end }} - hostnamectl set-hostname "{{`{{ ds.meta_data.hostname }}`}}" joinConfiguration: +{{- if .kubeletConfiguration }} + patches: + directory: /etc/kubernetes/patches +{{- end }} nodeRegistration: kubeletExtraArgs: cloud-provider: external @@ -139,9 +143,16 @@ spec: sudo: ALL=(ALL) NOPASSWD:ALL sshAuthorizedKeys: - "{{.workerSshAuthorizedKey}}" -{{- if or .proxyConfig .registryMirrorMap }} +{{- if or (or .proxyConfig .registryMirrorMap) .kubeletConfiguration }} files: {{- end }} +{{- if .kubeletConfiguration }} + - content: | +{{ .kubeletConfiguration | indent 10 }} + owner: root:root + permissions: "0644" + path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml +{{- end }} {{- if .proxyConfig }} - content: | [Service] diff --git a/pkg/providers/nutanix/controlplane.go b/pkg/providers/nutanix/controlplane.go index c2b1fc4fd898..d774f8ba0e81 100644 --- a/pkg/providers/nutanix/controlplane.go +++ b/pkg/providers/nutanix/controlplane.go @@ -27,6 +27,7 @@ type ControlPlane struct { BaseControlPlane ConfigMaps []*corev1.ConfigMap ClusterResourceSets []*addonsv1.ClusterResourceSet + Secrets []*corev1.Secret } // Objects returns the control plane objects associated with the Nutanix cluster. 
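+// The returned objects include any Secrets parsed from the control plane template, such as the generated nutanix-ccm-secret, so credentials are applied together with the rest of the control plane resources.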
@@ -34,6 +35,7 @@ func (p ControlPlane) Objects() []kubernetes.Object { o := p.BaseControlPlane.Objects() o = appendKubeObjects[*corev1.ConfigMap](o, p.ConfigMaps) o = appendKubeObjects[*addonsv1.ClusterResourceSet](o, p.ClusterResourceSets) + o = appendKubeObjects[*corev1.Secret](o, p.Secrets) return o } @@ -154,6 +156,12 @@ func newControlPlaneParser(logger logr.Logger) (*yamlutil.Parser, *ControlPlaneB return &addonsv1.ClusterResourceSet{} }, ), + yamlutil.NewMapping( + constants.SecretKind, + func() yamlutil.APIObject { + return &corev1.Secret{} + }, + ), ) if err != nil { @@ -183,6 +191,8 @@ func buildObjects(cp *ControlPlane, lookup yamlutil.ObjectLookup) { cp.ConfigMaps = append(cp.ConfigMaps, obj.(*corev1.ConfigMap)) case constants.ClusterResourceSetKind: cp.ClusterResourceSets = append(cp.ClusterResourceSets, obj.(*addonsv1.ClusterResourceSet)) + case constants.SecretKind: + cp.Secrets = append(cp.Secrets, obj.(*corev1.Secret)) } } } diff --git a/pkg/providers/nutanix/provider.go b/pkg/providers/nutanix/provider.go index e5a7de728682..bc330f656a4c 100644 --- a/pkg/providers/nutanix/provider.go +++ b/pkg/providers/nutanix/provider.go @@ -419,7 +419,6 @@ func needsNewEtcdTemplate(oldSpec, newSpec *cluster.Spec, oldNmc, newNmc *v1alph if oldSpec.Bundles.Spec.Number != newSpec.Bundles.Spec.Number { return true } - return AnyImmutableFieldChanged(oldNmc, newNmc) } diff --git a/pkg/providers/nutanix/provider_test.go b/pkg/providers/nutanix/provider_test.go index 119d683da6fa..5dd4bab83bbe 100644 --- a/pkg/providers/nutanix/provider_test.go +++ b/pkg/providers/nutanix/provider_test.go @@ -530,7 +530,20 @@ func TestNutanixProviderSetupAndValidateDeleteCluster(t *testing.T) { } func TestNutanixProviderSetupAndValidateUpgradeCluster(t *testing.T) { - provider := testDefaultNutanixProvider(t) + ctrl := gomock.NewController(t) + executable := mockexecutables.NewMockExecutable(ctrl) + executable.EXPECT().ExecuteWithStdin(gomock.Any(), gomock.Any(), gomock.Any()).Return(bytes.Buffer{}, nil).AnyTimes() + executable.EXPECT().Execute(gomock.Any(), "get", + "--ignore-not-found", "-o", "json", "--kubeconfig", "testdata/kubeconfig.yaml", "nutanixdatacenterconfigs.anywhere.eks.amazonaws.com", "--namespace", "default", "eksa-unit-test").Return(*bytes.NewBufferString(nutanixDatacenterConfigSpecJSON), nil).AnyTimes() + kubectl := executables.NewKubectl(executable) + mockClient := mocknutanix.NewMockClient(ctrl) + mockCertValidator := mockCrypto.NewMockTlsValidator(ctrl) + mockTransport := mocknutanix.NewMockRoundTripper(ctrl) + mockTransport.EXPECT().RoundTrip(gomock.Any()).Return(&http.Response{}, nil).AnyTimes() + mockHTTPClient := &http.Client{Transport: mockTransport} + mockWriter := filewritermocks.NewMockFileWriter(ctrl) + provider := testNutanixProvider(t, mockClient, kubectl, mockCertValidator, mockHTTPClient, mockWriter) + tests := []struct { name string clusterConfFile string @@ -558,7 +571,8 @@ func TestNutanixProviderSetupAndValidateUpgradeCluster(t *testing.T) { for _, tt := range tests { clusterSpec := test.NewFullClusterSpec(t, tt.clusterConfFile) - err := provider.SetupAndValidateUpgradeCluster(context.Background(), &types.Cluster{Name: "eksa-unit-test"}, clusterSpec, clusterSpec) + cluster := &types.Cluster{Name: "eksa-unit-test", KubeconfigFile: "testdata/kubeconfig.yaml"} + err := provider.SetupAndValidateUpgradeCluster(context.Background(), cluster, clusterSpec, clusterSpec) if tt.expectErr { assert.Error(t, err, tt.name) thenErrorExpected(t, tt.expectErrStr, err) diff --git 
a/pkg/providers/nutanix/reconciler/reconciler.go b/pkg/providers/nutanix/reconciler/reconciler.go index 1ad3ba8a9fb3..2db9ca70ca08 100644 --- a/pkg/providers/nutanix/reconciler/reconciler.go +++ b/pkg/providers/nutanix/reconciler/reconciler.go @@ -3,6 +3,7 @@ package reconciler import ( "context" "fmt" + "os" "reflect" "github.com/go-logr/logr" @@ -137,6 +138,7 @@ func (r *Reconciler) reconcileClusterSecret(ctx context.Context, log logr.Logger // Reconcile reconciles the cluster to the desired state. func (r *Reconciler) Reconcile(ctx context.Context, log logr.Logger, c *anywherev1.Cluster) (controller.Result, error) { log = log.WithValues("provider", "nutanix") + clusterSpec, err := cluster.BuildSpec(ctx, clientutil.NewKubeClient(r.client), c) if err != nil { return controller.Result{}, err @@ -182,6 +184,9 @@ func (r *Reconciler) ValidateClusterSpec(ctx context.Context, log logr.Logger, c return controller.ResultWithReturn(), nil } + os.Setenv(constants.EksaNutanixUsernameKey, creds.PrismCentral.Username) + os.Setenv(constants.EksaNutanixPasswordKey, creds.PrismCentral.Password) + return controller.Result{}, nil } @@ -198,13 +203,16 @@ func (r *Reconciler) ReconcileControlPlane(ctx context.Context, log logr.Logger, } func toClientControlPlane(cp *nutanix.ControlPlane) *clusters.ControlPlane { - other := make([]client.Object, 0, len(cp.ConfigMaps)+len(cp.ClusterResourceSets)+1) + other := make([]client.Object, 0, len(cp.ConfigMaps)+len(cp.ClusterResourceSets)+len(cp.Secrets)+1) for _, o := range cp.ClusterResourceSets { other = append(other, o) } for _, o := range cp.ConfigMaps { other = append(other, o) } + for _, o := range cp.Secrets { + other = append(other, o) + } return &clusters.ControlPlane{ Cluster: cp.Cluster, diff --git a/pkg/providers/nutanix/template.go b/pkg/providers/nutanix/template.go index 5b33ad2c90fb..24e93dcc03b2 100644 --- a/pkg/providers/nutanix/template.go +++ b/pkg/providers/nutanix/template.go @@ -5,6 +5,9 @@ import ( "encoding/json" "fmt" + "sigs.k8s.io/yaml" + + capxv1beta1 "github.com/nutanix-cloud-native/cluster-api-provider-nutanix/api/v1beta1" "github.com/nutanix-cloud-native/prism-go-client/environment/credentials" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" @@ -59,7 +62,7 @@ func (ntb *TemplateBuilder) GenerateCAPISpecControlPlane(clusterSpec *cluster.Sp etcdMachineSpec = *ntb.etcdMachineSpec } - values, err := buildTemplateMapCP(ntb.datacenterSpec, clusterSpec, *ntb.controlPlaneMachineSpec, etcdMachineSpec) + values, err := buildTemplateMapCP(ntb.datacenterSpec, clusterSpec, *ntb.controlPlaneMachineSpec, etcdMachineSpec, ntb.creds) if err != nil { return nil, err } @@ -156,6 +159,7 @@ func buildTemplateMapCP( clusterSpec *cluster.Spec, controlPlaneMachineSpec v1alpha1.NutanixMachineConfigSpec, etcdMachineSpec v1alpha1.NutanixMachineConfigSpec, + creds credentials.BasicAuthCredential, ) (map[string]interface{}, error) { versionsBundle := clusterSpec.RootVersionsBundle() format := "cloud-config" @@ -173,6 +177,8 @@ func buildTemplateMapCP( return nil, err } + failureDomains := generateNutanixFailureDomains(datacenterSpec.FailureDomains) + values := map[string]interface{}{ "auditPolicy": auditPolicy, "apiServerExtraArgs": apiServerExtraArgs.ToPartialYaml(), @@ -185,6 +191,7 @@ func buildTemplateMapCP( "controlPlaneTaints": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Taints, "eksaSystemNamespace": constants.EksaSystemNamespace, "format": format, + "failureDomains": failureDomains, "podCidrs": clusterSpec.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks, 
"serviceCidrs": clusterSpec.Cluster.Spec.ClusterNetwork.Services.CidrBlocks, "kubernetesVersion": versionsBundle.KubeDistro.Kubernetes.Tag, @@ -218,6 +225,8 @@ func buildTemplateMapCP( "subnetName": controlPlaneMachineSpec.Subnet.Name, "subnetUUID": controlPlaneMachineSpec.Subnet.UUID, "apiServerCertSANs": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.CertSANs, + "nutanixPCUsername": creds.PrismCentral.BasicAuth.Username, + "nutanixPCPassword": creds.PrismCentral.BasicAuth.Password, } if controlPlaneMachineSpec.Project != nil { @@ -297,7 +306,7 @@ func buildTemplateMapCP( values["maxSurge"] = clusterSpec.Cluster.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy.RollingUpdate.MaxSurge } - etcdURL, _ := common.GetExternalEtcdReleaseURL(string(*clusterSpec.Cluster.Spec.EksaVersion), versionsBundle) + etcdURL, _ := common.GetExternalEtcdReleaseURL(clusterSpec.Cluster.Spec.EksaVersion, versionsBundle) if etcdURL != "" { values["externalEtcdReleaseUrl"] = etcdURL } @@ -310,6 +319,17 @@ func buildTemplateMapCP( values["encryptionProviderConfig"] = conf } + if clusterSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration != nil { + cpKubeletConfig := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration.Object + + kcString, err := yaml.Marshal(cpKubeletConfig) + if err != nil { + return nil, fmt.Errorf("error marshaling %v", err) + } + + values["kubeletConfiguration"] = string(kcString) + } + return values, nil } @@ -386,6 +406,15 @@ func buildTemplateMapMD(clusterSpec *cluster.Spec, workerNodeGroupMachineSpec v1 values["additionalCategories"] = workerNodeGroupMachineSpec.AdditionalCategories } + if workerNodeGroupConfiguration.KubeletConfiguration != nil { + wnKubeletConfig := workerNodeGroupConfiguration.KubeletConfiguration.Object + kcString, err := yaml.Marshal(wnKubeletConfig) + if err != nil { + return nil, fmt.Errorf("error marshaling %v", err) + } + + values["kubeletConfiguration"] = string(kcString) + } return values, nil } @@ -435,3 +464,30 @@ func generateNoProxyList(clusterSpec *cluster.Spec) []string { return noProxyList } + +func generateNutanixFailureDomains(eksNutanixFailureDomains []v1alpha1.NutanixDatacenterFailureDomain) []capxv1beta1.NutanixFailureDomain { + var failureDomains []capxv1beta1.NutanixFailureDomain + for _, fd := range eksNutanixFailureDomains { + + subnets := []capxv1beta1.NutanixResourceIdentifier{} + for _, subnet := range fd.Subnets { + subnets = append(subnets, capxv1beta1.NutanixResourceIdentifier{ + Type: capxv1beta1.NutanixIdentifierType(subnet.Type), + Name: subnet.Name, + UUID: subnet.UUID, + }) + } + + failureDomains = append(failureDomains, capxv1beta1.NutanixFailureDomain{ + Name: fd.Name, + Cluster: capxv1beta1.NutanixResourceIdentifier{ + Type: capxv1beta1.NutanixIdentifierType(fd.Cluster.Type), + Name: fd.Cluster.Name, + UUID: fd.Cluster.UUID, + }, + Subnets: subnets, + ControlPlane: true, + }) + } + return failureDomains +} diff --git a/pkg/providers/nutanix/template_test.go b/pkg/providers/nutanix/template_test.go index 37c52f79524b..037081059f57 100644 --- a/pkg/providers/nutanix/template_test.go +++ b/pkg/providers/nutanix/template_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "sigs.k8s.io/yaml" "github.com/aws/eks-anywhere/internal/test" @@ -87,6 +88,43 @@ func TestNewNutanixTemplateBuilder(t *testing.T) { assert.Equal(t, expectedSecret, secretSpec) } +func 
TestNewNutanixTemplateBuilderKubeletConfiguration(t *testing.T) { + dcConf, machineConf, workerConfs := minimalNutanixConfigSpec(t) + + t.Setenv(constants.EksaNutanixUsernameKey, "admin") + t.Setenv(constants.EksaNutanixPasswordKey, "password") + creds := GetCredsFromEnv() + builder := NewNutanixTemplateBuilder(&dcConf.Spec, &machineConf.Spec, &machineConf.Spec, workerConfs, creds, time.Now) + assert.NotNil(t, builder) + + buildSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster.yaml") + buildSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration = &unstructured.Unstructured{ + Object: map[string]interface{}{ + "maxPods": 20, + }, + } + + buildSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].KubeletConfiguration = &unstructured.Unstructured{ + Object: map[string]interface{}{ + "maxPods": 20, + }, + } + + spec, err := builder.GenerateCAPISpecControlPlane(buildSpec) + assert.NoError(t, err) + assert.NotNil(t, spec) + + workloadTemplateNames := map[string]string{ + "eksa-unit-test": "eksa-unit-test", + } + kubeadmconfigTemplateNames := map[string]string{ + "eksa-unit-test": "eksa-unit-test", + } + workerSpec, err := builder.GenerateCAPISpecWorkers(buildSpec, workloadTemplateNames, kubeadmconfigTemplateNames) + assert.NoError(t, err) + assert.NotNil(t, workerSpec) +} + func TestNewNutanixTemplateBuilderGenerateCAPISpecControlPlaneFailure(t *testing.T) { dcConf, machineConf, workerConfs := minimalNutanixConfigSpec(t) @@ -549,6 +587,9 @@ func TestTemplateBuilder_CertSANs(t *testing.T) { clusterSpec := test.NewFullClusterSpec(t, tc.Input) machineCfg := clusterSpec.NutanixMachineConfig(clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name) + + t.Setenv(constants.EksaNutanixUsernameKey, "admin") + t.Setenv(constants.EksaNutanixPasswordKey, "password") creds := GetCredsFromEnv() bldr := NewNutanixTemplateBuilder(&clusterSpec.NutanixDatacenter.Spec, &machineCfg.Spec, nil, @@ -574,6 +615,9 @@ func TestTemplateBuilder_additionalTrustBundle(t *testing.T) { clusterSpec := test.NewFullClusterSpec(t, tc.Input) machineCfg := clusterSpec.NutanixMachineConfig(clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name) + + t.Setenv(constants.EksaNutanixUsernameKey, "admin") + t.Setenv(constants.EksaNutanixPasswordKey, "password") creds := GetCredsFromEnv() bldr := NewNutanixTemplateBuilder(&clusterSpec.NutanixDatacenter.Spec, &machineCfg.Spec, nil, @@ -599,6 +643,9 @@ func TestTemplateBuilderEtcdEncryption(t *testing.T) { clusterSpec := test.NewFullClusterSpec(t, tc.Input) machineCfg := clusterSpec.NutanixMachineConfig(clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name) + + t.Setenv(constants.EksaNutanixUsernameKey, "admin") + t.Setenv(constants.EksaNutanixPasswordKey, "password") creds := GetCredsFromEnv() bldr := NewNutanixTemplateBuilder(&clusterSpec.NutanixDatacenter.Spec, &machineCfg.Spec, nil, @@ -624,6 +671,37 @@ func TestTemplateBuilderEtcdEncryptionKubernetes129(t *testing.T) { clusterSpec := test.NewFullClusterSpec(t, tc.Input) machineCfg := clusterSpec.NutanixMachineConfig(clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name) + + t.Setenv(constants.EksaNutanixUsernameKey, "admin") + t.Setenv(constants.EksaNutanixPasswordKey, "password") + creds := GetCredsFromEnv() + + bldr := NewNutanixTemplateBuilder(&clusterSpec.NutanixDatacenter.Spec, &machineCfg.Spec, nil, + map[string]anywherev1.NutanixMachineConfigSpec{}, creds, time.Now) + + data, err := bldr.GenerateCAPISpecControlPlane(clusterSpec) + assert.NoError(t, 
err) + + test.AssertContentToFile(t, string(data), tc.Output) + } +} + +func TestTemplateBuilderFailureDomains(t *testing.T) { + for _, tc := range []struct { + Input string + Output string + }{ + { + Input: "testdata/cluster_nutanix_failure_domains.yaml", + Output: "testdata/expected_results_failure_domains.yaml", + }, + } { + clusterSpec := test.NewFullClusterSpec(t, tc.Input) + + machineCfg := clusterSpec.NutanixMachineConfig(clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name) + + t.Setenv(constants.EksaNutanixUsernameKey, "admin") + t.Setenv(constants.EksaNutanixPasswordKey, "password") creds := GetCredsFromEnv() bldr := NewNutanixTemplateBuilder(&clusterSpec.NutanixDatacenter.Spec, &machineCfg.Spec, nil, diff --git a/pkg/providers/nutanix/testdata/cluster_nutanix_failure_domains.yaml b/pkg/providers/nutanix/testdata/cluster_nutanix_failure_domains.yaml new file mode 100644 index 000000000000..c5750e15cfb5 --- /dev/null +++ b/pkg/providers/nutanix/testdata/cluster_nutanix_failure_domains.yaml @@ -0,0 +1,87 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: test + namespace: default +spec: + kubernetesVersion: "1.19" + controlPlaneConfiguration: + name: test + count: 1 + endpoint: + host: test + machineGroupRef: + name: test + kind: NutanixMachineConfig + datacenterRef: + kind: NutanixDatacenterConfig + name: test + clusterNetwork: + cni: "cilium" + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 + workerNodeGroupConfigurations: + - count: 3 + machineGroupRef: + kind: NutanixMachineConfig + name: test +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + kind: Secret + name: "nutanix-credentials" + failureDomains: + - name: "pe1" + cluster: + type: name + name: "prism-cluster-1" + subnets: + - name: "prism-subnet-1" + type: "name" + - uuid: "3e716c09-0613-46f3-b46a-beb89aa02295" + type: "uuid" + - name: "pe2" + cluster: + type: "uuid" + uuid: "468b7b36-d15b-406a-90f7-46d1560c4f4e" + subnets: + - name: "prism-subnet-1" + type: "name" + - uuid: "3e716c09-0613-46f3-b46a-beb89aa02295" + type: "uuid" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixMachineConfig +metadata: + name: test + namespace: default +spec: + vcpusPerSocket: 1 + vcpuSockets: 4 + memorySize: 8Gi + image: + type: "name" + name: "prism-image-1-19" + cluster: + type: "name" + name: "prism-cluster" + subnet: + type: "name" + name: "prism-subnet" + systemDiskSize: 40Gi + osFamily: "ubuntu" + users: + - name: "mySshUsername" + sshAuthorizedKeys: + - "mySshAuthorizedKey" diff --git a/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains.yaml b/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains.yaml new file mode 100644 index 000000000000..25f95fa4cf24 --- /dev/null +++ b/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains.yaml @@ -0,0 +1,27 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + kind: Secret + name: "nutanix-credentials" + insecure: true + failureDomains: + - name: "pe1" + cluster: + type: name + name: "prism-cluster" + subnets: + - type: uuid + uuid: "2d166190-7759-4dc6-b835-923262d6b497" + - name: "pe2" + cluster: + type: uuid + uuid: 
"4d69ca7d-022f-49d1-a454-74535993bda4" + subnets: + - type: name + name: "prism-subnet" diff --git a/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_cluster.yaml b/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_cluster.yaml new file mode 100644 index 000000000000..91a7f99954f3 --- /dev/null +++ b/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_cluster.yaml @@ -0,0 +1,27 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + kind: Secret + name: "nutanix-credentials" + insecure: true + failureDomains: + - name: "pe1" + cluster: + type: name + name: "prism-cluster" + subnets: + - type: uuid + uuid: "2d166190-7759-4dc6-b835-923262d6b497" + - name: "pe2" + cluster: + type: uuid + uuid: "4d69ca7d-022f-49d1-a454-00005993bda4" + subnets: + - type: name + name: "prism-subnet" diff --git a/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_name.yaml b/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_name.yaml new file mode 100644 index 000000000000..c4dda7d7650f --- /dev/null +++ b/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_name.yaml @@ -0,0 +1,27 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + kind: Secret + name: "nutanix-credentials" + insecure: true + failureDomains: + - name: "FIZZBUZZ!!!!" + cluster: + type: name + name: "prism-cluster" + subnets: + - type: uuid + uuid: "2d166190-7759-4dc6-b835-923262d6b497" + - name: "pe2" + cluster: + type: uuid + uuid: "4d69ca7d-022f-49d1-a454-74535993bda4" + subnets: + - type: name + name: "prism-subnet" diff --git a/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_subnet.yaml b/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_subnet.yaml new file mode 100644 index 000000000000..a35a86b484b2 --- /dev/null +++ b/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_subnet.yaml @@ -0,0 +1,27 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + kind: Secret + name: "nutanix-credentials" + insecure: true + failureDomains: + - name: "pe1" + cluster: + type: name + name: "prism-cluster" + subnets: + - type: uuid + uuid: "2d166190-7759-4dc6-b835-000062d6b497" + - name: "pe2" + cluster: + type: uuid + uuid: "4d69ca7d-022f-49d1-a454-74535993bda4" + subnets: + - type: name + name: "prism-subnet" diff --git a/pkg/providers/nutanix/testdata/eksa-cluster-invalid-kc.yaml b/pkg/providers/nutanix/testdata/eksa-cluster-invalid-kc.yaml new file mode 100644 index 000000000000..18b4ec1eabbf --- /dev/null +++ b/pkg/providers/nutanix/testdata/eksa-cluster-invalid-kc.yaml @@ -0,0 +1,73 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: eksa-unit-test + namespace: default +spec: + kubernetesVersion: "1.19" + controlPlaneConfiguration: + name: eksa-unit-test + count: 3 + endpoint: + host: test-ip + machineGroupRef: + name: eksa-unit-test + kind: NutanixMachineConfig + kubeletConfiguration: + kind: KubeletConfiguration + maxPods: 20 + 
workerNodeGroupConfigurations: + - count: 4 + name: eksa-unit-test + machineGroupRef: + name: eksa-unit-test + kind: NutanixMachineConfig + datacenterRef: + kind: NutanixDatacenterConfig + name: eksa-unit-test + clusterNetwork: + cni: "cilium" + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 + +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + kind: Secret + name: "nutanix-credentials" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixMachineConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + vcpusPerSocket: 1 + vcpuSockets: 4 + memorySize: 8Gi + image: + type: "name" + name: "prism-image-1-19" + cluster: + type: "name" + name: "prism-cluster" + subnet: + type: "name" + name: "prism-subnet" + systemDiskSize: 40Gi + osFamily: "ubuntu" + users: + - name: "mySshUsername" + sshAuthorizedKeys: + - "mySshAuthorizedKey" diff --git a/pkg/providers/nutanix/testdata/expected_cluster_api_additional_trust_bundle.yaml b/pkg/providers/nutanix/testdata/expected_cluster_api_additional_trust_bundle.yaml index 8f7e16b55170..c7b78abe1447 100644 --- a/pkg/providers/nutanix/testdata/expected_cluster_api_additional_trust_bundle.yaml +++ b/pkg/providers/nutanix/testdata/expected_cluster_api_additional_trust_bundle.yaml @@ -641,3 +641,31 @@ spec: - kind: ConfigMap name: user-ca-bundle strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "eksa-unit-test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_cluster_api_server_cert_san_domain_name.yaml b/pkg/providers/nutanix/testdata/expected_cluster_api_server_cert_san_domain_name.yaml index a3f90efaa920..e07e818237ea 100644 --- a/pkg/providers/nutanix/testdata/expected_cluster_api_server_cert_san_domain_name.yaml +++ b/pkg/providers/nutanix/testdata/expected_cluster_api_server_cert_san_domain_name.yaml @@ -582,3 +582,31 @@ spec: - kind: Secret name: test-nutanix-ccm-secret strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_cluster_api_server_cert_san_ip.yaml b/pkg/providers/nutanix/testdata/expected_cluster_api_server_cert_san_ip.yaml index 8402426982f9..be3eead5c104 100644 --- a/pkg/providers/nutanix/testdata/expected_cluster_api_server_cert_san_ip.yaml +++ b/pkg/providers/nutanix/testdata/expected_cluster_api_server_cert_san_ip.yaml @@ -582,3 +582,31 @@ spec: - kind: Secret name: test-nutanix-ccm-secret strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "test-nutanix-ccm-secret" + namespace: "eksa-system" 
+stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_results_additional_categories.yaml b/pkg/providers/nutanix/testdata/expected_results_additional_categories.yaml index 1eb2c0b146c1..f291088809f1 100644 --- a/pkg/providers/nutanix/testdata/expected_results_additional_categories.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_additional_categories.yaml @@ -586,3 +586,31 @@ spec: - kind: Secret name: eksa-unit-test-nutanix-ccm-secret strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "eksa-unit-test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_results_etcd_encryption.yaml b/pkg/providers/nutanix/testdata/expected_results_etcd_encryption.yaml index a51fa434fb18..833a2ec9d811 100644 --- a/pkg/providers/nutanix/testdata/expected_results_etcd_encryption.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_etcd_encryption.yaml @@ -631,3 +631,31 @@ spec: - kind: Secret name: test-nutanix-ccm-secret strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_results_etcd_encryption_1_29.yaml b/pkg/providers/nutanix/testdata/expected_results_etcd_encryption_1_29.yaml index f9693fd1c865..9e212a7a23c2 100644 --- a/pkg/providers/nutanix/testdata/expected_results_etcd_encryption_1_29.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_etcd_encryption_1_29.yaml @@ -661,3 +661,31 @@ spec: - kind: Secret name: test-nutanix-ccm-secret strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_results_external_etcd.yaml b/pkg/providers/nutanix/testdata/expected_results_external_etcd.yaml index d106949f640f..fe8fbcd9ccc6 100644 --- a/pkg/providers/nutanix/testdata/expected_results_external_etcd.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_external_etcd.yaml @@ -643,3 +643,31 @@ spec: - kind: Secret name: eksa-unit-test-nutanix-ccm-secret 
strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "eksa-unit-test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_results_external_etcd_with_optional.yaml b/pkg/providers/nutanix/testdata/expected_results_external_etcd_with_optional.yaml index 0bb73d48d754..d60332022f26 100644 --- a/pkg/providers/nutanix/testdata/expected_results_external_etcd_with_optional.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_external_etcd_with_optional.yaml @@ -657,3 +657,31 @@ spec: - kind: Secret name: eksa-unit-test-nutanix-ccm-secret strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "eksa-unit-test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_results_failure_domains.yaml b/pkg/providers/nutanix/testdata/expected_results_failure_domains.yaml new file mode 100644 index 000000000000..b3ff855aa819 --- /dev/null +++ b/pkg/providers/nutanix/testdata/expected_results_failure_domains.yaml @@ -0,0 +1,631 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixCluster +metadata: + name: "test" + namespace: "eksa-system" +spec: + failureDomains: + - name: "pe1" + cluster: + type: "name" + name: "prism-cluster-1" + subnets: + - type: "name" + name: "prism-subnet-1" + - type: "uuid" + uuid: "3e716c09-0613-46f3-b46a-beb89aa02295" + controlPlane: true + - name: "pe2" + cluster: + type: "uuid" + uuid: "468b7b36-d15b-406a-90f7-46d1560c4f4e" + subnets: + - type: "name" + name: "prism-subnet-1" + - type: "uuid" + uuid: "3e716c09-0613-46f3-b46a-beb89aa02295" + controlPlane: true + prismCentral: + address: "prism.nutanix.com" + port: 9440 + insecure: false + credentialRef: + name: "capx-test" + kind: Secret + controlPlaneEndpoint: + host: "test" + port: 6443 +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: "test" + name: "test" + namespace: "eksa-system" +spec: + clusterNetwork: + services: + cidrBlocks: [10.96.0.0/12] + pods: + cidrBlocks: [192.168.0.0/16] + serviceDomain: "cluster.local" + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: "test" + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixCluster + name: "test" +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: "test" + namespace: "eksa-system" +spec: + replicas: 1 + version: "v1.19.8-eks-1-19-4" + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + name: "" + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: "public.ecr.aws/eks-distro/kubernetes" + apiServer: + certSANs: + - localhost + - 
127.0.0.1 + - 0.0.0.0 + extraArgs: + cloud-provider: external + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + controllerManager: + extraArgs: + cloud-provider: external + enable-hostpath-provisioner: "true" + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.0-eks-1-19-4 + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.14-eks-1-19-4 + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - name: kube-vip + image: + imagePullPolicy: IfNotPresent + args: + - manager + env: + - name: vip_arp + value: "true" + - name: address + value: "test" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: svc_enable + value: "false" + - name: lb_enable + value: "false" + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_TIME + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + resources: {} + hostNetwork: true + volumes: + - name: kubeconfig + hostPath: + type: FileOrCreate + path: /etc/kubernetes/admin.conf + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. 
+ - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. + - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. 
+ - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd + # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 + #cgroup-driver: cgroupfs + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: "{{ ds.meta_data.hostname }}" + users: + - name: "mySshUsername" + lockPassword: false + sudo: ALL=(ALL) NOPASSWD:ALL + sshAuthorizedKeys: + - "mySshAuthorizedKey" + preKubeadmCommands: + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >> /etc/hosts + postKubeadmCommands: + - echo export KUBECONFIG=/etc/kubernetes/admin.conf >> /root/.bashrc + useExperimentalRetryJoin: true +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixMachineTemplate +metadata: + name: "" + namespace: "eksa-system" +spec: + template: + spec: + providerID: "nutanix://test-m1" + vcpusPerSocket: 1 + vcpuSockets: 4 + memorySize: 8Gi + systemDiskSize: 40Gi + image: + type: name + name: "prism-image-1-19" + + cluster: + type: name + name: "prism-cluster" + subnet: + - type: name + name: "prism-subnet" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-nutanix-ccm + namespace: "eksa-system" +data: + nutanix-ccm.yaml: | + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + namespace: kube-system + --- + kind: ConfigMap + apiVersion: v1 + metadata: + name: nutanix-config + namespace: kube-system + data: + nutanix_config.json: |- + { + "prismCentral": { + "address": "prism.nutanix.com", + "port": 9440, + "insecure": false, + "credentialRef": { + "kind": "secret", + "name": "nutanix-creds", + "namespace": "kube-system" + } + }, + "enableCustomLabeling": false, + "topologyDiscovery": { + "type": "Prism" + } + } + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - "*" + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + --- + kind: ClusterRoleBinding + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 
system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + labels: + k8s-app: nutanix-cloud-controller-manager + name: nutanix-cloud-controller-manager + namespace: kube-system + spec: + replicas: 1 + selector: + matchLabels: + k8s-app: nutanix-cloud-controller-manager + strategy: + type: Recreate + template: + metadata: + labels: + k8s-app: nutanix-cloud-controller-manager + spec: + hostNetwork: true + priorityClassName: system-cluster-critical + nodeSelector: + node-role.kubernetes.io/control-plane: "" + serviceAccountName: cloud-controller-manager + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: nutanix-cloud-controller-manager + topologyKey: kubernetes.io/hostname + dnsPolicy: Default + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 120 + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 120 + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + containers: + - image: "" + imagePullPolicy: IfNotPresent + name: nutanix-cloud-controller-manager + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + args: + - "--leader-elect=true" + - "--cloud-config=/etc/cloud/nutanix_config.json" + resources: + requests: + cpu: 100m + memory: 50Mi + volumeMounts: + - mountPath: /etc/cloud + name: nutanix-config-volume + readOnly: true + volumes: + - name: nutanix-config-volume + configMap: + name: nutanix-config +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: test-nutanix-ccm-crs + namespace: "eksa-system" +spec: + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: "test" + resources: + - kind: ConfigMap + name: test-nutanix-ccm + - kind: Secret + name: test-nutanix-ccm-secret + strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_results_iamauth.yaml b/pkg/providers/nutanix/testdata/expected_results_iamauth.yaml index a63d85d72451..cded83bf575d 100644 --- a/pkg/providers/nutanix/testdata/expected_results_iamauth.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_iamauth.yaml @@ -625,3 +625,31 @@ spec: - kind: Secret name: eksa-unit-test-nutanix-ccm-secret strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "eksa-unit-test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + 
"username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_results_irsa.yaml b/pkg/providers/nutanix/testdata/expected_results_irsa.yaml index a9ae7ec25f98..5f80eb7c2b1f 100644 --- a/pkg/providers/nutanix/testdata/expected_results_irsa.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_irsa.yaml @@ -582,3 +582,31 @@ spec: - kind: Secret name: eksa-unit-test-nutanix-ccm-secret strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "eksa-unit-test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_results_node_taints_labels.yaml b/pkg/providers/nutanix/testdata/expected_results_node_taints_labels.yaml index 3327ef50f7f3..07326e5f9fee 100644 --- a/pkg/providers/nutanix/testdata/expected_results_node_taints_labels.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_node_taints_labels.yaml @@ -591,3 +591,31 @@ spec: - kind: Secret name: eksa-unit-test-nutanix-ccm-secret strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "eksa-unit-test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_results_oidc.yaml b/pkg/providers/nutanix/testdata/expected_results_oidc.yaml index dc955647a810..0bafba10a28d 100644 --- a/pkg/providers/nutanix/testdata/expected_results_oidc.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_oidc.yaml @@ -583,3 +583,31 @@ spec: - kind: Secret name: eksa-unit-test-nutanix-ccm-secret strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "eksa-unit-test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_results_project.yaml b/pkg/providers/nutanix/testdata/expected_results_project.yaml index 678fbae3c53f..f42167b0b87e 100644 --- a/pkg/providers/nutanix/testdata/expected_results_project.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_project.yaml @@ -585,3 +585,31 @@ spec: - kind: Secret name: eksa-unit-test-nutanix-ccm-secret strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "eksa-unit-test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", 
+ "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_results_proxy.yaml b/pkg/providers/nutanix/testdata/expected_results_proxy.yaml index ac8892124261..ce933ceca1ce 100644 --- a/pkg/providers/nutanix/testdata/expected_results_proxy.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_proxy.yaml @@ -590,3 +590,31 @@ spec: - kind: Secret name: eksa-unit-test-nutanix-ccm-secret strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "eksa-unit-test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_results_registry_mirror.yaml b/pkg/providers/nutanix/testdata/expected_results_registry_mirror.yaml index eedffb23ace0..97c32981ec50 100644 --- a/pkg/providers/nutanix/testdata/expected_results_registry_mirror.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_registry_mirror.yaml @@ -631,3 +631,31 @@ spec: - kind: Secret name: eksa-unit-test-nutanix-ccm-secret strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "eksa-unit-test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/validator.go b/pkg/providers/nutanix/validator.go index 2bce2a5c543c..d5cf8ad732ff 100644 --- a/pkg/providers/nutanix/validator.go +++ b/pkg/providers/nutanix/validator.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net/http" + "regexp" "strconv" "strings" @@ -126,6 +127,37 @@ func (v *Validator) ValidateDatacenterConfig(ctx context.Context, client Client, return err } + if err := v.validateFailureDomains(ctx, client, config); err != nil { + return err + } + + return nil +} + +func (v *Validator) validateFailureDomains(ctx context.Context, client Client, config *anywherev1.NutanixDatacenterConfig) error { + regexName, err := regexp.Compile("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$") + if err != nil { + return err + } + + for _, fd := range config.Spec.FailureDomains { + if res := regexName.MatchString(fd.Name); !res { + errorStr := `failure domain name should contain only lowercase letters, digits, and hyphens. 
+ It should start with a lowercase letter or digit` + return fmt.Errorf(errorStr) + } + + if err := v.validateClusterConfig(ctx, client, fd.Cluster); err != nil { + return err + } + + for _, subnet := range fd.Subnets { + if err := v.validateSubnetConfig(ctx, client, subnet); err != nil { + return err + } + } + } + return nil } diff --git a/pkg/providers/nutanix/validator_test.go b/pkg/providers/nutanix/validator_test.go index 802a365b3654..01fdb204aa17 100644 --- a/pkg/providers/nutanix/validator_test.go +++ b/pkg/providers/nutanix/validator_test.go @@ -5,7 +5,9 @@ import ( _ "embed" "encoding/json" "errors" + "fmt" "net/http" + "strings" "testing" "github.com/golang/mock/gomock" @@ -45,6 +47,18 @@ var nutanixDatacenterConfigSpecWithInvalidCredentialRefKind string //go:embed testdata/datacenterConfig_empty_credentialRef_name.yaml var nutanixDatacenterConfigSpecWithEmptyCredentialRefName string +//go:embed testdata/datacenterConfig_with_failure_domains.yaml +var nutanixDatacenterConfigSpecWithFailureDomain string + +//go:embed testdata/datacenterConfig_with_failure_domains_invalid_name.yaml +var nutanixDatacenterConfigSpecWithFailureDomainInvalidName string + +//go:embed testdata/datacenterConfig_with_failure_domains_invalid_cluster.yaml +var nutanixDatacenterConfigSpecWithFailureDomainInvalidCluster string + +//go:embed testdata/datacenterConfig_with_failure_domains_invalid_subnet.yaml +var nutanixDatacenterConfigSpecWithFailureDomainInvalidSubnet string + func fakeClusterList() *v3.ClusterListIntentResponse { return &v3.ClusterListIntentResponse{ Entities: []*v3.ClusterIntentResponse{ @@ -82,6 +96,96 @@ func fakeSubnetList() *v3.SubnetListIntentResponse { } } +func fakeClusterListForDCTest(filter *string) (*v3.ClusterListIntentResponse, error) { + data := &v3.ClusterListIntentResponse{ + Entities: []*v3.ClusterIntentResponse{ + { + Metadata: &v3.Metadata{ + UUID: utils.StringPtr("a15f6966-bfc7-4d1e-8575-224096fc1cdb"), + }, + Spec: &v3.Cluster{ + Name: utils.StringPtr("prism-cluster"), + }, + Status: &v3.ClusterDefStatus{ + Resources: &v3.ClusterObj{ + Config: &v3.ClusterConfig{ + ServiceList: []*string{utils.StringPtr("AOS")}, + }, + }, + }, + }, + { + Metadata: &v3.Metadata{ + UUID: utils.StringPtr("4d69ca7d-022f-49d1-a454-74535993bda4"), + }, + Spec: &v3.Cluster{ + Name: utils.StringPtr("prism-cluster-1"), + }, + Status: &v3.ClusterDefStatus{ + Resources: &v3.ClusterObj{ + Config: &v3.ClusterConfig{ + ServiceList: []*string{utils.StringPtr("AOS")}, + }, + }, + }, + }, + }, + } + + result := &v3.ClusterListIntentResponse{ + Entities: []*v3.ClusterIntentResponse{}, + } + + if filter != nil && *filter != "" { + str := strings.Replace(*filter, "name==", "", -1) + for _, cluster := range data.Entities { + if str == *cluster.Spec.Name { + result.Entities = append(result.Entities, cluster) + } + } + } + + return result, nil +} + +func fakeSubnetListForDCTest(filter *string) (*v3.SubnetListIntentResponse, error) { + data := &v3.SubnetListIntentResponse{ + Entities: []*v3.SubnetIntentResponse{ + { + Metadata: &v3.Metadata{ + UUID: utils.StringPtr("b15f6966-bfc7-4d1e-8575-224096fc1cdb"), + }, + Spec: &v3.Subnet{ + Name: utils.StringPtr("prism-subnet"), + }, + }, + { + Metadata: &v3.Metadata{ + UUID: utils.StringPtr("2d166190-7759-4dc6-b835-923262d6b497"), + }, + Spec: &v3.Subnet{ + Name: utils.StringPtr("prism-subnet-1"), + }, + }, + }, + } + + result := &v3.SubnetListIntentResponse{ + Entities: []*v3.SubnetIntentResponse{}, + } + + if filter != nil && *filter != "" { + str := 
strings.Replace(*filter, "name==", "", -1) + for _, subnet := range data.Entities { + if str == *subnet.Spec.Name { + result.Entities = append(result.Entities, subnet) + } + } + } + + return result, nil +} + func fakeImageList() *v3.ImageListIntentResponse { return &v3.ImageListIntentResponse{ Entities: []*v3.ImageIntentResponse{ @@ -596,11 +700,45 @@ func TestNutanixValidatorValidateDatacenterConfig(t *testing.T) { dcConfFile: nutanixDatacenterConfigSpecWithEmptyCredentialRefName, expectErr: true, }, + { + name: "valid failure domains", + dcConfFile: nutanixDatacenterConfigSpecWithFailureDomain, + expectErr: false, + }, + { + name: "failure domain with invalid name", + dcConfFile: nutanixDatacenterConfigSpecWithFailureDomainInvalidName, + expectErr: true, + }, + { + name: "failure domain with invalid cluster", + dcConfFile: nutanixDatacenterConfigSpecWithFailureDomainInvalidCluster, + expectErr: true, + }, + { + name: "failure domains with invalid subnet", + dcConfFile: nutanixDatacenterConfigSpecWithFailureDomainInvalidSubnet, + expectErr: true, + }, } ctrl := gomock.NewController(t) mockClient := mocknutanix.NewMockClient(ctrl) mockClient.EXPECT().GetCurrentLoggedInUser(gomock.Any()).Return(&v3.UserIntentResponse{}, nil).AnyTimes() + mockClient.EXPECT().ListCluster(gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, filters *v3.DSMetadata) (*v3.ClusterListIntentResponse, error) { + return fakeClusterListForDCTest(filters.Filter) + }, + ).AnyTimes() + mockClient.EXPECT().ListSubnet(gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, filters *v3.DSMetadata) (*v3.SubnetListIntentResponse, error) { + return fakeSubnetListForDCTest(filters.Filter) + }, + ).AnyTimes() + mockClient.EXPECT().GetSubnet(gomock.Any(), gomock.Eq("2d166190-7759-4dc6-b835-923262d6b497")).Return(nil, nil).AnyTimes() + mockClient.EXPECT().GetSubnet(gomock.Any(), gomock.Not("2d166190-7759-4dc6-b835-923262d6b497")).Return(nil, fmt.Errorf("")).AnyTimes() + mockClient.EXPECT().GetCluster(gomock.Any(), gomock.Eq("4d69ca7d-022f-49d1-a454-74535993bda4")).Return(nil, nil).AnyTimes() + mockClient.EXPECT().GetCluster(gomock.Any(), gomock.Not("4d69ca7d-022f-49d1-a454-74535993bda4")).Return(nil, fmt.Errorf("")).AnyTimes() mockTLSValidator := mockCrypto.NewMockTlsValidator(ctrl) mockTLSValidator.EXPECT().ValidateCert(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() diff --git a/pkg/providers/snow/apibuilder.go b/pkg/providers/snow/apibuilder.go index 5e4f294f3f02..65ecd519ce2d 100644 --- a/pkg/providers/snow/apibuilder.go +++ b/pkg/providers/snow/apibuilder.go @@ -183,7 +183,7 @@ func EtcdadmCluster(log logr.Logger, clusterSpec *cluster.Spec, snowMachineTempl clusterapi.SetBottlerocketHostConfigInEtcdCluster(etcd, machineConfig.Spec.HostOSConfiguration) case v1alpha1.Ubuntu: - clusterapi.SetUbuntuConfigInEtcdCluster(etcd, versionsBundle, string(*clusterSpec.Cluster.Spec.EksaVersion)) + clusterapi.SetUbuntuConfigInEtcdCluster(etcd, versionsBundle, clusterSpec.Cluster.Spec.EksaVersion) etcd.Spec.EtcdadmConfigSpec.PreEtcdadmCommands = append(etcd.Spec.EtcdadmConfigSpec.PreEtcdadmCommands, "/etc/eks/bootstrap.sh", ) diff --git a/pkg/providers/tinkerbell/assert_test.go b/pkg/providers/tinkerbell/assert_test.go index c306360a90e0..900033f0c47b 100644 --- a/pkg/providers/tinkerbell/assert_test.go +++ b/pkg/providers/tinkerbell/assert_test.go @@ -143,6 +143,18 @@ func TestAssertMachineConfigK8sVersionBRWorker_Error(t *testing.T) { g.Expect(err).ToNot(gomega.Succeed()) } +func 
TestAssertMachineConfigK8sVersionBRModularWorker_Error(t *testing.T) { + g := gomega.NewWithT(t) + builder := NewDefaultValidClusterSpecBuilder() + clusterSpec := builder.Build() + kube129 := eksav1alpha1.Kube129 + clusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil + clusterSpec.Spec.Cluster.Spec.WorkerNodeGroupConfigurations[0].KubernetesVersion = &kube129 + clusterSpec.MachineConfigs[builder.WorkerNodeGroupMachineName].Spec.OSFamily = "bottlerocket" + err := tinkerbell.AssertOsFamilyValid(clusterSpec) + g.Expect(err).ToNot(gomega.Succeed()) +} + func TestAssertMachineConfigK8sVersionBR_Success(t *testing.T) { g := gomega.NewWithT(t) builder := NewDefaultValidClusterSpecBuilder() diff --git a/pkg/providers/tinkerbell/template.go b/pkg/providers/tinkerbell/template.go index d829723d8f66..1bae93348efe 100644 --- a/pkg/providers/tinkerbell/template.go +++ b/pkg/providers/tinkerbell/template.go @@ -454,9 +454,11 @@ func buildTemplateMapCP( } // Replace public.ecr.aws endpoint with the endpoint given in the cluster config file - localRegistry := values["publicMirror"].(string) - cpTemplateOverride = strings.ReplaceAll(cpTemplateOverride, defaultRegistry, localRegistry) - etcdTemplateOverride = strings.ReplaceAll(etcdTemplateOverride, defaultRegistry, localRegistry) + localRegistry := values["coreEKSAMirror"].(string) + if localRegistry != "" { + cpTemplateOverride = strings.ReplaceAll(cpTemplateOverride, defaultRegistry, localRegistry) + etcdTemplateOverride = strings.ReplaceAll(etcdTemplateOverride, defaultRegistry, localRegistry) + } } if clusterSpec.Cluster.Spec.ProxyConfiguration != nil { @@ -474,7 +476,7 @@ func buildTemplateMapCP( values["etcdSshUsername"] = etcdMachineSpec.Users[0].Name values["etcdTemplateOverride"] = etcdTemplateOverride values["etcdHardwareSelector"] = etcdMachineSpec.HardwareSelector - etcdURL, _ := common.GetExternalEtcdReleaseURL(string(*clusterSpec.Cluster.Spec.EksaVersion), versionsBundle) + etcdURL, _ := common.GetExternalEtcdReleaseURL(clusterSpec.Cluster.Spec.EksaVersion, versionsBundle) if etcdURL != "" { values["externalEtcdReleaseUrl"] = etcdURL } @@ -553,8 +555,10 @@ func buildTemplateMapMD( } // Replace public.ecr.aws endpoint with the endpoint given in the cluster config file - localRegistry := values["publicMirror"].(string) - workerTemplateOverride = strings.ReplaceAll(workerTemplateOverride, defaultRegistry, localRegistry) + localRegistry := values["coreEKSAMirror"].(string) + if localRegistry != "" { + workerTemplateOverride = strings.ReplaceAll(workerTemplateOverride, defaultRegistry, localRegistry) + } } if clusterSpec.Cluster.Spec.ProxyConfiguration != nil { @@ -624,6 +628,8 @@ func populateRegistryMirrorValues(clusterSpec *cluster.Spec, values map[string]i values["mirrorBase"] = registryMirror.BaseRegistry values["insecureSkip"] = registryMirror.InsecureSkipVerify values["publicMirror"] = containerd.ToAPIEndpoint(registryMirror.CoreEKSAMirror()) + values["coreEKSAMirror"] = registryMirror.CoreEKSAMirror() + if len(registryMirror.CACertContent) > 0 { values["registryCACert"] = registryMirror.CACertContent } diff --git a/pkg/providers/vsphere/config/template-cp.yaml b/pkg/providers/vsphere/config/template-cp.yaml index 70165168d331..3715e523b022 100644 --- a/pkg/providers/vsphere/config/template-cp.yaml +++ b/pkg/providers/vsphere/config/template-cp.yaml @@ -133,11 +133,21 @@ spec: {{- end }} {{- if and .registryMirrorMap (eq .format "bottlerocket") }} registryMirror: - endpoint: {{ .publicMirror }} + {{- if .publicECRMirror }} + 
endpoint: {{ .publicECRMirror }} + {{- end }} {{- if .registryCACert }} caCert: | {{ .registryCACert | indent 10 }} {{- end }} + {{- if not .publicECRMirror }} + mirrors: + {{- range $orig, $mirror := .registryMirrorMap }} + - registry: "{{ $orig }}" + endpoints: + - {{ $mirror }} + {{- end }} + {{- end }} {{- end }} {{- if .bottlerocketSettings }} {{ .bottlerocketSettings | indent 6 }} @@ -236,6 +246,13 @@ spec: certificatesDir: /var/lib/kubeadm/pki {{- end }} files: +{{- if .kubeletConfiguration }} + - content: | +{{ .kubeletConfiguration | indent 8}} + owner: root:root + permissions: "0644" + path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml +{{- end }} {{- if .encryptionProviderConfig }} - content: | {{ .encryptionProviderConfig | indent 8}} @@ -383,6 +400,10 @@ spec: path: /var/lib/kubeadm/aws-iam-authenticator/pki/key.pem {{- end}} initConfiguration: +{{- if .kubeletConfiguration }} + patches: + directory: /etc/kubernetes/patches +{{- end }} nodeRegistration: criSocket: /var/run/containerd/containerd.sock kubeletExtraArgs: @@ -405,6 +426,10 @@ spec: {{- end }} {{- end }} joinConfiguration: +{{- if .kubeletConfiguration }} + patches: + directory: /etc/kubernetes/patches +{{- end }} {{- if (eq .format "bottlerocket") }} pause: imageRepository: {{.pauseRepository}} @@ -422,11 +447,21 @@ spec: {{- end }} {{- if and .registryMirrorMap (eq .format "bottlerocket") }} registryMirror: - endpoint: {{ .publicMirror }} + {{- if .publicECRMirror }} + endpoint: {{ .publicECRMirror }} + {{- end }} {{- if .registryCACert }} caCert: | {{ .registryCACert | indent 10 }} {{- end }} + {{- if not .publicECRMirror }} + mirrors: + {{- range $orig, $mirror := .registryMirrorMap }} + - registry: "{{ $orig }}" + endpoints: + - {{ $mirror }} + {{- end }} + {{- end }} {{- end }} {{- if .bottlerocketSettings }} {{ .bottlerocketSettings | indent 6 }} diff --git a/pkg/providers/vsphere/config/template-md.yaml b/pkg/providers/vsphere/config/template-md.yaml index 9427538701b4..164b6c2de1a8 100644 --- a/pkg/providers/vsphere/config/template-md.yaml +++ b/pkg/providers/vsphere/config/template-md.yaml @@ -7,6 +7,10 @@ spec: template: spec: joinConfiguration: +{{- if .kubeletConfiguration }} + patches: + directory: /etc/kubernetes/patches +{{- end }} {{- if (eq .format "bottlerocket") }} pause: imageRepository: {{.pauseRepository}} @@ -24,11 +28,21 @@ spec: {{- end }} {{- if and .registryMirrorMap (eq .format "bottlerocket") }} registryMirror: - endpoint: {{ .publicMirror }} + {{- if .publicECRMirror }} + endpoint: {{ .publicECRMirror }} + {{- end }} {{- if .registryCACert }} caCert: | {{ .registryCACert | indent 12 }} {{- end }} + {{- if not .publicECRMirror }} + mirrors: + {{- range $orig, $mirror := .registryMirrorMap }} + - registry: "{{ $orig }}" + endpoints: + - {{ $mirror }} + {{- end }} + {{- end }} {{- end }} {{- if .bottlerocketSettings }} {{ .bottlerocketSettings | indent 8 }} @@ -66,9 +80,16 @@ spec: {{ .kubeletExtraArgs.ToYaml | indent 12 }} {{- end }} name: '{{"{{"}} ds.meta_data.hostname {{"}}"}}' -{{- if and (ne .format "bottlerocket") (or .proxyConfig .registryMirrorMap) }} +{{- if or (and (ne .format "bottlerocket") (or .proxyConfig .registryMirrorMap)) .kubeletConfiguration }} files: {{- end }} +{{- if .kubeletConfiguration }} + - content: | +{{ .kubeletConfiguration | indent 10 }} + owner: root:root + permissions: "0644" + path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml +{{- end }} {{- if and .proxyConfig (ne .format "bottlerocket") }} - content: | [Service] diff 
--git a/pkg/providers/vsphere/template.go b/pkg/providers/vsphere/template.go index 2e24f7a8d614..cbd6faac1b3f 100644 --- a/pkg/providers/vsphere/template.go +++ b/pkg/providers/vsphere/template.go @@ -3,6 +3,8 @@ package vsphere import ( "fmt" + "sigs.k8s.io/yaml" + anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/clusterapi" @@ -230,6 +232,12 @@ func buildTemplateMapCP( values["registryCACert"] = registryMirror.CACertContent } + if controlPlaneMachineSpec.OSFamily == anywherev1.Bottlerocket && + len(registryMirror.NamespacedRegistryMap) == 1 && + registryMirror.CoreEKSAMirror() != "" { + values["publicECRMirror"] = containerd.ToAPIEndpoint(registryMirror.CoreEKSAMirror()) + } + if registryMirror.Auth { values["registryAuth"] = registryMirror.Auth username, password, err := config.ReadCredentials() @@ -302,7 +310,7 @@ func buildTemplateMapCP( } } } - etcdURL, _ := common.GetExternalEtcdReleaseURL(string(*clusterSpec.Cluster.Spec.EksaVersion), versionsBundle) + etcdURL, _ := common.GetExternalEtcdReleaseURL(clusterSpec.Cluster.Spec.EksaVersion, versionsBundle) if etcdURL != "" { values["externalEtcdReleaseUrl"] = etcdURL } @@ -348,6 +356,17 @@ func buildTemplateMapCP( values["encryptionProviderConfig"] = conf } + if clusterSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration != nil { + cpKubeletConfig := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration.Object + + kcString, err := yaml.Marshal(cpKubeletConfig) + if err != nil { + return nil, fmt.Errorf("error marshaling %v", err) + } + + values["kubeletConfiguration"] = string(kcString) + } + if clusterSpec.Cluster.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy != nil { values["upgradeRolloutStrategy"] = true if clusterSpec.Cluster.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy.Type == anywherev1.InPlaceStrategyType { @@ -419,6 +438,12 @@ func buildTemplateMapMD( values["registryCACert"] = registryMirror.CACertContent } + if workerNodeGroupMachineSpec.OSFamily == anywherev1.Bottlerocket && + len(registryMirror.NamespacedRegistryMap) == 1 && + registryMirror.CoreEKSAMirror() != "" { + values["publicECRMirror"] = containerd.ToAPIEndpoint(registryMirror.CoreEKSAMirror()) + } + if registryMirror.Auth { values["registryAuth"] = registryMirror.Auth username, password, err := config.ReadCredentials() @@ -476,5 +501,15 @@ func buildTemplateMapMD( values["bottlerocketSettings"] = brSettings } + if workerNodeGroupConfiguration.KubeletConfiguration != nil { + wnKubeletConfig := workerNodeGroupConfiguration.KubeletConfiguration.Object + kcString, err := yaml.Marshal(wnKubeletConfig) + if err != nil { + return nil, fmt.Errorf("error marshaling %v", err) + } + + values["kubeletConfiguration"] = string(kcString) + } + return values, nil } diff --git a/pkg/providers/vsphere/template_test.go b/pkg/providers/vsphere/template_test.go index 56af1c73f4e4..4ca420d9945a 100644 --- a/pkg/providers/vsphere/template_test.go +++ b/pkg/providers/vsphere/template_test.go @@ -5,8 +5,10 @@ import ( "time" . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "github.com/aws/eks-anywhere/internal/test" + "github.com/aws/eks-anywhere/pkg/clusterapi" "github.com/aws/eks-anywhere/pkg/config" "github.com/aws/eks-anywhere/pkg/providers/vsphere" ) @@ -55,6 +57,34 @@ func TestVsphereTemplateBuilderGenerateCAPISpecControlPlaneInvalidEtcdSSHKey(t * ) } +func TestVsphereTemplateBuilderGenerateCAPISpecControlPlaneInvalidKubeletConfigWN(t *testing.T) { + g := NewWithT(t) + spec := test.NewFullClusterSpec(t, "testdata/cluster_main.yaml") + spec.Cluster.Spec.WorkerNodeGroupConfigurations[0].KubeletConfiguration = &unstructured.Unstructured{ + Object: map[string]interface{}{ + "maxPods": 20, + }, + } + builder := vsphere.NewVsphereTemplateBuilder(time.Now) + _, err := builder.GenerateCAPISpecWorkers(spec, nil, nil) + g.Expect(err).ToNot(HaveOccurred()) +} + +func TestVsphereTemplateBuilderGenerateCAPISpecControlPlaneInvalidKubeletConfigCP(t *testing.T) { + g := NewWithT(t) + spec := test.NewFullClusterSpec(t, "testdata/cluster_main.yaml") + spec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration = &unstructured.Unstructured{ + Object: map[string]interface{}{ + "maxPods": 20, + }, + } + builder := vsphere.NewVsphereTemplateBuilder(time.Now) + _, err := builder.GenerateCAPISpecControlPlane(spec, func(values map[string]interface{}) { + values["controlPlaneTemplateName"] = clusterapi.ControlPlaneMachineTemplateName(spec.Cluster) + }) + g.Expect(err).ToNot(HaveOccurred()) +} + func TestTemplateBuilder_CertSANs(t *testing.T) { t.Setenv(config.EksavSphereUsernameKey, expectedVSphereUsername) t.Setenv(config.EksavSpherePasswordKey, expectedVSpherePassword) diff --git a/pkg/providers/vsphere/testdata/cluster_bottlerocket_mirror_config_multiple_ocinamespaces.yaml b/pkg/providers/vsphere/testdata/cluster_bottlerocket_mirror_config_multiple_ocinamespaces.yaml new file mode 100644 index 000000000000..ba3665a89029 --- /dev/null +++ b/pkg/providers/vsphere/testdata/cluster_bottlerocket_mirror_config_multiple_ocinamespaces.yaml @@ -0,0 +1,114 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: test +spec: + controlPlaneConfiguration: + count: 3 + endpoint: + host: 1.2.3.4 + machineGroupRef: + name: test-cp + kind: VSphereMachineConfig + kubernetesVersion: "1.21" + workerNodeGroupConfigurations: + - count: 3 + machineGroupRef: + name: test-wn + kind: VSphereMachineConfig + name: md-0 + externalEtcdConfiguration: + count: 3 + machineGroupRef: + name: test-etcd + kind: VSphereMachineConfig + datacenterRef: + kind: VSphereDatacenterConfig + name: test + clusterNetwork: + cni: "cilium" + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 + registryMirrorConfiguration: + endpoint: 1.2.3.4 + port: 1234 + ociNamespaces: + - registry: "public.ecr.aws" + namespace: "eks-anywhere" + - registry: "docker.io" + namespace: "eks-anywhere" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: VSphereMachineConfig +metadata: + name: test-cp +spec: + diskGiB: 25 + cloneMode: linkedClone + datastore: "/SDDC-Datacenter/datastore/WorkloadDatastore" + folder: "/SDDC-Datacenter/vm" + memoryMiB: 8192 + numCPUs: 2 + osFamily: bottlerocket + resourcePool: "*/Resources" + storagePolicyName: "vSAN Default Storage Policy" + template: "/SDDC-Datacenter/vm/Templates/bottlerocket-1804-kube-v1.19.6" + users: + - name: ec2-user + sshAuthorizedKeys: + - "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: VSphereMachineConfig +metadata: + name: test-wn +spec: + diskGiB: 25 + cloneMode: linkedClone + datastore: "/SDDC-Datacenter/datastore/WorkloadDatastore" + folder: "/SDDC-Datacenter/vm" + memoryMiB: 4096 + numCPUs: 3 + osFamily: bottlerocket + resourcePool: "*/Resources" + storagePolicyName: "vSAN Default Storage Policy" + template: "/SDDC-Datacenter/vm/Templates/bottlerocket-1804-kube-v1.19.6" + users: + - name: ec2-user + sshAuthorizedKeys: + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: VSphereMachineConfig +metadata: + name: test-etcd +spec: + diskGiB: 25 + cloneMode: linkedClone + datastore: "/SDDC-Datacenter/datastore/WorkloadDatastore" + folder: "/SDDC-Datacenter/vm" + memoryMiB: 4096 + numCPUs: 3 + osFamily: bottlerocket + resourcePool: "*/Resources" + storagePolicyName: "vSAN Default Storage Policy" + template: "/SDDC-Datacenter/vm/Templates/bottlerocket-1804-kube-v1.19.6" + users: + - name: ec2-user + sshAuthorizedKeys: + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: VSphereDatacenterConfig +metadata: + name: test +spec: + datacenter: "SDDC-Datacenter" + network: "/SDDC-Datacenter/network/sddc-cgw-network-1" + server: "vsphere_server" + thumbprint: "ABCDEFG" + insecure: false diff --git 
a/pkg/providers/vsphere/testdata/expected_results_bottlerocket_mirror_config_multiple_ocinamespaces_cp.yaml b/pkg/providers/vsphere/testdata/expected_results_bottlerocket_mirror_config_multiple_ocinamespaces_cp.yaml new file mode 100644 index 000000000000..313aa68889db --- /dev/null +++ b/pkg/providers/vsphere/testdata/expected_results_bottlerocket_mirror_config_multiple_ocinamespaces_cp.yaml @@ -0,0 +1,730 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test + namespace: eksa-system +spec: + clusterNetwork: + pods: + cidrBlocks: [192.168.0.0/16] + services: + cidrBlocks: [10.96.0.0/12] + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereCluster + name: test + managedExternalEtcdRef: + apiVersion: etcdcluster.cluster.x-k8s.io/v1beta1 + kind: EtcdadmCluster + name: test-etcd + namespace: eksa-system +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereCluster +metadata: + name: test + namespace: eksa-system +spec: + controlPlaneEndpoint: + host: 1.2.3.4 + port: 6443 + identityRef: + kind: Secret + name: test-vsphere-credentials + server: vsphere_server + thumbprint: 'ABCDEFG' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: test-control-plane-template-1234567890000 + namespace: eksa-system +spec: + template: + spec: + cloneMode: linkedClone + datacenter: 'SDDC-Datacenter' + datastore: /SDDC-Datacenter/datastore/WorkloadDatastore + diskGiB: 25 + folder: '/SDDC-Datacenter/vm' + memoryMiB: 8192 + network: + devices: + - dhcp4: true + networkName: /SDDC-Datacenter/network/sddc-cgw-network-1 + numCPUs: 2 + resourcePool: '*/Resources' + server: vsphere_server + storagePolicyName: "vSAN Default Storage Policy" + template: /SDDC-Datacenter/vm/Templates/bottlerocket-1804-kube-v1.19.6 + thumbprint: 'ABCDEFG' +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: test + namespace: eksa-system +spec: + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: test-control-plane-template-1234567890000 + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: public.ecr.aws/eks-distro/kubernetes + etcd: + external: + endpoints: [] + caFile: "/var/lib/kubeadm/pki/etcd/ca.crt" + certFile: "/var/lib/kubeadm/pki/server-etcd-client.crt" + keyFile: "/var/lib/kubeadm/pki/apiserver-etcd-client.key" + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.3-eks-1-21-4 + pause: + imageRepository: public.ecr.aws/eks-distro/kubernetes/pause + imageTag: v1.21.2-eks-1-21-4 + bottlerocketBootstrap: + imageRepository: public.ecr.aws/l0g8r8j6/bottlerocket-bootstrap + imageTag: v1-21-4-eks-a-v0.0.0-dev-build.158 + registryMirror: + mirrors: + - registry: "docker.io" + endpoints: + - 1.2.3.4:1234/v2/eks-anywhere + - registry: "public.ecr.aws" + endpoints: + - 1.2.3.4:1234/v2/eks-anywhere + apiServer: + extraArgs: + cloud-provider: external + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + extraVolumes: + - hostPath: /var/lib/kubeadm/audit-policy.yaml + mountPath: 
/etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + controllerManager: + extraArgs: + cloud-provider: external + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + extraVolumes: + - hostPath: /var/lib/kubeadm/controller-manager.conf + mountPath: /etc/kubernetes/controller-manager.conf + name: kubeconfig + pathType: File + readOnly: true + scheduler: + extraArgs: + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + extraVolumes: + - hostPath: /var/lib/kubeadm/scheduler.conf + mountPath: /etc/kubernetes/scheduler.conf + name: kubeconfig + pathType: File + readOnly: true + certificatesDir: /var/lib/kubeadm/pki + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: address + value: 1.2.3.4 + image: public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.7-eks-a-v0.0.0-dev-build.158 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostNetwork: true + volumes: + - hostPath: + path: /var/lib/kubeadm/admin.conf + name: kubeconfig + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. 
+ - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. + - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. 
+ - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: '{{ ds.meta_data.hostname }}' + joinConfiguration: + pause: + imageRepository: public.ecr.aws/eks-distro/kubernetes/pause + imageTag: v1.21.2-eks-1-21-4 + bottlerocketBootstrap: + imageRepository: public.ecr.aws/l0g8r8j6/bottlerocket-bootstrap + imageTag: v1-21-4-eks-a-v0.0.0-dev-build.158 + registryMirror: + mirrors: + - registry: "docker.io" + endpoints: + - 1.2.3.4:1234/v2/eks-anywhere + - registry: "public.ecr.aws" + endpoints: + - 1.2.3.4:1234/v2/eks-anywhere + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: '{{ ds.meta_data.hostname }}' + preKubeadmCommands: + - hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts + - echo "{{ ds.meta_data.hostname }}" >/etc/hostname + useExperimentalRetryJoin: true + users: + - name: ec2-user + sshAuthorizedKeys: + - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ==' + sudo: ALL=(ALL) NOPASSWD:ALL + format: bottlerocket + replicas: 3 + version: v1.21.2-eks-1-21-4 +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test-cpi + namespace: eksa-system +spec: + strategy: Reconcile + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: test + resources: + - kind: Secret + name: test-cloud-controller-manager + - kind: Secret + name: test-cloud-provider-vsphere-credentials + - kind: ConfigMap + name: test-cpi-manifests +--- +kind: EtcdadmCluster +apiVersion: etcdcluster.cluster.x-k8s.io/v1beta1 +metadata: + name: test-etcd + namespace: eksa-system +spec: + replicas: 3 + etcdadmConfigSpec: + etcdadmBuiltin: true + format: bottlerocket + bottlerocketConfig: + etcdImage: public.ecr.aws/eks-distro/etcd-io/etcd:v3.4.16-eks-1-21-4 + bootstrapImage: public.ecr.aws/l0g8r8j6/bottlerocket-bootstrap:v1-21-4-eks-a-v0.0.0-dev-build.158 + pauseImage: public.ecr.aws/eks-distro/kubernetes/pause:v1.21.2-eks-1-21-4 + cipherSuites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + users: + - name: ec2-user + sshAuthorizedKeys: + - 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ==' + sudo: ALL=(ALL) NOPASSWD:ALL + registryMirror: + endpoint: 1.2.3.4:1234/v2/eks-anywhere + infrastructureTemplate: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: test-etcd-template-1234567890000 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: test-etcd-template-1234567890000 + namespace: 'eksa-system' +spec: + template: + spec: + cloneMode: linkedClone + datacenter: 'SDDC-Datacenter' + datastore: /SDDC-Datacenter/datastore/WorkloadDatastore + diskGiB: 25 + folder: '/SDDC-Datacenter/vm' + memoryMiB: 8192 + network: + devices: + - dhcp4: true + networkName: /SDDC-Datacenter/network/sddc-cgw-network-1 + numCPUs: 3 + resourcePool: '*/Resources' + server: vsphere_server + storagePolicyName: "vSAN Default Storage Policy" + template: /SDDC-Datacenter/vm/Templates/bottlerocket-1804-kube-v1.19.6 + thumbprint: 'ABCDEFG' +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-vsphere-credentials + namespace: eksa-system + labels: + clusterctl.cluster.x-k8s.io/move: "true" +data: + username: dnNwaGVyZV91c2VybmFtZQ== + password: dnNwaGVyZV9wYXNzd29yZA== +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-cloud-controller-manager + namespace: eksa-system +stringData: + data: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + namespace: kube-system +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-cloud-provider-vsphere-credentials + namespace: eksa-system +stringData: + data: | + apiVersion: v1 + kind: Secret + metadata: + name: cloud-provider-vsphere-credentials + namespace: kube-system + data: + vsphere_server.password: dnNwaGVyZV9wYXNzd29yZA== + vsphere_server.username: dnNwaGVyZV91c2VybmFtZQ== + type: Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - '*' + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + 
verbs: + - get + - watch + - list + - delete + - update + - create + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: v1 + data: + vsphere.conf: | + global: + secretName: cloud-provider-vsphere-credentials + secretNamespace: kube-system + thumbprint: "ABCDEFG" + insecureFlag: false + vcenter: + vsphere_server: + datacenters: + - 'SDDC-Datacenter' + secretName: cloud-provider-vsphere-credentials + secretNamespace: kube-system + server: 'vsphere_server' + thumbprint: 'ABCDEFG' + kind: ConfigMap + metadata: + name: vsphere-cloud-config + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: servicecatalog.k8s.io:apiserver-authentication-reader + namespace: kube-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: v1 + kind: Service + metadata: + labels: + component: cloud-controller-manager + name: cloud-controller-manager + namespace: kube-system + spec: + ports: + - port: 443 + protocol: TCP + targetPort: 43001 + selector: + component: cloud-controller-manager + type: NodePort + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + labels: + k8s-app: vsphere-cloud-controller-manager + name: vsphere-cloud-controller-manager + namespace: kube-system + spec: + selector: + matchLabels: + k8s-app: vsphere-cloud-controller-manager + template: + metadata: + labels: + k8s-app: vsphere-cloud-controller-manager + spec: + containers: + - args: + - --v=2 + - --cloud-provider=vsphere + - --cloud-config=/etc/cloud/vsphere.conf + image: public.ecr.aws/l0g8r8j6/kubernetes/cloud-provider-vsphere/cpi/manager:v1.21.0-eks-d-1-21-eks-a-v0.0.0-dev-build.158 + name: vsphere-cloud-controller-manager + resources: + requests: + cpu: 200m + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + hostNetwork: true + serviceAccountName: cloud-controller-manager + tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + - effect: NoSchedule + key: node.kubernetes.io/not-ready + volumes: + - configMap: + name: vsphere-cloud-config + name: vsphere-config-volume + updateStrategy: + type: RollingUpdate +kind: ConfigMap +metadata: + name: test-cpi-manifests + namespace: eksa-system diff --git a/pkg/providers/vsphere/testdata/expected_results_bottlerocket_mirror_config_multiple_ocinamespaces_md.yaml b/pkg/providers/vsphere/testdata/expected_results_bottlerocket_mirror_config_multiple_ocinamespaces_md.yaml new file mode 100644 index 000000000000..10608d563b70 --- /dev/null +++ b/pkg/providers/vsphere/testdata/expected_results_bottlerocket_mirror_config_multiple_ocinamespaces_md.yaml @@ -0,0 +1,101 @@ +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: test-md-0-template-1234567890000 + namespace: eksa-system +spec: + template: + spec: + 
joinConfiguration: + pause: + imageRepository: public.ecr.aws/eks-distro/kubernetes/pause + imageTag: v1.21.2-eks-1-21-4 + bottlerocketBootstrap: + imageRepository: public.ecr.aws/l0g8r8j6/bottlerocket-bootstrap + imageTag: v1-21-4-eks-a-v0.0.0-dev-build.158 + registryMirror: + mirrors: + - registry: "docker.io" + endpoints: + - 1.2.3.4:1234/v2/eks-anywhere + - registry: "public.ecr.aws" + endpoints: + - 1.2.3.4:1234/v2/eks-anywhere + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + taints: [] + kubeletExtraArgs: + cloud-provider: external + read-only-port: "0" + anonymous-auth: "false" + cgroup-driver: systemd + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: '{{ ds.meta_data.hostname }}' + preKubeadmCommands: + - hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts + - echo "{{ ds.meta_data.hostname }}" >/etc/hostname + users: + - name: ec2-user + sshAuthorizedKeys: + - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ==' + sudo: ALL=(ALL) NOPASSWD:ALL + format: bottlerocket +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test-md-0 + namespace: eksa-system +spec: + clusterName: test + replicas: 3 + selector: + matchLabels: {} + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: test + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: test-md-0-template-1234567890000 + clusterName: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: test-md-0-1234567890000 + version: v1.21.2-eks-1-21-4 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: test-md-0-1234567890000 + namespace: eksa-system +spec: + template: + spec: + cloneMode: linkedClone + datacenter: 'SDDC-Datacenter' + datastore: /SDDC-Datacenter/datastore/WorkloadDatastore + diskGiB: 25 + folder: '/SDDC-Datacenter/vm' + memoryMiB: 4096 + network: + devices: + - dhcp4: true + networkName: /SDDC-Datacenter/network/sddc-cgw-network-1 + numCPUs: 3 + resourcePool: '*/Resources' + server: vsphere_server + storagePolicyName: "vSAN Default Storage Policy" + template: /SDDC-Datacenter/vm/Templates/bottlerocket-1804-kube-v1.19.6 + thumbprint: 'ABCDEFG' + +--- diff --git a/pkg/providers/vsphere/vsphere_test.go b/pkg/providers/vsphere/vsphere_test.go index 559f1f3a9f6d..ac5779dbea11 100644 --- a/pkg/providers/vsphere/vsphere_test.go +++ b/pkg/providers/vsphere/vsphere_test.go @@ -1378,6 +1378,43 @@ func TestProviderGenerateDeploymentFileWithMirrorAuth(t *testing.T) { test.AssertContentToFile(t, string(md), 
"testdata/expected_results_mirror_with_auth_config_md.yaml") } +func TestProviderGenerateDeploymentFileForBottleRocketWithMultipleOciNamespaces(t *testing.T) { + clusterSpecManifest := "cluster_bottlerocket_mirror_config_multiple_ocinamespaces.yaml" + mockCtrl := gomock.NewController(t) + setupContext(t) + kubectl := mocks.NewMockProviderKubectlClient(mockCtrl) + cluster := &types.Cluster{Name: "test"} + clusterSpec := givenClusterSpec(t, clusterSpecManifest) + datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest) + ctx := context.Background() + govc := NewDummyProviderGovcClient() + vscb, _ := newMockVSphereClientBuilder(mockCtrl) + ipValidator := mocks.NewMockIPValidator(mockCtrl) + ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil) + v := NewValidator(govc, vscb) + govc.osTag = bottlerocketOSTag + provider := newProvider( + t, + datacenterConfig, + clusterSpec.Cluster, + govc, + kubectl, + v, + ipValidator, + ) + if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil { + t.Fatalf("failed to setup and validate: %v", err) + } + + cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec) + if err != nil { + t.Fatalf("failed to generate cluster api spec contents: %v", err) + } + + test.AssertContentToFile(t, string(cp), "testdata/expected_results_bottlerocket_mirror_config_multiple_ocinamespaces_cp.yaml") + test.AssertContentToFile(t, string(md), "testdata/expected_results_bottlerocket_mirror_config_multiple_ocinamespaces_md.yaml") +} + func TestUpdateKubeConfig(t *testing.T) { provider := givenProvider(t) content := []byte{} diff --git a/pkg/task/task.go b/pkg/task/task.go index 6f2e46d9cc55..118e6176f910 100644 --- a/pkg/task/task.go +++ b/pkg/task/task.go @@ -36,7 +36,7 @@ type CommandContext struct { Writer filewriter.FileWriter EksdInstaller interfaces.EksdInstaller EksaInstaller interfaces.EksaInstaller - PackageInstaller interfaces.PackageInstaller + PackageManager interfaces.PackageManager EksdUpgrader interfaces.EksdUpgrader ClusterUpgrader interfaces.ClusterUpgrader ClusterCreator interfaces.ClusterCreator @@ -52,6 +52,7 @@ type CommandContext struct { OriginalError error BackupClusterStateDir string ForceCleanup bool + ClusterMover interfaces.ClusterMover } func (c *CommandContext) SetError(err error) { diff --git a/pkg/validations/cluster.go b/pkg/validations/cluster.go index ade4be1cef1b..e4a75f656155 100644 --- a/pkg/validations/cluster.go +++ b/pkg/validations/cluster.go @@ -43,15 +43,6 @@ func ValidateOSForRegistryMirror(clusterSpec *cluster.Spec, provider providers.P return nil } - for _, mc := range machineConfigs { - // BottleRocket accepts only one registry mirror and that is hardcoded for public.ecr.aws at this moment. - // Such a validation will be removed once CAPI is patched to support more than one endpoints for BottleRocket. 
- if mc.OSFamily() == v1alpha1.Bottlerocket && - (len(ociNamespaces) != 1 || ociNamespaces[0].Registry != constants.DefaultCoreEKSARegistry) { - return fmt.Errorf("%s is the only registry supported in ociNamespaces for %s", constants.DefaultCoreEKSARegistry, v1alpha1.Bottlerocket) - } - } - return nil } diff --git a/pkg/validations/cluster_test.go b/pkg/validations/cluster_test.go index ca87f30a7cd9..553b598f13a7 100644 --- a/pkg/validations/cluster_test.go +++ b/pkg/validations/cluster_test.go @@ -291,7 +291,7 @@ func TestValidateOSForRegistryMirrorNoPublicEcrRegistry(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { tt.clusterSpec.Cluster.Spec.RegistryMirrorConfiguration = test.mirrorConfig - tt.Expect(validations.ValidateOSForRegistryMirror(tt.clusterSpec, tt.provider)).To(MatchError("public.ecr.aws is the only registry supported in ociNamespaces for bottlerocket")) + tt.Expect(validations.ValidateOSForRegistryMirror(tt.clusterSpec, tt.provider)).To(Succeed()) }) } } diff --git a/pkg/validations/input_test.go b/pkg/validations/input_test.go index 38fc103afb25..53ad3c2d3a5f 100644 --- a/pkg/validations/input_test.go +++ b/pkg/validations/input_test.go @@ -94,9 +94,9 @@ func TestValidateClusterNameArg(t *testing.T) { }, { name: "Failure Cluster Length", - args: []string{"qwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnm12345"}, - expectedError: errors.New("number of characters in qwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnm12345 should be less than 81"), - expectedArg: "qwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnm12345", + args: []string{"cluster-name-equals-to-36-characters"}, + expectedError: errors.New("number of characters in cluster-name-equals-to-36-characters should be less than 36"), + expectedArg: "cluster-name-equals-to-36-characters", }, } diff --git a/pkg/workflows/create_prep.go b/pkg/workflows/create_prep.go index bba782c31513..8e3b76d8c21c 100644 --- a/pkg/workflows/create_prep.go +++ b/pkg/workflows/create_prep.go @@ -7,22 +7,11 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/aws/eks-anywhere/pkg/workflows/interfaces" + "github.com/aws/eks-anywhere/pkg/clients/kubernetes" ) // CreateNamespaceIfNotPresent creates the namespace on the cluster if it does not already exist. 
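// Illustrative call with the new signature (a sketch, not part of this change set;
// "clientFactory" and "kubeconfigPath" stand in for whatever the caller already holds):
//
//	client, err := clientFactory.BuildClientFromKubeconfig(kubeconfigPath)
//	if err != nil {
//		return err
//	}
//	if err := workflows.CreateNamespaceIfNotPresent(ctx, clusterSpec.Cluster.Namespace, client); err != nil {
//		return err
//	}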
-func CreateNamespaceIfNotPresent(ctx context.Context, namespace, kubeconfig string, clientFactory interfaces.ClientFactory) error { - client, err := clientFactory.BuildClientFromKubeconfig(kubeconfig) - if err != nil { - return err - } - - if err := client.Get(ctx, namespace, "", &corev1.Namespace{}); err != nil && !errors.IsNotFound(err) { - return err - } else if err == nil { - return nil - } - +func CreateNamespaceIfNotPresent(ctx context.Context, namespace string, client kubernetes.Client) error { ns := &corev1.Namespace{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", @@ -33,7 +22,7 @@ func CreateNamespaceIfNotPresent(ctx context.Context, namespace, kubeconfig stri }, } - if err = client.Create(ctx, ns); err != nil { + if err := client.Create(ctx, ns); err != nil && !errors.IsAlreadyExists(err) { return err } diff --git a/pkg/workflows/create_prep_test.go b/pkg/workflows/create_prep_test.go index a5df25c1bc92..c3857f08a3df 100644 --- a/pkg/workflows/create_prep_test.go +++ b/pkg/workflows/create_prep_test.go @@ -50,14 +50,11 @@ func newNamespace(name string) *corev1.Namespace { func TestCreateNamespaceNotExistsSuccess(t *testing.T) { test := newCreatePrepTest(t) - kubeconfig := "testpath" namespace := "test-ns" - test.clientFactory.EXPECT().BuildClientFromKubeconfig(kubeconfig).Return(test.client, nil) - test.client.EXPECT().Get(test.ctx, namespace, "", &corev1.Namespace{}).Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, "")) test.client.EXPECT().Create(test.ctx, newNamespace(namespace)).Return(nil) - err := workflows.CreateNamespaceIfNotPresent(test.ctx, namespace, kubeconfig, test.clientFactory) + err := workflows.CreateNamespaceIfNotPresent(test.ctx, namespace, test.client) if err != nil { t.Fatalf("Expected nil, but got %v", err) } @@ -65,57 +62,23 @@ func TestCreateNamespaceNotExistsSuccess(t *testing.T) { func TestCreateNamespaceAlreadyExistsSuccess(t *testing.T) { test := newCreatePrepTest(t) - kubeconfig := "testpath" namespace := "default" - test.clientFactory.EXPECT().BuildClientFromKubeconfig(kubeconfig).Return(test.client, nil) - test.client.EXPECT().Get(test.ctx, namespace, "", &corev1.Namespace{}).Return(nil) + test.client.EXPECT().Create(test.ctx, newNamespace(namespace)).Return(apierrors.NewAlreadyExists(schema.GroupResource{Group: "", Resource: ""}, "")) - err := workflows.CreateNamespaceIfNotPresent(test.ctx, namespace, kubeconfig, test.clientFactory) + err := workflows.CreateNamespaceIfNotPresent(test.ctx, namespace, test.client) if err != nil { t.Fatalf("Expected nil, but got %v", err) } } -func TestCreateNamespaceBuildClientFail(t *testing.T) { - test := newCreatePrepTest(t) - kubeconfig := "testpath" - namespace := "test-ns" - - test.clientFactory.EXPECT().BuildClientFromKubeconfig(kubeconfig).Return(test.client, fmt.Errorf("")) - - err := workflows.CreateNamespaceIfNotPresent(test.ctx, namespace, kubeconfig, test.clientFactory) - - if err == nil { - t.Fatalf("Expected error, but got nil") - } -} - -func TestCreateNamespaceGetNamespaceFail(t *testing.T) { - test := newCreatePrepTest(t) - kubeconfig := "testpath" - namespace := "test-ns" - - test.clientFactory.EXPECT().BuildClientFromKubeconfig(kubeconfig).Return(test.client, nil) - test.client.EXPECT().Get(test.ctx, namespace, "", &corev1.Namespace{}).Return(fmt.Errorf("")) - - err := workflows.CreateNamespaceIfNotPresent(test.ctx, namespace, kubeconfig, test.clientFactory) - - if err == nil { - t.Fatalf("Expected error, but got nil") - } -} - func TestCreateNamespaceFail(t *testing.T) { 
test := newCreatePrepTest(t) - kubeconfig := "testpath" namespace := "test-ns" - test.clientFactory.EXPECT().BuildClientFromKubeconfig(kubeconfig).Return(test.client, nil) - test.client.EXPECT().Get(test.ctx, namespace, "", &corev1.Namespace{}).Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, "")) test.client.EXPECT().Create(test.ctx, newNamespace(namespace)).Return(fmt.Errorf("")) - err := workflows.CreateNamespaceIfNotPresent(test.ctx, namespace, kubeconfig, test.clientFactory) + err := workflows.CreateNamespaceIfNotPresent(test.ctx, namespace, test.client) if err == nil { t.Fatalf("Expected error, but got nil") diff --git a/pkg/workflows/interfaces/interfaces.go b/pkg/workflows/interfaces/interfaces.go index 60adf204d0e6..182765cc4720 100644 --- a/pkg/workflows/interfaces/interfaces.go +++ b/pkg/workflows/interfaces/interfaces.go @@ -42,6 +42,8 @@ type ClusterManager interface { Upgrade(ctx context.Context, cluster *types.Cluster, currentManagementComponents, newManagementComponents *cluster.ManagementComponents, newSpec *cluster.Spec) (*types.ChangeDiff, error) CreateRegistryCredSecret(ctx context.Context, mgmt *types.Cluster) error GenerateAWSIAMKubeconfig(ctx context.Context, cluster *types.Cluster) error + ResumeEKSAControllerReconcile(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec, provider providers.Provider) error + AllowDeleteWhilePaused(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error } type GitOpsManager interface { @@ -74,8 +76,10 @@ type EksdUpgrader interface { Upgrade(ctx context.Context, cluster *types.Cluster, currentSpec, newSpec *cluster.Spec) error } -type PackageInstaller interface { +// PackageManager handles installation and upgrades of curated packages. +type PackageManager interface { InstallCuratedPackages(ctx context.Context) + UpgradeCuratedPackages(ctx context.Context) } // ClusterUpgrader upgrades the cluster and waits until it's ready. @@ -98,3 +102,8 @@ type EksaInstaller interface { type ClusterDeleter interface { Run(ctx context.Context, spec *cluster.Spec, managementCluster types.Cluster) error } + +// ClusterMover moves the EKS-A cluster. +type ClusterMover interface { + Move(ctx context.Context, spec *cluster.Spec, srcClient, dstClient kubernetes.Client) error +} diff --git a/pkg/workflows/interfaces/mocks/clients.go b/pkg/workflows/interfaces/mocks/clients.go index be9f7cb42185..bdb7d47c53cf 100644 --- a/pkg/workflows/interfaces/mocks/clients.go +++ b/pkg/workflows/interfaces/mocks/clients.go @@ -1,5 +1,5 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/aws/eks-anywhere/pkg/workflows/interfaces (interfaces: Bootstrapper,ClusterManager,GitOpsManager,Validator,CAPIManager,EksdInstaller,EksdUpgrader,PackageInstaller,ClusterUpgrader,ClusterCreator,ClientFactory,EksaInstaller,ClusterDeleter) +// Source: github.com/aws/eks-anywhere/pkg/workflows/interfaces (interfaces: Bootstrapper,ClusterManager,GitOpsManager,Validator,CAPIManager,EksdInstaller,EksdUpgrader,PackageManager,ClusterUpgrader,ClusterCreator,ClientFactory,EksaInstaller,ClusterDeleter,ClusterMover) // Package mocks is a generated GoMock package. package mocks @@ -99,6 +99,20 @@ func (m *MockClusterManager) EXPECT() *MockClusterManagerMockRecorder { return m.recorder } +// AllowDeleteWhilePaused mocks base method. 
+func (m *MockClusterManager) AllowDeleteWhilePaused(arg0 context.Context, arg1 *types.Cluster, arg2 *cluster.Spec) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AllowDeleteWhilePaused", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// AllowDeleteWhilePaused indicates an expected call of AllowDeleteWhilePaused. +func (mr *MockClusterManagerMockRecorder) AllowDeleteWhilePaused(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllowDeleteWhilePaused", reflect.TypeOf((*MockClusterManager)(nil).AllowDeleteWhilePaused), arg0, arg1, arg2) +} + // ApplyBundles mocks base method. func (m *MockClusterManager) ApplyBundles(arg0 context.Context, arg1 *cluster.Spec, arg2 *types.Cluster) error { m.ctrl.T.Helper() @@ -287,6 +301,20 @@ func (mr *MockClusterManagerMockRecorder) ResumeCAPIWorkloadClusters(arg0, arg1 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResumeCAPIWorkloadClusters", reflect.TypeOf((*MockClusterManager)(nil).ResumeCAPIWorkloadClusters), arg0, arg1) } +// ResumeEKSAControllerReconcile mocks base method. +func (m *MockClusterManager) ResumeEKSAControllerReconcile(arg0 context.Context, arg1 *types.Cluster, arg2 *cluster.Spec, arg3 providers.Provider) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResumeEKSAControllerReconcile", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// ResumeEKSAControllerReconcile indicates an expected call of ResumeEKSAControllerReconcile. +func (mr *MockClusterManagerMockRecorder) ResumeEKSAControllerReconcile(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResumeEKSAControllerReconcile", reflect.TypeOf((*MockClusterManager)(nil).ResumeEKSAControllerReconcile), arg0, arg1, arg2, arg3) +} + // SaveLogsManagementCluster mocks base method. func (m *MockClusterManager) SaveLogsManagementCluster(arg0 context.Context, arg1 *cluster.Spec, arg2 *types.Cluster) error { m.ctrl.T.Helper() @@ -657,39 +685,51 @@ func (mr *MockEksdUpgraderMockRecorder) Upgrade(arg0, arg1, arg2, arg3 interface return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Upgrade", reflect.TypeOf((*MockEksdUpgrader)(nil).Upgrade), arg0, arg1, arg2, arg3) } -// MockPackageInstaller is a mock of PackageInstaller interface. -type MockPackageInstaller struct { +// MockPackageManager is a mock of PackageManager interface. +type MockPackageManager struct { ctrl *gomock.Controller - recorder *MockPackageInstallerMockRecorder + recorder *MockPackageManagerMockRecorder } -// MockPackageInstallerMockRecorder is the mock recorder for MockPackageInstaller. -type MockPackageInstallerMockRecorder struct { - mock *MockPackageInstaller +// MockPackageManagerMockRecorder is the mock recorder for MockPackageManager. +type MockPackageManagerMockRecorder struct { + mock *MockPackageManager } -// NewMockPackageInstaller creates a new mock instance. -func NewMockPackageInstaller(ctrl *gomock.Controller) *MockPackageInstaller { - mock := &MockPackageInstaller{ctrl: ctrl} - mock.recorder = &MockPackageInstallerMockRecorder{mock} +// NewMockPackageManager creates a new mock instance. +func NewMockPackageManager(ctrl *gomock.Controller) *MockPackageManager { + mock := &MockPackageManager{ctrl: ctrl} + mock.recorder = &MockPackageManagerMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. 
-func (m *MockPackageInstaller) EXPECT() *MockPackageInstallerMockRecorder { +func (m *MockPackageManager) EXPECT() *MockPackageManagerMockRecorder { return m.recorder } // InstallCuratedPackages mocks base method. -func (m *MockPackageInstaller) InstallCuratedPackages(arg0 context.Context) { +func (m *MockPackageManager) InstallCuratedPackages(arg0 context.Context) { m.ctrl.T.Helper() m.ctrl.Call(m, "InstallCuratedPackages", arg0) } // InstallCuratedPackages indicates an expected call of InstallCuratedPackages. -func (mr *MockPackageInstallerMockRecorder) InstallCuratedPackages(arg0 interface{}) *gomock.Call { +func (mr *MockPackageManagerMockRecorder) InstallCuratedPackages(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstallCuratedPackages", reflect.TypeOf((*MockPackageManager)(nil).InstallCuratedPackages), arg0) +} + +// UpgradeCuratedPackages mocks base method. +func (m *MockPackageManager) UpgradeCuratedPackages(arg0 context.Context) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "UpgradeCuratedPackages", arg0) +} + +// UpgradeCuratedPackages indicates an expected call of UpgradeCuratedPackages. +func (mr *MockPackageManagerMockRecorder) UpgradeCuratedPackages(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstallCuratedPackages", reflect.TypeOf((*MockPackageInstaller)(nil).InstallCuratedPackages), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpgradeCuratedPackages", reflect.TypeOf((*MockPackageManager)(nil).UpgradeCuratedPackages), arg0) } // MockClusterUpgrader is a mock of ClusterUpgrader interface. @@ -892,3 +932,40 @@ func (mr *MockClusterDeleterMockRecorder) Run(arg0, arg1, arg2 interface{}) *gom mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockClusterDeleter)(nil).Run), arg0, arg1, arg2) } + +// MockClusterMover is a mock of ClusterMover interface. +type MockClusterMover struct { + ctrl *gomock.Controller + recorder *MockClusterMoverMockRecorder +} + +// MockClusterMoverMockRecorder is the mock recorder for MockClusterMover. +type MockClusterMoverMockRecorder struct { + mock *MockClusterMover +} + +// NewMockClusterMover creates a new mock instance. +func NewMockClusterMover(ctrl *gomock.Controller) *MockClusterMover { + mock := &MockClusterMover{ctrl: ctrl} + mock.recorder = &MockClusterMoverMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockClusterMover) EXPECT() *MockClusterMoverMockRecorder { + return m.recorder +} + +// Move mocks base method. +func (m *MockClusterMover) Move(arg0 context.Context, arg1 *cluster.Spec, arg2, arg3 kubernetes.Client) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Move", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// Move indicates an expected call of Move. +func (mr *MockClusterMoverMockRecorder) Move(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Move", reflect.TypeOf((*MockClusterMover)(nil).Move), arg0, arg1, arg2, arg3) +} diff --git a/pkg/workflows/management/create.go b/pkg/workflows/management/create.go index 9b8e4ab60bb3..8679d9a8c351 100644 --- a/pkg/workflows/management/create.go +++ b/pkg/workflows/management/create.go @@ -12,16 +12,17 @@ import ( // Create is a schema for create cluster. 
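// For the regenerated mocks above, a hypothetical test could wire the renamed and new
// interfaces roughly like this (sketch only; "ctrl", "ctx", "spec" and the two clients
// are assumed to already exist in the test):
//
//	pkgs := mocks.NewMockPackageManager(ctrl)
//	pkgs.EXPECT().InstallCuratedPackages(ctx)
//
//	mover := mocks.NewMockClusterMover(ctrl)
//	mover.EXPECT().Move(ctx, spec, srcClient, dstClient).Return(nil)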
type Create struct { - bootstrapper interfaces.Bootstrapper - clientFactory interfaces.ClientFactory - provider providers.Provider - clusterManager interfaces.ClusterManager - gitOpsManager interfaces.GitOpsManager - writer filewriter.FileWriter - eksdInstaller interfaces.EksdInstaller - packageInstaller interfaces.PackageInstaller - clusterCreator interfaces.ClusterCreator - eksaInstaller interfaces.EksaInstaller + bootstrapper interfaces.Bootstrapper + clientFactory interfaces.ClientFactory + provider providers.Provider + clusterManager interfaces.ClusterManager + gitOpsManager interfaces.GitOpsManager + writer filewriter.FileWriter + eksdInstaller interfaces.EksdInstaller + packageManager interfaces.PackageManager + clusterCreator interfaces.ClusterCreator + eksaInstaller interfaces.EksaInstaller + clusterMover interfaces.ClusterMover } // NewCreate builds a new create construct. @@ -29,39 +30,42 @@ func NewCreate(bootstrapper interfaces.Bootstrapper, clientFactory interfaces.ClientFactory, provider providers.Provider, clusterManager interfaces.ClusterManager, gitOpsManager interfaces.GitOpsManager, writer filewriter.FileWriter, eksdInstaller interfaces.EksdInstaller, - packageInstaller interfaces.PackageInstaller, + packageManager interfaces.PackageManager, clusterCreator interfaces.ClusterCreator, eksaInstaller interfaces.EksaInstaller, + mover interfaces.ClusterMover, ) *Create { return &Create{ - bootstrapper: bootstrapper, - clientFactory: clientFactory, - provider: provider, - clusterManager: clusterManager, - gitOpsManager: gitOpsManager, - writer: writer, - eksdInstaller: eksdInstaller, - packageInstaller: packageInstaller, - clusterCreator: clusterCreator, - eksaInstaller: eksaInstaller, + bootstrapper: bootstrapper, + clientFactory: clientFactory, + provider: provider, + clusterManager: clusterManager, + gitOpsManager: gitOpsManager, + writer: writer, + eksdInstaller: eksdInstaller, + packageManager: packageManager, + clusterCreator: clusterCreator, + eksaInstaller: eksaInstaller, + clusterMover: mover, } } // Run runs all the create management cluster tasks. 
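// The constructor above gains a trailing ClusterMover argument; callers would now be
// wired roughly as follows (a sketch; the dependency variable names are placeholders):
//
//	create := management.NewCreate(
//		bootstrapper, clientFactory, provider, clusterManager, gitOpsManager,
//		writer, eksdInstaller, packageManager, clusterCreator, eksaInstaller,
//		clusterMover,
//	)
//	err := create.Run(ctx, clusterSpec, validator)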
func (c *Create) Run(ctx context.Context, clusterSpec *cluster.Spec, validator interfaces.Validator) error { commandContext := &task.CommandContext{ - Bootstrapper: c.bootstrapper, - ClientFactory: c.clientFactory, - Provider: c.provider, - ClusterManager: c.clusterManager, - GitOpsManager: c.gitOpsManager, - ClusterSpec: clusterSpec, - Writer: c.writer, - Validations: validator, - EksdInstaller: c.eksdInstaller, - PackageInstaller: c.packageInstaller, - ClusterCreator: c.clusterCreator, - EksaInstaller: c.eksaInstaller, + Bootstrapper: c.bootstrapper, + ClientFactory: c.clientFactory, + Provider: c.provider, + ClusterManager: c.clusterManager, + GitOpsManager: c.gitOpsManager, + ClusterSpec: clusterSpec, + Writer: c.writer, + Validations: validator, + EksdInstaller: c.eksdInstaller, + PackageManager: c.packageManager, + ClusterCreator: c.clusterCreator, + EksaInstaller: c.eksaInstaller, + ClusterMover: c.clusterMover, } return task.NewTaskRunner(&setupAndValidateCreate{}, c.writer).RunTask(ctx, commandContext) diff --git a/pkg/workflows/management/create_curated_packages.go b/pkg/workflows/management/create_curated_packages.go index a3c9d407a23b..14ab0465f202 100644 --- a/pkg/workflows/management/create_curated_packages.go +++ b/pkg/workflows/management/create_curated_packages.go @@ -9,7 +9,7 @@ import ( type installCuratedPackagesTask struct{} func (s *installCuratedPackagesTask) Run(ctx context.Context, commandContext *task.CommandContext) task.Task { - commandContext.PackageInstaller.InstallCuratedPackages(ctx) + commandContext.PackageManager.InstallCuratedPackages(ctx) return nil } diff --git a/pkg/workflows/management/create_install_eksa.go b/pkg/workflows/management/create_install_eksa.go index 93a91627bd5e..b24674f6b44d 100644 --- a/pkg/workflows/management/create_install_eksa.go +++ b/pkg/workflows/management/create_install_eksa.go @@ -58,15 +58,32 @@ func (s *installEksaComponentsOnWorkloadTask) Run(ctx context.Context, commandCo commandContext.ClusterSpec.Cluster.AddManagedByCLIAnnotation() commandContext.ClusterSpec.Cluster.SetManagementComponentsVersion(commandContext.ClusterSpec.EKSARelease.Spec.Version) + srcClient, err := commandContext.ClientFactory.BuildClientFromKubeconfig(commandContext.BootstrapCluster.KubeconfigFile) + if err != nil { + commandContext.SetError(err) + return &workflows.CollectMgmtClusterDiagnosticsTask{} + } + + dstClient, err := commandContext.ClientFactory.BuildClientFromKubeconfig(commandContext.WorkloadCluster.KubeconfigFile) + if err != nil { + commandContext.SetError(err) + return &workflows.CollectMgmtClusterDiagnosticsTask{} + } + if commandContext.ClusterSpec.Cluster.Namespace != "" { - if err := workflows.CreateNamespaceIfNotPresent(ctx, commandContext.ClusterSpec.Cluster.Namespace, commandContext.WorkloadCluster.KubeconfigFile, commandContext.ClientFactory); err != nil { + if err := workflows.CreateNamespaceIfNotPresent(ctx, commandContext.ClusterSpec.Cluster.Namespace, dstClient); err != nil { commandContext.SetError(err) return &workflows.CollectMgmtClusterDiagnosticsTask{} } } - logger.Info("Applying cluster spec to workload cluster") - if err = commandContext.ClusterCreator.Run(ctx, commandContext.ClusterSpec, *commandContext.WorkloadCluster); err != nil { + logger.Info("Moving cluster spec to workload cluster") + if err = commandContext.ClusterMover.Move(ctx, commandContext.ClusterSpec, srcClient, dstClient); err != nil { + commandContext.SetError(err) + return &workflows.CollectMgmtClusterDiagnosticsTask{} + } + + if err = 
commandContext.ClusterManager.ResumeEKSAControllerReconcile(ctx, commandContext.WorkloadCluster, commandContext.ClusterSpec, commandContext.Provider); err != nil { commandContext.SetError(err) return &workflows.CollectMgmtClusterDiagnosticsTask{} } diff --git a/pkg/workflows/management/create_test.go b/pkg/workflows/management/create_test.go index 4884b57be875..010319aaacb3 100644 --- a/pkg/workflows/management/create_test.go +++ b/pkg/workflows/management/create_test.go @@ -8,9 +8,7 @@ import ( "github.com/golang/mock/gomock" corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" "github.com/aws/eks-anywhere/internal/test" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" @@ -29,7 +27,7 @@ import ( type createTestSetup struct { t *testing.T - packageInstaller *mocks.MockPackageInstaller + packageInstaller *mocks.MockPackageManager clusterManager *mocks.MockClusterManager bootstrapper *mocks.MockBootstrapper gitOpsManager *mocks.MockGitOpsManager @@ -49,6 +47,7 @@ type createTestSetup struct { workflow *management.Create client *clientmocks.MockClient clientFactory *mocks.MockClientFactory + mover *mocks.MockClusterMover } func newCreateTest(t *testing.T) *createTestSetup { @@ -63,7 +62,7 @@ func newCreateTest(t *testing.T) *createTestSetup { eksdInstaller := mocks.NewMockEksdInstaller(mockCtrl) eksaInstaller := mocks.NewMockEksaInstaller(mockCtrl) - packageInstaller := mocks.NewMockPackageInstaller(mockCtrl) + packageInstaller := mocks.NewMockPackageManager(mockCtrl) datacenterConfig := &v1alpha1.VSphereDatacenterConfig{} machineConfigs := []providers.MachineConfig{&v1alpha1.VSphereMachineConfig{}} @@ -71,6 +70,7 @@ func newCreateTest(t *testing.T) *createTestSetup { validator := mocks.NewMockValidator(mockCtrl) client := clientmocks.NewMockClient(mockCtrl) clientFactory := mocks.NewMockClientFactory(mockCtrl) + mover := mocks.NewMockClusterMover(mockCtrl) workflow := management.NewCreate( bootstrapper, @@ -83,6 +83,7 @@ func newCreateTest(t *testing.T) *createTestSetup { packageInstaller, clusterCreator, eksaInstaller, + mover, ) for _, e := range featureEnvVars { @@ -119,6 +120,7 @@ func newCreateTest(t *testing.T) *createTestSetup { managementComponents: managementComponents, clusterSpec: clusterSpec, client: client, + mover: mover, } } @@ -217,7 +219,7 @@ func (c *createTestSetup) expectMoveManagement(err error) { c.ctx, c.bootstrapCluster, c.workloadCluster, c.workloadCluster.Name, c.clusterSpec, gomock.Any()).Return(err) } -func (c *createTestSetup) expectInstallEksaComponentsWorkload(err1, err2, err3 error) { +func (c *createTestSetup) expectInstallEksaComponentsWorkload(err1, err2, err3, err4, err5 error) { gomock.InOrder( c.eksdInstaller.EXPECT().InstallEksdCRDs(c.ctx, c.clusterSpec, c.workloadCluster).Return(err1), @@ -231,9 +233,13 @@ func (c *createTestSetup) expectInstallEksaComponentsWorkload(err1, err2, err3 e c.eksdInstaller.EXPECT().InstallEksdManifest( c.ctx, c.clusterSpec, c.workloadCluster), - c.clientFactory.EXPECT().BuildClientFromKubeconfig(c.workloadCluster.KubeconfigFile).Return(c.client, err3), + c.clientFactory.EXPECT().BuildClientFromKubeconfig(c.bootstrapCluster.KubeconfigFile).Return(c.client, err2), - c.clusterCreator.EXPECT().Run(c.ctx, c.clusterSpec, *c.workloadCluster).Return(err2), + c.clientFactory.EXPECT().BuildClientFromKubeconfig(c.workloadCluster.KubeconfigFile).Return(c.client, err3).MaxTimes(1), + + c.mover.EXPECT().Move(c.ctx, c.clusterSpec, 
c.client, c.client).Return(err4).MaxTimes(1), + + c.clusterManager.EXPECT().ResumeEKSAControllerReconcile(c.ctx, c.workloadCluster, c.clusterSpec, c.provider).Return(err5).MaxTimes(1), ) } @@ -256,7 +262,6 @@ func (c *createTestSetup) expectCreateNamespace() { }, ObjectMeta: v1.ObjectMeta{Name: n}, } - c.client.EXPECT().Get(c.ctx, n, "", &corev1.Namespace{}).Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, "")).MaxTimes(2) c.client.EXPECT().Create(c.ctx, ns).MaxTimes(2) } @@ -291,7 +296,7 @@ func TestCreateRunSuccess(t *testing.T) { test.expectInstallResourcesOnManagementTask(nil) test.expectPauseReconcile(nil) test.expectMoveManagement(nil) - test.expectInstallEksaComponentsWorkload(nil, nil, nil) + test.expectInstallEksaComponentsWorkload(nil, nil, nil, nil, nil) test.expectInstallGitOpsManager() test.expectWriteClusterConfig() test.expectDeleteBootstrap(nil) @@ -734,7 +739,7 @@ func TestCreateEKSAWorkloadFailure(t *testing.T) { test.expectInstallResourcesOnManagementTask(nil) test.expectPauseReconcile(nil) test.expectMoveManagement(nil) - test.expectInstallEksaComponentsWorkload(nil, fmt.Errorf("test"), nil) + test.expectInstallEksaComponentsWorkload(nil, nil, nil, fmt.Errorf("test"), nil) test.expectCreateNamespace() test.clusterManager.EXPECT().SaveLogsManagementCluster(test.ctx, test.clusterSpec, test.bootstrapCluster) @@ -747,7 +752,7 @@ func TestCreateEKSAWorkloadFailure(t *testing.T) { } } -func TestCreateEKSAWorkloadNamespaceFailure(t *testing.T) { +func TestCreateSrcClientFailure(t *testing.T) { test := newCreateTest(t) test.expectSetup() test.expectPreflightValidationsToPass() @@ -755,25 +760,59 @@ func TestCreateEKSAWorkloadNamespaceFailure(t *testing.T) { test.expectCAPIInstall(nil, nil, nil) test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil) test.expectCreateWorkload(nil, nil, nil, nil, nil, nil) - test.expectCreateNamespace() test.expectInstallResourcesOnManagementTask(nil) test.expectPauseReconcile(nil) test.expectMoveManagement(nil) - gomock.InOrder( + test.expectInstallEksaComponentsWorkload(nil, fmt.Errorf(""), nil, nil, nil) + test.expectCreateNamespace() + + test.clusterManager.EXPECT().SaveLogsManagementCluster(test.ctx, test.clusterSpec, test.bootstrapCluster) - test.eksdInstaller.EXPECT().InstallEksdCRDs(test.ctx, test.clusterSpec, test.workloadCluster), + test.writer.EXPECT().Write(fmt.Sprintf("%s-checkpoint.yaml", test.clusterSpec.Cluster.Name), gomock.Any()) - test.eksaInstaller.EXPECT().Install( - test.ctx, logger.Get(), test.workloadCluster, test.managementComponents, test.clusterSpec), + err := test.run() + if err == nil { + t.Fatalf("Create.Run() expected to return an error %v", err) + } +} + +func TestCreateDstClientFailure(t *testing.T) { + test := newCreateTest(t) + test.expectSetup() + test.expectPreflightValidationsToPass() + test.expectCreateBootstrap() + test.expectCAPIInstall(nil, nil, nil) + test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil) + test.expectCreateWorkload(nil, nil, nil, nil, nil, nil) + test.expectInstallResourcesOnManagementTask(nil) + test.expectPauseReconcile(nil) + test.expectMoveManagement(nil) + test.expectInstallEksaComponentsWorkload(nil, nil, fmt.Errorf(""), nil, nil) + test.expectCreateNamespace() - test.provider.EXPECT().InstallCustomProviderComponents( - test.ctx, test.workloadCluster.KubeconfigFile), + test.clusterManager.EXPECT().SaveLogsManagementCluster(test.ctx, test.clusterSpec, test.bootstrapCluster) - test.eksdInstaller.EXPECT().InstallEksdManifest( - test.ctx, 
test.clusterSpec, test.workloadCluster), + test.writer.EXPECT().Write(fmt.Sprintf("%s-checkpoint.yaml", test.clusterSpec.Cluster.Name), gomock.Any()) - test.clientFactory.EXPECT().BuildClientFromKubeconfig(test.workloadCluster.KubeconfigFile).Return(test.client, fmt.Errorf("")), - ) + err := test.run() + if err == nil { + t.Fatalf("Create.Run() expected to return an error %v", err) + } +} + +func TestCreateEKSAResumeWorkloadFailure(t *testing.T) { + test := newCreateTest(t) + test.expectSetup() + test.expectPreflightValidationsToPass() + test.expectCreateBootstrap() + test.expectCAPIInstall(nil, nil, nil) + test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil) + test.expectCreateWorkload(nil, nil, nil, nil, nil, nil) + test.expectInstallResourcesOnManagementTask(nil) + test.expectPauseReconcile(nil) + test.expectMoveManagement(nil) + test.expectInstallEksaComponentsWorkload(nil, nil, nil, nil, fmt.Errorf("test")) + test.expectCreateNamespace() test.clusterManager.EXPECT().SaveLogsManagementCluster(test.ctx, test.clusterSpec, test.bootstrapCluster) @@ -796,7 +835,7 @@ func TestCreateGitOPsFailure(t *testing.T) { test.expectInstallResourcesOnManagementTask(nil) test.expectPauseReconcile(nil) test.expectMoveManagement(nil) - test.expectInstallEksaComponentsWorkload(nil, nil, nil) + test.expectInstallEksaComponentsWorkload(nil, nil, nil, nil, nil) test.expectCreateNamespace() test.expectDatacenterConfig() test.expectMachineConfigs() @@ -825,7 +864,7 @@ func TestCreateWriteConfigFailure(t *testing.T) { test.expectInstallResourcesOnManagementTask(nil) test.expectPauseReconcile(nil) test.expectMoveManagement(nil) - test.expectInstallEksaComponentsWorkload(nil, nil, nil) + test.expectInstallEksaComponentsWorkload(nil, nil, nil, nil, nil) test.expectInstallGitOpsManager() test.expectPreflightValidationsToPass() test.expectCreateNamespace() @@ -860,7 +899,7 @@ func TestCreateWriteConfigAWSIAMFailure(t *testing.T) { test.expectInstallResourcesOnManagementTask(nil) test.expectPauseReconcile(nil) test.expectMoveManagement(nil) - test.expectInstallEksaComponentsWorkload(nil, nil, nil) + test.expectInstallEksaComponentsWorkload(nil, nil, nil, nil, nil) test.expectInstallGitOpsManager() test.expectPreflightValidationsToPass() test.clusterSpec.AWSIamConfig = &v1alpha1.AWSIamConfig{} @@ -896,7 +935,7 @@ func TestCreateRunDeleteBootstrapFailure(t *testing.T) { test.expectInstallResourcesOnManagementTask(nil) test.expectPauseReconcile(nil) test.expectMoveManagement(nil) - test.expectInstallEksaComponentsWorkload(nil, nil, nil) + test.expectInstallEksaComponentsWorkload(nil, nil, nil, nil, nil) test.expectInstallGitOpsManager() test.expectWriteClusterConfig() test.expectDeleteBootstrap(fmt.Errorf("test")) @@ -912,3 +951,20 @@ func TestCreateRunDeleteBootstrapFailure(t *testing.T) { t.Fatalf("Create.Run() err = %v, want err = nil", err) } } + +func TestCreateNamespaceClientFailure(t *testing.T) { + test := newCreateTest(t) + test.expectSetup() + test.expectCreateBootstrap() + test.expectPreflightValidationsToPass() + test.expectCAPIInstall(nil, nil, nil) + test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil) + test.clientFactory.EXPECT().BuildClientFromKubeconfig(test.bootstrapCluster.KubeconfigFile).Return(test.client, fmt.Errorf("")) + test.clusterManager.EXPECT().SaveLogsManagementCluster(test.ctx, test.clusterSpec, test.bootstrapCluster) + test.writer.EXPECT().Write("test-cluster-checkpoint.yaml", gomock.Any(), gomock.Any()) + + err := test.run() + if err == nil { + t.Fatalf("Create.Run() 
err = %v, want err = nil", err) + } +} diff --git a/pkg/workflows/management/create_workload.go b/pkg/workflows/management/create_workload.go index e3289a297816..bc8f5fc946b3 100644 --- a/pkg/workflows/management/create_workload.go +++ b/pkg/workflows/management/create_workload.go @@ -18,8 +18,14 @@ func (s *createWorkloadClusterTask) Run(ctx context.Context, commandContext *tas commandContext.ClusterSpec.Cluster.AddManagedByCLIAnnotation() commandContext.ClusterSpec.Cluster.SetManagementComponentsVersion(commandContext.ClusterSpec.EKSARelease.Spec.Version) + client, err := commandContext.ClientFactory.BuildClientFromKubeconfig(commandContext.BootstrapCluster.KubeconfigFile) + if err != nil { + commandContext.SetError(err) + return &workflows.CollectMgmtClusterDiagnosticsTask{} + } + if commandContext.ClusterSpec.Cluster.Namespace != "" { - if err := workflows.CreateNamespaceIfNotPresent(ctx, commandContext.ClusterSpec.Cluster.Namespace, commandContext.BootstrapCluster.KubeconfigFile, commandContext.ClientFactory); err != nil { + if err := workflows.CreateNamespaceIfNotPresent(ctx, commandContext.ClusterSpec.Cluster.Namespace, client); err != nil { commandContext.SetError(err) return &workflows.CollectMgmtClusterDiagnosticsTask{} } diff --git a/pkg/workflows/management/delete.go b/pkg/workflows/management/delete.go index a8af5e4b38aa..9ed7adf86484 100644 --- a/pkg/workflows/management/delete.go +++ b/pkg/workflows/management/delete.go @@ -22,6 +22,7 @@ type Delete struct { eksdInstaller interfaces.EksdInstaller eksaInstaller interfaces.EksaInstaller clientFactory interfaces.ClientFactory + clusterMover interfaces.ClusterMover } // NewDelete builds a new delete construct. @@ -34,6 +35,7 @@ func NewDelete(bootstrapper interfaces.Bootstrapper, eksdInstaller interfaces.EksdInstaller, eksaInstaller interfaces.EksaInstaller, clientFactory interfaces.ClientFactory, + mover interfaces.ClusterMover, ) *Delete { return &Delete{ bootstrapper: bootstrapper, @@ -45,6 +47,7 @@ func NewDelete(bootstrapper interfaces.Bootstrapper, eksdInstaller: eksdInstaller, eksaInstaller: eksaInstaller, clientFactory: clientFactory, + clusterMover: mover, } } @@ -62,6 +65,7 @@ func (c *Delete) Run(ctx context.Context, workload *types.Cluster, clusterSpec * EksdInstaller: c.eksdInstaller, EksaInstaller: c.eksaInstaller, ClientFactory: c.clientFactory, + ClusterMover: c.clusterMover, } return task.NewTaskRunner(&setupAndValidateDelete{}, c.writer).RunTask(ctx, commandContext) diff --git a/pkg/workflows/management/delete_install_eksa.go b/pkg/workflows/management/delete_install_eksa.go index b7dd96e6c93c..30f447113985 100644 --- a/pkg/workflows/management/delete_install_eksa.go +++ b/pkg/workflows/management/delete_install_eksa.go @@ -3,15 +3,9 @@ package management import ( "context" - "github.com/pkg/errors" - - "github.com/aws/eks-anywhere/pkg/clients/kubernetes" - "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/logger" "github.com/aws/eks-anywhere/pkg/task" - "github.com/aws/eks-anywhere/pkg/types" "github.com/aws/eks-anywhere/pkg/workflows" - "github.com/aws/eks-anywhere/pkg/workflows/interfaces" ) type installEksaComponentsOnBootstrapForDeleteTask struct{} @@ -24,16 +18,34 @@ func (s *installEksaComponentsOnBootstrapForDeleteTask) Run(ctx context.Context, return &workflows.CollectDiagnosticsTask{} } - commandContext.ClusterSpec.Cluster.PauseReconcile() - commandContext.ClusterSpec.Cluster.AllowDeleteWhilePaused() - 
commandContext.ClusterSpec.Cluster.SetFinalizers([]string{"clusters.anywhere.eks.amazonaws.com/finalizer"}) - commandContext.ClusterSpec.Cluster.AddManagedByCLIAnnotation() - err = applyClusterSpecOnBootstrapForDeleteTask(ctx, commandContext.ClusterSpec, commandContext.BootstrapCluster, commandContext.ClientFactory) + srcClient, err := commandContext.ClientFactory.BuildClientFromKubeconfig(commandContext.WorkloadCluster.KubeconfigFile) + if err != nil { + commandContext.SetError(err) + return &workflows.CollectMgmtClusterDiagnosticsTask{} + } + + dstClient, err := commandContext.ClientFactory.BuildClientFromKubeconfig(commandContext.BootstrapCluster.KubeconfigFile) + if err != nil { + commandContext.SetError(err) + return &workflows.CollectMgmtClusterDiagnosticsTask{} + } + + if err := workflows.CreateNamespaceIfNotPresent(ctx, commandContext.ClusterSpec.Cluster.Namespace, dstClient); err != nil { + commandContext.SetError(err) + return &workflows.CollectMgmtClusterDiagnosticsTask{} + } + + err = commandContext.ClusterMover.Move(ctx, commandContext.ClusterSpec, srcClient, dstClient) if err != nil { commandContext.SetError(err) return &workflows.CollectDiagnosticsTask{} } + if err = commandContext.ClusterManager.AllowDeleteWhilePaused(ctx, commandContext.BootstrapCluster, commandContext.ClusterSpec); err != nil { + commandContext.SetError(err) + return &workflows.CollectMgmtClusterDiagnosticsTask{} + } + return &deleteManagementCluster{} } @@ -48,26 +60,3 @@ func (s *installEksaComponentsOnBootstrapForDeleteTask) Restore(ctx context.Cont func (s *installEksaComponentsOnBootstrapForDeleteTask) Checkpoint() *task.CompletedTask { return nil } - -func applyClusterSpecOnBootstrapForDeleteTask(ctx context.Context, spec *cluster.Spec, cluster *types.Cluster, clientFactory interfaces.ClientFactory) error { - if err := workflows.CreateNamespaceIfNotPresent(ctx, spec.Cluster.Namespace, cluster.KubeconfigFile, clientFactory); err != nil { - return errors.Wrapf(err, "creating namespace on bootstrap") - } - - client, err := clientFactory.BuildClientFromKubeconfig(cluster.KubeconfigFile) - if err != nil { - return errors.Wrap(err, "building client to apply cluster spec changes") - } - - for _, obj := range spec.ClusterAndChildren() { - if err := client.ApplyServerSide(ctx, - "eks-a-cli", - obj, - kubernetes.ApplyServerSideOptions{ForceOwnership: true}, - ); err != nil { - return errors.Wrapf(err, "applying cluster spec") - } - } - - return nil -} diff --git a/pkg/workflows/management/delete_test.go b/pkg/workflows/management/delete_test.go index 32c2cac65325..05162cf57995 100644 --- a/pkg/workflows/management/delete_test.go +++ b/pkg/workflows/management/delete_test.go @@ -8,9 +8,6 @@ import ( "github.com/golang/mock/gomock" corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" "github.com/aws/eks-anywhere/internal/test" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" @@ -48,6 +45,7 @@ type deleteTestSetup struct { clientFactory *mocks.MockClientFactory managementComponents *cluster.ManagementComponents client *clientmocks.MockClient + mover *mocks.MockClusterMover } func newDeleteTest(t *testing.T) *deleteTestSetup { @@ -73,6 +71,7 @@ func newDeleteTest(t *testing.T) *deleteTestSetup { s.GitOpsConfig = &v1alpha1.GitOpsConfig{} }) managementComponents := cluster.ManagementComponentsFromBundles(clusterSpec.Bundles) + mover := mocks.NewMockClusterMover(mockCtrl) workload := management.NewDelete( 
bootstrapper, @@ -84,6 +83,7 @@ func newDeleteTest(t *testing.T) *deleteTestSetup { eksdInstaller, eksaInstaller, clientFactory, + mover, ) for _, e := range featureEnvVars { @@ -110,6 +110,7 @@ func newDeleteTest(t *testing.T) *deleteTestSetup { clientFactory: clientFactory, managementComponents: managementComponents, client: client, + mover: mover, } } @@ -166,7 +167,7 @@ func (c *deleteTestSetup) expectMoveCAPI(err1, err2 error) { c.clusterManager.EXPECT().MoveCAPI(c.ctx, c.workloadCluster, c.bootstrapCluster, c.workloadCluster.Name, c.clusterSpec, gomock.Any()).Return(err2) } -func (c *deleteTestSetup) expectInstallEksaComponentsBootstrap(err1, err2, err3, err4, err5 error) { +func (c *deleteTestSetup) expectInstallEksaComponentsBootstrap(err1, err2, err3, err4, err5, err6, err7, err8, err9 error) { gomock.InOrder( c.eksdInstaller.EXPECT().InstallEksdCRDs(c.ctx, c.clusterSpec, c.bootstrapCluster).Return(err1).AnyTimes(), @@ -179,7 +180,15 @@ func (c *deleteTestSetup) expectInstallEksaComponentsBootstrap(err1, err2, err3, c.eksdInstaller.EXPECT().InstallEksdManifest( c.ctx, c.clusterSpec, c.bootstrapCluster).Return(err4).AnyTimes(), - c.clientFactory.EXPECT().BuildClientFromKubeconfig(c.bootstrapCluster.KubeconfigFile).Return(c.client, err5).AnyTimes(), + c.clientFactory.EXPECT().BuildClientFromKubeconfig(c.workloadCluster.KubeconfigFile).Return(c.client, err5).MaxTimes(1), + + c.clientFactory.EXPECT().BuildClientFromKubeconfig(c.bootstrapCluster.KubeconfigFile).Return(c.client, err6).MaxTimes(1), + + c.client.EXPECT().Create(c.ctx, gomock.AssignableToTypeOf(&corev1.Namespace{})).Return(err7).AnyTimes(), + + c.mover.EXPECT().Move(c.ctx, c.clusterSpec, c.client, c.client).Return(err8).AnyTimes(), + + c.clusterManager.EXPECT().AllowDeleteWhilePaused(c.ctx, c.bootstrapCluster, c.clusterSpec).Return(err9).AnyTimes(), ) } @@ -195,19 +204,6 @@ func (c *deleteTestSetup) expectApplyOnBootstrap(err error) { c.client.EXPECT().ApplyServerSide(c.ctx, "eks-a-cli", gomock.Any(), gomock.Any()).Return(err).AnyTimes() } -func (c *deleteTestSetup) expectCreateNamespace() { - n := c.clusterSpec.Cluster.Namespace - ns := &corev1.Namespace{ - TypeMeta: v1.TypeMeta{ - APIVersion: "v1", - Kind: "Namespace", - }, - ObjectMeta: v1.ObjectMeta{Name: n}, - } - c.client.EXPECT().Get(c.ctx, n, "", &corev1.Namespace{}).Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, "")) - c.client.EXPECT().Create(c.ctx, ns) -} - func TestDeleteRunSuccess(t *testing.T) { features.ClearCache() os.Setenv(features.UseControllerForCli, "true") @@ -218,12 +214,11 @@ func TestDeleteRunSuccess(t *testing.T) { test.expectPreCAPI(nil) test.expectInstallCAPI(nil) test.expectMoveCAPI(nil, nil) - test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil, nil) + test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil, nil, nil, nil, nil, nil) test.expectApplyOnBootstrap(nil) test.expectDeleteCluster(nil, nil) test.expectCleanupGitRepo(nil) test.expectDeleteBootstrap(nil) - test.expectCreateNamespace() err := test.run() if err != nil { @@ -355,7 +350,7 @@ func TestDeleteRunFailResumeReconcile(t *testing.T) { test.expectPreCAPI(nil) test.expectInstallCAPI(nil) test.expectMoveCAPI(nil, nil) - test.expectInstallEksaComponentsBootstrap(fmt.Errorf(""), nil, nil, nil, nil) + test.expectInstallEksaComponentsBootstrap(fmt.Errorf(""), nil, nil, nil, nil, nil, nil, nil, nil) test.expectSaveLogsManagement() test.expectSaveLogsWorkload() err := test.run() @@ -374,7 +369,7 @@ func TestDeleteRunFailAddAnnotation(t *testing.T) 
{ test.expectPreCAPI(nil) test.expectInstallCAPI(nil) test.expectMoveCAPI(nil, nil) - test.expectInstallEksaComponentsBootstrap(nil, fmt.Errorf(""), nil, nil, nil) + test.expectInstallEksaComponentsBootstrap(nil, fmt.Errorf(""), nil, nil, nil, nil, nil, nil, nil) test.expectSaveLogsManagement() test.expectSaveLogsWorkload() @@ -394,7 +389,7 @@ func TestDeleteRunFailProviderInstall(t *testing.T) { test.expectPreCAPI(nil) test.expectInstallCAPI(nil) test.expectMoveCAPI(nil, nil) - test.expectInstallEksaComponentsBootstrap(nil, nil, fmt.Errorf(""), nil, nil) + test.expectInstallEksaComponentsBootstrap(nil, nil, fmt.Errorf(""), nil, nil, nil, nil, nil, nil) test.expectSaveLogsManagement() test.expectSaveLogsWorkload() @@ -414,7 +409,7 @@ func TestDeleteRunFailEksdInstall(t *testing.T) { test.expectPreCAPI(nil) test.expectInstallCAPI(nil) test.expectMoveCAPI(nil, nil) - test.expectInstallEksaComponentsBootstrap(nil, nil, nil, fmt.Errorf(""), nil) + test.expectInstallEksaComponentsBootstrap(nil, nil, nil, fmt.Errorf(""), nil, nil, nil, nil, nil) test.expectSaveLogsManagement() test.expectSaveLogsWorkload() @@ -424,7 +419,7 @@ func TestDeleteRunFailEksdInstall(t *testing.T) { } } -func TestDeleteRunFailBuildClient(t *testing.T) { +func TestDeleteRunFailBuildSrcClient(t *testing.T) { features.ClearCache() os.Setenv(features.UseControllerForCli, "true") test := newDeleteTest(t) @@ -434,9 +429,65 @@ func TestDeleteRunFailBuildClient(t *testing.T) { test.expectPreCAPI(nil) test.expectInstallCAPI(nil) test.expectMoveCAPI(nil, nil) - test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil, fmt.Errorf("")) + test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil, fmt.Errorf(""), nil, nil, nil, nil) + test.expectSaveLogsManagement() + + err := test.run() + if err == nil { + t.Fatalf("Delete.Run() err = %v, want err = nil", err) + } +} + +func TestDeleteRunFailBuildDstClient(t *testing.T) { + features.ClearCache() + os.Setenv(features.UseControllerForCli, "true") + test := newDeleteTest(t) + test.expectSetup(nil) + test.expectBootstrapOpts(nil) + test.expectCreateBootstrap(nil) + test.expectPreCAPI(nil) + test.expectInstallCAPI(nil) + test.expectMoveCAPI(nil, nil) + test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil, nil, fmt.Errorf(""), nil, nil, nil) + test.expectSaveLogsManagement() + + err := test.run() + if err == nil { + t.Fatalf("Delete.Run() err = %v, want err = nil", err) + } +} + +func TestDeleteRunFailCreateNamespace(t *testing.T) { + features.ClearCache() + os.Setenv(features.UseControllerForCli, "true") + test := newDeleteTest(t) + test.expectSetup(nil) + test.expectBootstrapOpts(nil) + test.expectCreateBootstrap(nil) + test.expectPreCAPI(nil) + test.expectInstallCAPI(nil) + test.expectMoveCAPI(nil, nil) + test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil, nil, nil, fmt.Errorf(""), nil, nil) + test.expectSaveLogsManagement() + + err := test.run() + if err == nil { + t.Fatalf("Delete.Run() err = %v, want err = nil", err) + } +} + +func TestDeleteRunFailAllowDeleteWhilePaused(t *testing.T) { + features.ClearCache() + os.Setenv(features.UseControllerForCli, "true") + test := newDeleteTest(t) + test.expectSetup(nil) + test.expectBootstrapOpts(nil) + test.expectCreateBootstrap(nil) + test.expectPreCAPI(nil) + test.expectInstallCAPI(nil) + test.expectMoveCAPI(nil, nil) + test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil, nil, nil, nil, nil, fmt.Errorf("")) test.expectSaveLogsManagement() - test.expectSaveLogsWorkload() err := test.run() if err == nil { @@ 
-454,8 +505,7 @@ func TestDeleteRunFailPostDelete(t *testing.T) { test.expectPreCAPI(nil) test.expectInstallCAPI(nil) test.expectMoveCAPI(nil, nil) - test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil, nil) - test.expectCreateNamespace() + test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil, nil, nil, nil, nil, nil) test.expectApplyOnBootstrap(nil) test.expectDeleteCluster(nil, fmt.Errorf("")) test.expectSaveLogsManagement() @@ -476,8 +526,7 @@ func TestDeleteRunFailCleanupGit(t *testing.T) { test.expectPreCAPI(nil) test.expectInstallCAPI(nil) test.expectMoveCAPI(nil, nil) - test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil, nil) - test.expectCreateNamespace() + test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil, nil, nil, nil, nil, nil) test.expectApplyOnBootstrap(nil) test.expectDeleteCluster(nil, nil) test.expectCleanupGitRepo(fmt.Errorf("")) @@ -500,9 +549,8 @@ func TestDeleteRunFailDeleteBootstrap(t *testing.T) { test.expectPreCAPI(nil) test.expectInstallCAPI(nil) test.expectMoveCAPI(nil, nil) - test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil, nil) + test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil, nil, nil, nil, nil, nil) test.expectApplyOnBootstrap(nil) - test.expectCreateNamespace() test.expectDeleteCluster(nil, nil) test.expectCleanupGitRepo(nil) test.expectDeleteBootstrap(fmt.Errorf("")) diff --git a/pkg/workflows/management/post_cluster_upgrade.go b/pkg/workflows/management/post_cluster_upgrade.go index 78c7975fa446..ca3f8f6ab128 100644 --- a/pkg/workflows/management/post_cluster_upgrade.go +++ b/pkg/workflows/management/post_cluster_upgrade.go @@ -28,7 +28,7 @@ func (s *postClusterUpgrade) Run(ctx context.Context, commandContext *task.Comma logger.Info(fmt.Sprintf("management cluster CAPI backup file not found: %v", err)) } - return nil + return &upgradeCuratedPackagesTask{} } func (s *postClusterUpgrade) Name() string { diff --git a/pkg/workflows/management/upgrade.go b/pkg/workflows/management/upgrade.go index e8de7b767fa2..001336c0feb6 100644 --- a/pkg/workflows/management/upgrade.go +++ b/pkg/workflows/management/upgrade.go @@ -24,6 +24,7 @@ type Upgrade struct { eksdUpgrader interfaces.EksdUpgrader upgradeChangeDiff *types.ChangeDiff clusterUpgrader interfaces.ClusterUpgrader + packageManager interfaces.PackageManager } // NewUpgrade builds a new upgrade construct. 
@@ -35,6 +36,7 @@ func NewUpgrade(clientFactory interfaces.ClientFactory, provider providers.Provi eksdUpgrader interfaces.EksdUpgrader, eksdInstaller interfaces.EksdInstaller, clusterUpgrade interfaces.ClusterUpgrader, + packageManager interfaces.PackageManager, ) *Upgrade { upgradeChangeDiff := types.NewChangeDiff() return &Upgrade{ @@ -48,6 +50,7 @@ func NewUpgrade(clientFactory interfaces.ClientFactory, provider providers.Provi eksdInstaller: eksdInstaller, upgradeChangeDiff: upgradeChangeDiff, clusterUpgrader: clusterUpgrade, + packageManager: packageManager, } } @@ -67,6 +70,7 @@ func (c *Upgrade) Run(ctx context.Context, clusterSpec *cluster.Spec, management EksdUpgrader: c.eksdUpgrader, UpgradeChangeDiff: c.upgradeChangeDiff, ClusterUpgrader: c.clusterUpgrader, + PackageManager: c.packageManager, } if features.IsActive(features.CheckpointEnabled()) { return task.NewTaskRunner(&setupAndValidateUpgrade{}, c.writer, task.WithCheckpointFile()).RunTask(ctx, commandContext) diff --git a/pkg/workflows/management/upgrade_curated_packages.go b/pkg/workflows/management/upgrade_curated_packages.go new file mode 100644 index 000000000000..13a8c7978142 --- /dev/null +++ b/pkg/workflows/management/upgrade_curated_packages.go @@ -0,0 +1,31 @@ +package management + +import ( + "context" + + "github.com/aws/eks-anywhere/pkg/task" +) + +type upgradeCuratedPackagesTask struct{} + +func (s *upgradeCuratedPackagesTask) Run(ctx context.Context, commandContext *task.CommandContext) task.Task { + if commandContext.CurrentClusterSpec.Cluster.Spec.RegistryMirrorConfiguration.Equal(commandContext.ClusterSpec.Cluster.Spec.RegistryMirrorConfiguration) { + return nil + } + + commandContext.PackageManager.UpgradeCuratedPackages(ctx) + + return nil +} + +func (s *upgradeCuratedPackagesTask) Name() string { + return "upgrade-curated-packages" +} + +func (s *upgradeCuratedPackagesTask) Restore(_ context.Context, _ *task.CommandContext, _ *task.CompletedTask) (task.Task, error) { + return nil, nil +} + +func (s *upgradeCuratedPackagesTask) Checkpoint() *task.CompletedTask { + return nil +} diff --git a/pkg/workflows/management/upgrade_test.go b/pkg/workflows/management/upgrade_test.go index df6a979eff02..dc42c4a9a324 100644 --- a/pkg/workflows/management/upgrade_test.go +++ b/pkg/workflows/management/upgrade_test.go @@ -10,12 +10,14 @@ import ( "github.com/golang/mock/gomock" . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/aws/eks-anywhere/internal/test" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/clients/kubernetes" "github.com/aws/eks-anywhere/pkg/cluster" + "github.com/aws/eks-anywhere/pkg/constants" "github.com/aws/eks-anywhere/pkg/features" writermocks "github.com/aws/eks-anywhere/pkg/filewriter/mocks" "github.com/aws/eks-anywhere/pkg/providers" @@ -48,6 +50,7 @@ type upgradeManagementTestSetup struct { managementCluster *types.Cluster managementStatePath string management *management.Upgrade + packages *mocks.MockPackageManager } func newUpgradeManagementTest(t *testing.T) *upgradeManagementTestSetup { @@ -65,6 +68,7 @@ func newUpgradeManagementTest(t *testing.T) *upgradeManagementTestSetup { capiUpgrader := mocks.NewMockCAPIManager(mockCtrl) machineConfigs := []providers.MachineConfig{&v1alpha1.VSphereMachineConfig{}} clusterUpgrader := mocks.NewMockClusterUpgrader(mockCtrl) + packageUpgrader := mocks.NewMockPackageManager(mockCtrl) management := management.NewUpgrade( clientFactory, provider, @@ -75,6 +79,7 @@ func newUpgradeManagementTest(t *testing.T) *upgradeManagementTestSetup { eksdUpgrader, eksdInstaller, clusterUpgrader, + packageUpgrader, ) for _, e := range featureEnvVars { @@ -111,6 +116,7 @@ func newUpgradeManagementTest(t *testing.T) *upgradeManagementTestSetup { datacenterConfig: datacenterConfig, machineConfigs: machineConfigs, management: management, + packages: packageUpgrader, ctx: context.Background(), currentManagementComponents: cluster.ManagementComponentsFromBundles(currentClusterSpec.Bundles), newManagementComponents: cluster.ManagementComponentsFromBundles(newClusterSpec.Bundles), @@ -308,6 +314,10 @@ func (c *upgradeManagementTestSetup) expectPreflightValidationsToPass() { c.validator.EXPECT().PreflightValidations(c.ctx).Return(nil) } +func (c *upgradeManagementTestSetup) expectPackagesUpgrade() { + c.packages.EXPECT().UpgradeCuratedPackages(c.ctx) +} + func TestUpgradeManagementRunUpdateSetupFailed(t *testing.T) { os.Unsetenv(features.CheckpointEnabledEnvVar) features.ClearCache() @@ -730,6 +740,45 @@ func TestUpgradeManagementRunResumeClusterResourcesReconcileFailed(t *testing.T) } } +func TestUpgradeManagementRunUpgradeCuratedPackagesSuccess(t *testing.T) { + os.Unsetenv(features.CheckpointEnabledEnvVar) + features.ClearCache() + tt := newUpgradeManagementClusterTest(t) + tt.newClusterSpec.Cluster.Spec.RegistryMirrorConfiguration = &v1alpha1.RegistryMirrorConfiguration{} + packagesManager := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "eks-anywhere-packages", + Namespace: constants.EksaPackagesName, + }, + } + tt.client = test.NewFakeKubeClient(tt.currentClusterSpec.Cluster, tt.currentClusterSpec.EKSARelease, tt.currentClusterSpec.Bundles, packagesManager) + tt.expectSetup() + tt.expectPreflightValidationsToPass() + tt.expectUpdateSecrets(nil) + tt.expectEnsureManagementEtcdCAPIComponentsExist(nil) + tt.expectUpgradeCoreComponents() + tt.expectPauseGitOpsReconcile(nil) + tt.expectBackupManagementFromCluster(nil) + tt.expectPauseCAPIWorkloadClusters(nil) + tt.expectDatacenterConfig() + tt.expectMachineConfigs() + tt.expectInstallEksdManifest(nil) + tt.expectApplyBundles(nil) + tt.expectApplyReleases(nil) + tt.expectUpgradeManagementCluster() + tt.expectResumeCAPIWorkloadClustersAPI(nil) + tt.expectUpdateGitEksaSpec(nil) + tt.expectForceReconcileGitRepo(nil) + tt.expectResumeGitOpsReconcile(nil) + 
tt.expectWriteManagementClusterConfig(nil) + tt.expectPackagesUpgrade() + + err := tt.run() + if err != nil { + t.Fatalf("UpgradeManagement.Run() err = %v, want err = nil", err) + } +} + func TestUpgradeManagementRunSuccess(t *testing.T) { os.Unsetenv(features.CheckpointEnabledEnvVar) features.ClearCache() diff --git a/pkg/workflows/workload/create.go b/pkg/workflows/workload/create.go index 219a173a3d12..e7aa5146b746 100644 --- a/pkg/workflows/workload/create.go +++ b/pkg/workflows/workload/create.go @@ -19,7 +19,7 @@ type Create struct { writer filewriter.FileWriter eksdInstaller interfaces.EksdInstaller clusterCreator interfaces.ClusterCreator - packageInstaller interfaces.PackageInstaller + packageInstaller interfaces.PackageManager } // NewCreate builds a new create construct. @@ -27,7 +27,7 @@ func NewCreate(provider providers.Provider, clusterManager interfaces.ClusterManager, gitOpsManager interfaces.GitOpsManager, writer filewriter.FileWriter, eksdInstaller interfaces.EksdInstaller, - packageInstaller interfaces.PackageInstaller, + packageInstaller interfaces.PackageManager, clusterCreator interfaces.ClusterCreator, clientFactory interfaces.ClientFactory, ) *Create { diff --git a/pkg/workflows/workload/create_test.go b/pkg/workflows/workload/create_test.go index d5f667f3a8a1..5005ca2b5d32 100644 --- a/pkg/workflows/workload/create_test.go +++ b/pkg/workflows/workload/create_test.go @@ -35,7 +35,7 @@ type createTestSetup struct { writer *writermocks.MockFileWriter validator *mocks.MockValidator eksd *mocks.MockEksdInstaller - packageInstaller *mocks.MockPackageInstaller + packageInstaller *mocks.MockPackageManager clusterCreator *mocks.MockClusterCreator datacenterConfig providers.DatacenterConfig machineConfigs []providers.MachineConfig @@ -56,7 +56,7 @@ func newCreateTest(t *testing.T) *createTestSetup { provider := providermocks.NewMockProvider(mockCtrl) writer := writermocks.NewMockFileWriter(mockCtrl) eksd := mocks.NewMockEksdInstaller(mockCtrl) - packageInstaller := mocks.NewMockPackageInstaller(mockCtrl) + packageInstaller := mocks.NewMockPackageManager(mockCtrl) eksdInstaller := mocks.NewMockEksdInstaller(mockCtrl) datacenterConfig := &v1alpha1.VSphereDatacenterConfig{} diff --git a/pkg/workflows/workload/createcluster.go b/pkg/workflows/workload/createcluster.go index b6bca3394383..0c93e91b4dec 100644 --- a/pkg/workflows/workload/createcluster.go +++ b/pkg/workflows/workload/createcluster.go @@ -15,8 +15,14 @@ type createCluster struct{} func (c *createCluster) Run(ctx context.Context, commandContext *task.CommandContext) task.Task { logger.Info("Creating workload cluster") + client, err := commandContext.ClientFactory.BuildClientFromKubeconfig(commandContext.ManagementCluster.KubeconfigFile) + if err != nil { + commandContext.SetError(err) + return &workflows.CollectMgmtClusterDiagnosticsTask{} + } + if commandContext.ClusterSpec.Cluster.Namespace != "" { - if err := workflows.CreateNamespaceIfNotPresent(ctx, commandContext.ClusterSpec.Cluster.Namespace, commandContext.ManagementCluster.KubeconfigFile, commandContext.ClientFactory); err != nil { + if err := workflows.CreateNamespaceIfNotPresent(ctx, commandContext.ClusterSpec.Cluster.Namespace, client); err != nil { commandContext.SetError(err) return &workflows.CollectMgmtClusterDiagnosticsTask{} } diff --git a/pkg/workflows/workload/upgrade.go b/pkg/workflows/workload/upgrade.go index 722e4f52fef1..db955c59b99c 100644 --- a/pkg/workflows/workload/upgrade.go +++ b/pkg/workflows/workload/upgrade.go @@ -20,7 +20,7 @@ type 
Upgrade struct { writer filewriter.FileWriter eksdInstaller interfaces.EksdInstaller clusterUpgrader interfaces.ClusterUpgrader - packageInstaller interfaces.PackageInstaller + packageInstaller interfaces.PackageManager } // NewUpgrade builds a new upgrade construct. @@ -30,7 +30,7 @@ func NewUpgrade(clientFactory interfaces.ClientFactory, writer filewriter.FileWriter, clusterUpgrader interfaces.ClusterUpgrader, eksdInstaller interfaces.EksdInstaller, - packageInstaller interfaces.PackageInstaller, + packageInstaller interfaces.PackageManager, ) *Upgrade { return &Upgrade{ clientFactory: clientFactory, diff --git a/pkg/workflows/workload/upgrade_test.go b/pkg/workflows/workload/upgrade_test.go index 1f9e3e8f3cb3..05fb1e2ad2cd 100644 --- a/pkg/workflows/workload/upgrade_test.go +++ b/pkg/workflows/workload/upgrade_test.go @@ -34,7 +34,7 @@ type upgradeTestSetup struct { writer *writermocks.MockFileWriter validator *mocks.MockValidator eksd *mocks.MockEksdInstaller - packageInstaller *mocks.MockPackageInstaller + packageInstaller *mocks.MockPackageManager clusterUpgrader *mocks.MockClusterUpgrader datacenterConfig providers.DatacenterConfig machineConfigs []providers.MachineConfig @@ -55,7 +55,7 @@ func newUpgradeTest(t *testing.T) *upgradeTestSetup { provider := providermocks.NewMockProvider(mockCtrl) writer := writermocks.NewMockFileWriter(mockCtrl) eksd := mocks.NewMockEksdInstaller(mockCtrl) - packageInstaller := mocks.NewMockPackageInstaller(mockCtrl) + packageInstaller := mocks.NewMockPackageManager(mockCtrl) eksdInstaller := mocks.NewMockEksdInstaller(mockCtrl) datacenterConfig := &v1alpha1.VSphereDatacenterConfig{} diff --git a/release/Makefile b/release/Makefile index 1ce8e841ef5c..9bbaa2bad199 100644 --- a/release/Makefile +++ b/release/Makefile @@ -45,7 +45,7 @@ GOBIN=$(shell go env GOBIN) endif # Setup Go -GOLANG_VERSION?="1.21" +GOLANG_VERSION?="1.22" GO_VERSION ?= $(shell source $(REPO_ROOT)/scripts/common.sh && build::common::get_go_path $(GOLANG_VERSION)) GO ?= $(GO_VERSION)/go @@ -121,9 +121,9 @@ bin/golangci-lint: unit-test: ## Run go test against code. $(GO) test -C ./cli -count=1 ./... -update-bundle-golden-files: ## Updates testdata files located under pkg/test/testdata +update-bundle-golden-files: ## Updates testdata files located under pkg/operations/testdata $(GO) test -C cli -count=1 ./pkg/operations -update - $(eval DIFF_LINE_COUNT=$(shell git diff cli/pkg/test/testdata | wc -l)) + $(eval DIFF_LINE_COUNT=$(shell git diff cli/pkg/operations/testdata | wc -l)) @if [[ $(DIFF_LINE_COUNT) != 0 ]]; then \ printf "\n\033[33mWarning:\033[0m Testdata files have been updated! 
Ensure that these changes were intentional.\n"; \ fi diff --git a/release/cli/cmd/release.go b/release/cli/cmd/release.go index 79775e8dd61b..60fddb3f12fa 100644 --- a/release/cli/cmd/release.go +++ b/release/cli/cmd/release.go @@ -236,7 +236,7 @@ var releaseCmd = &cobra.Command{ } bundleReleaseManifestKey := releaseConfig.BundlesManifestFilepath() - err = s3.UploadFile(bundleReleaseManifestFile, aws.String(releaseConfig.ReleaseBucket), aws.String(bundleReleaseManifestKey), releaseConfig.ReleaseClients.S3.Uploader) + err = s3.UploadFile(bundleReleaseManifestFile, aws.String(releaseConfig.ReleaseBucket), aws.String(bundleReleaseManifestKey), releaseConfig.ReleaseClients.S3.Uploader, false) if err != nil { fmt.Printf("Error uploading bundle manifest to release bucket: %+v", err) os.Exit(1) @@ -328,7 +328,7 @@ var releaseCmd = &cobra.Command{ } eksAReleaseManifestKey := releaseConfig.ReleaseManifestFilepath() - err = s3.UploadFile(eksAReleaseManifestFile, aws.String(releaseConfig.ReleaseBucket), aws.String(eksAReleaseManifestKey), releaseConfig.ReleaseClients.S3.Uploader) + err = s3.UploadFile(eksAReleaseManifestFile, aws.String(releaseConfig.ReleaseBucket), aws.String(eksAReleaseManifestKey), releaseConfig.ReleaseClients.S3.Uploader, false) if err != nil { fmt.Printf("Error uploading EKS-A release manifest to release bucket: %v", err) os.Exit(1) diff --git a/release/cli/go.mod b/release/cli/go.mod index c28921c93c13..92c4c41819eb 100644 --- a/release/cli/go.mod +++ b/release/cli/go.mod @@ -1,25 +1,25 @@ module github.com/aws/eks-anywhere/release/cli -go 1.21 +go 1.22.3 require ( - github.com/aws/aws-sdk-go v1.51.3 - github.com/aws/aws-sdk-go-v2 v1.26.0 + github.com/aws/aws-sdk-go v1.53.19 + github.com/aws/aws-sdk-go-v2 v1.27.2 github.com/aws/eks-anywhere v0.18.0 github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e github.com/fsouza/go-dockerclient v1.11.0 github.com/ghodss/yaml v1.0.0 - github.com/go-logr/logr v1.4.1 + github.com/go-logr/logr v1.4.2 github.com/mitchellh/go-homedir v1.1.0 - github.com/onsi/gomega v1.32.0 + github.com/onsi/gomega v1.33.1 github.com/pkg/errors v0.9.1 github.com/spf13/cobra v1.8.0 - github.com/spf13/viper v1.18.2 - golang.org/x/sync v0.6.0 - helm.sh/helm/v3 v3.14.3 - k8s.io/apimachinery v0.29.3 + github.com/spf13/viper v1.19.0 + golang.org/x/sync v0.7.0 + helm.sh/helm/v3 v3.15.1 + k8s.io/apimachinery v0.30.1 k8s.io/helm v2.17.0+incompatible - sigs.k8s.io/controller-runtime v0.16.5 + sigs.k8s.io/controller-runtime v0.18.3 sigs.k8s.io/yaml v1.4.0 ) @@ -36,7 +36,7 @@ require ( github.com/Microsoft/go-winio v0.6.1 // indirect github.com/Microsoft/hcsshim v0.11.4 // indirect github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect - github.com/aws/smithy-go v1.20.1 // indirect + github.com/aws/smithy-go v1.20.2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect @@ -54,7 +54,7 @@ require ( github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v5.7.0+incompatible // indirect - github.com/evanphx/json-patch/v5 v5.8.0 // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect github.com/fatih/color v1.14.1 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -74,7 +74,7 @@ require ( github.com/google/go-cmp v0.6.0 // indirect 
github.com/google/gofuzz v1.2.0 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/google/uuid v1.5.0 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/gosuri/uitable v0.0.4 // indirect @@ -89,7 +89,7 @@ require ( github.com/jmoiron/sqlx v1.3.5 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.0 // indirect + github.com/klauspost/compress v1.17.2 // indirect github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect github.com/lib/pq v1.10.9 // indirect @@ -118,7 +118,7 @@ require ( github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0 // indirect - github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/prometheus/client_golang v1.18.0 // indirect github.com/prometheus/client_model v0.5.0 // indirect @@ -139,40 +139,40 @@ require ( github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xlab/treeprint v1.2.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0 // indirect - go.opentelemetry.io/otel v1.20.0 // indirect - go.opentelemetry.io/otel/metric v1.20.0 // indirect - go.opentelemetry.io/otel/trace v1.20.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel v1.24.0 // indirect + go.opentelemetry.io/otel/metric v1.24.0 // indirect + go.opentelemetry.io/otel/trace v1.24.0 // indirect go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.19.0 // indirect + golang.org/x/crypto v0.22.0 // indirect golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect - golang.org/x/mod v0.14.0 // indirect - golang.org/x/net v0.20.0 // indirect - golang.org/x/oauth2 v0.15.0 // indirect - golang.org/x/sys v0.18.0 // indirect - golang.org/x/term v0.18.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/oauth2 v0.18.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/term v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.17.0 // indirect + golang.org/x/tools v0.20.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect - google.golang.org/grpc v1.59.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c // indirect + google.golang.org/grpc v1.62.1 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.29.3 // indirect - k8s.io/apiextensions-apiserver v0.29.1 // indirect - k8s.io/apiserver v0.29.3 // indirect - k8s.io/cli-runtime v0.29.0 // indirect - 
k8s.io/client-go v0.29.3 // indirect - k8s.io/component-base v0.29.3 // indirect - k8s.io/klog/v2 v2.110.1 // indirect - k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect - k8s.io/kubectl v0.29.0 // indirect + k8s.io/api v0.30.1 // indirect + k8s.io/apiextensions-apiserver v0.30.1 // indirect + k8s.io/apiserver v0.30.1 // indirect + k8s.io/cli-runtime v0.30.0 // indirect + k8s.io/client-go v0.30.1 // indirect + k8s.io/component-base v0.30.1 // indirect + k8s.io/klog/v2 v2.120.1 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/kubectl v0.30.0 // indirect k8s.io/utils v0.0.0-20240102154912-e7106e64919e // indirect oras.land/oras-go v1.2.5 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect diff --git a/release/cli/go.sum b/release/cli/go.sum index 0b0530515efb..c800a136d007 100644 --- a/release/cli/go.sum +++ b/release/cli/go.sum @@ -56,14 +56,14 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:l github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY= github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/aws/aws-sdk-go v1.38.40/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.51.3 h1:OqSyEXcJwf/XhZNVpMRgKlLA9nmbo5X8dwbll4RWxq8= -github.com/aws/aws-sdk-go v1.51.3/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= -github.com/aws/aws-sdk-go-v2 v1.26.0 h1:/Ce4OCiM3EkpW7Y+xUnfAFpchU78K7/Ug01sZni9PgA= -github.com/aws/aws-sdk-go-v2 v1.26.0/go.mod h1:35hUlJVYd+M++iLI3ALmVwMOyRYMmRqUXpTtRGW+K9I= +github.com/aws/aws-sdk-go v1.53.19 h1:WEuWc918RXlIaPCyU11F7hH9H1ItK+8m2c/uoQNRUok= +github.com/aws/aws-sdk-go v1.53.19/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go-v2 v1.27.2 h1:pLsTXqX93rimAOZG2FIYraDQstZaaGVVN4tNw65v0h8= +github.com/aws/aws-sdk-go-v2 v1.27.2/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e h1:GB6Cn9yKEt31mDF7RrVWyM9WoppNkGYth8zBPIJGJ+w= github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e/go.mod h1:p/KHVJAMv3kofnUnShkZ6pUnZYzm+LK2G7bIi8nnTKA= -github.com/aws/smithy-go v1.20.1 h1:4SZlSlMr36UEqC7XOyRVb27XMeZubNcBNN+9IgEPIQw= -github.com/aws/smithy-go v1.20.1/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= +github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q= +github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -159,8 +159,8 @@ github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI= github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro= -github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= 
+github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -191,9 +191,8 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= @@ -252,7 +251,8 @@ github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfC github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gobuffalo/logger v1.0.6 h1:nnZNpxYo0zx+Aj9RfMPBm+x9zAU2OayFh/xrAWi34HU= github.com/gobuffalo/logger v1.0.6/go.mod h1:J31TBEHR1QLV2683OXTAItYIg8pv2JMHnF/quuAbMjs= github.com/gobuffalo/packd v1.0.1 h1:U2wXfRr4E9DH8IdsDLlRFwTZTK7hLfq9qT/QHXGVe/0= @@ -283,6 +283,8 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k= @@ -299,6 +301,7 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp 
v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -308,14 +311,14 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= @@ -382,8 +385,8 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= -github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= +github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -490,21 +493,21 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.10.1/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= -github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= +github.com/onsi/ginkgo/v2 v2.17.2 h1:7eMhcy3GimbsA3hEnVKdw/PQM9XN9krpKVXsZdph0/g= +github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= -github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= -github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= +github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= +github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= -github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= @@ -584,14 +587,15 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= -github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= +github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx 
v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -601,8 +605,9 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -638,19 +643,19 @@ go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qL go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0 h1:1eHu3/pUSWaOgltNK3WJFaywKsTIr/PwvHyDmi0lQA0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0/go.mod h1:HyABWq60Uy1kjJSa2BVOxUVao8Cdick5AWSKPutqy6U= -go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc= -go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= -go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA= -go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= -go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ= -go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= +go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= +go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= go.starlark.net 
v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= @@ -669,8 +674,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -685,8 +690,8 @@ golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -699,7 +704,6 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -711,13 +715,13 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= -golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -726,8 +730,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -753,20 +757,21 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= @@ -794,8 +799,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY= +golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -810,22 +815,22 @@ google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEt google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c h1:lfpJ/2rWPa/kJgxyyXM8PrNnfCzcmxJ265mADgwmvLI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= -google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= +google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= +google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -834,6 +839,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= @@ -867,32 +874,32 @@ gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= -helm.sh/helm/v3 v3.14.3 h1:HmvRJlwyyt9HjgmAuxHbHv3PhMz9ir/XNWHyXfmnOP4= -helm.sh/helm/v3 v3.14.3/go.mod h1:v6myVbyseSBJTzhmeE39UcPLNv6cQK6qss3dvgAySaE= +helm.sh/helm/v3 v3.15.1 h1:22ztacHz4gMqhXNqCQ9NAg6BFWoRUryNLvnkz6OVyw0= +helm.sh/helm/v3 v3.15.1/go.mod 
h1:fvfoRcB8UKRUV5jrIfOTaN/pG1TPhuqSb56fjYdTKXg= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.17.2/go.mod h1:BS9fjjLc4CMuqfSO8vgbHPKMt5+SF0ET6u/RVDihTo4= -k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= -k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= +k8s.io/api v0.30.1 h1:kCm/6mADMdbAxmIh0LBjS54nQBE+U4KmbCfIkF5CpJY= +k8s.io/api v0.30.1/go.mod h1:ddbN2C0+0DIiPntan/bye3SW3PdwLa11/0yqwvuRrJM= k8s.io/apiextensions-apiserver v0.17.2/go.mod h1:4KdMpjkEjjDI2pPfBA15OscyNldHWdBCfsWMDWAmSTs= -k8s.io/apiextensions-apiserver v0.29.1 h1:S9xOtyk9M3Sk1tIpQMu9wXHm5O2MX6Y1kIpPMimZBZw= -k8s.io/apiextensions-apiserver v0.29.1/go.mod h1:zZECpujY5yTW58co8V2EQR4BD6A9pktVgHhvc0uLfeU= +k8s.io/apiextensions-apiserver v0.30.1 h1:4fAJZ9985BmpJG6PkoxVRpXv9vmPUOVzl614xarePws= +k8s.io/apiextensions-apiserver v0.30.1/go.mod h1:R4GuSrlhgq43oRY9sF2IToFh7PVlF1JjfWdoG3pixk4= k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= -k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= -k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= +k8s.io/apimachinery v0.30.1 h1:ZQStsEfo4n65yAdlGTfP/uSHMQSoYzU/oeEbkmF7P2U= +k8s.io/apimachinery v0.30.1/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= k8s.io/apiserver v0.17.2/go.mod h1:lBmw/TtQdtxvrTk0e2cgtOxHizXI+d0mmGQURIHQZlo= -k8s.io/apiserver v0.29.3 h1:xR7ELlJ/BZSr2n4CnD3lfA4gzFivh0wwfNfz9L0WZcE= -k8s.io/apiserver v0.29.3/go.mod h1:hrvXlwfRulbMbBgmWRQlFru2b/JySDpmzvQwwk4GUOs= -k8s.io/cli-runtime v0.29.0 h1:q2kC3cex4rOBLfPOnMSzV2BIrrQlx97gxHJs21KxKS4= -k8s.io/cli-runtime v0.29.0/go.mod h1:VKudXp3X7wR45L+nER85YUzOQIru28HQpXr0mTdeCrk= +k8s.io/apiserver v0.30.1 h1:BEWEe8bzS12nMtDKXzCF5Q5ovp6LjjYkSp8qOPk8LZ8= +k8s.io/apiserver v0.30.1/go.mod h1:i87ZnQ+/PGAmSbD/iEKM68bm1D5reX8fO4Ito4B01mo= +k8s.io/cli-runtime v0.30.0 h1:0vn6/XhOvn1RJ2KJOC6IRR2CGqrpT6QQF4+8pYpWQ48= +k8s.io/cli-runtime v0.30.0/go.mod h1:vATpDMATVTMA79sZ0YUCzlMelf6rUjoBzlp+RnoM+cg= k8s.io/client-go v0.17.2/go.mod h1:QAzRgsa0C2xl4/eVpeVAZMvikCn8Nm81yqVx3Kk9XYI= -k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= -k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= +k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= +k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= k8s.io/component-base v0.17.2/go.mod h1:zMPW3g5aH7cHJpKYQ/ZsGMcgbsA/VyhEugF3QT1awLs= -k8s.io/component-base v0.29.3 h1:Oq9/nddUxlnrCuuR2K/jp6aflVvc0uDvxMzAWxnGzAo= -k8s.io/component-base v0.29.3/go.mod h1:Yuj33XXjuOk2BAaHsIGHhCKZQAgYKhqIxIjIr2UXYio= +k8s.io/component-base v0.30.1 h1:bvAtlPh1UrdaZL20D9+sWxsJljMi0QZ3Lmw+kmZAaxQ= +k8s.io/component-base v0.30.1/go.mod h1:e/X9kDiOebwlI41AvBHuWdqFriSRrX50CdwA9TFaHLI= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/helm v2.17.0+incompatible h1:Bpn6o1wKLYqKM3+Osh8e+1/K2g/GsQJ4F4yNF2+deao= @@ -900,13 +907,13 @@ k8s.io/helm v2.17.0+incompatible/go.mod 
h1:LZzlS4LQBHfciFOurYBFkCMTaZ0D1l+p0teMg k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= -k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= -k8s.io/kubectl v0.29.0 h1:Oqi48gXjikDhrBF67AYuZRTcJV4lg2l42GmvsP7FmYI= -k8s.io/kubectl v0.29.0/go.mod h1:0jMjGWIcMIQzmUaMgAzhSELv5WtHo2a8pq67DtviAJs= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/kubectl v0.30.0 h1:xbPvzagbJ6RNYVMVuiHArC1grrV5vSmmIcSZuCdzRyk= +k8s.io/kubectl v0.30.0/go.mod h1:zgolRw2MQXLPwmic2l/+iHs239L49fhSeICuMhQQXTI= k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= @@ -918,8 +925,8 @@ modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= oras.land/oras-go v1.2.5 h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo= oras.land/oras-go v1.2.5/go.mod h1:PuAwRShRZCsZb7g8Ar3jKKQR/2A/qN+pkYxIOd/FAoo= sigs.k8s.io/controller-runtime v0.5.0/go.mod h1:REiJzC7Y00U+2YkMbT8wxgrsX5USpXKGhb2sCtAXiT8= -sigs.k8s.io/controller-runtime v0.16.5 h1:yr1cEJbX08xsTW6XEIzT13KHHmIyX8Umvme2cULvFZw= -sigs.k8s.io/controller-runtime v0.16.5/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0= +sigs.k8s.io/controller-runtime v0.18.3 h1:B5Wmmo8WMWK7izei+2LlXLVDGzMwAHBNLX68lwtlSR4= +sigs.k8s.io/controller-runtime v0.18.3/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0= diff --git a/release/cli/pkg/assets/archives/archives.go b/release/cli/pkg/assets/archives/archives.go index cf175f491f11..4044c5c49615 100644 --- a/release/cli/pkg/assets/archives/archives.go +++ b/release/cli/pkg/assets/archives/archives.go @@ -145,6 +145,60 @@ func KernelArtifactPathGetter(rc *releasetypes.ReleaseConfig, archive *assettype return sourceS3Key, sourceS3Prefix, releaseName, releaseS3Path, nil } +func RTOSArtifactPathGetter(rc *releasetypes.ReleaseConfig, archive *assettypes.Archive, projectPath, gitTag, eksDReleaseChannel, eksDReleaseNumber, kubeVersion, latestPath, arch string) (string, string, string, string, error) { + var sourceS3Key string + var sourceS3Prefix string + var releaseS3Path string + var releaseName string + + imageExtensions := 
map[string]string{ + "ami": "gz", + "ova": "ova", + "raw": "gz", + } + imageExtension := imageExtensions[archive.Format] + + if rc.DevRelease || rc.ReleaseEnvironment == "development" { + sourceS3Key = fmt.Sprintf("%s.%s", archive.OSName, imageExtension) + sourceS3Prefix = fmt.Sprintf("%s/%s", projectPath, latestPath) + } else { + sourceS3Key = fmt.Sprintf("%s-%s-eks-a-%d-%s.%s", + archive.OSName, + eksDReleaseChannel, + rc.BundleNumber, + arch, + imageExtension, + ) + sourceS3Prefix = fmt.Sprintf("releases/bundles/%d/artifacts/rtos/%s", rc.BundleNumber, eksDReleaseChannel) + } + + if rc.DevRelease { + releaseName = fmt.Sprintf("%s-%s-eks-a-%s-%s.%s", + archive.OSName, + eksDReleaseChannel, + rc.DevReleaseUriVersion, + arch, + imageExtension, + ) + releaseS3Path = fmt.Sprintf("artifacts/%s/rtos/%s/%s", + rc.DevReleaseUriVersion, + archive.Format, + eksDReleaseChannel, + ) + } else { + releaseName = fmt.Sprintf("%s-%s-eks-a-%d-%s.%s", + archive.OSName, + eksDReleaseChannel, + rc.BundleNumber, + arch, + imageExtension, + ) + releaseS3Path = fmt.Sprintf("releases/bundles/%d/artifacts/rtos/%s", rc.BundleNumber, eksDReleaseChannel) + } + + return sourceS3Key, sourceS3Prefix, releaseName, releaseS3Path, nil +} + func GetArchiveAssets(rc *releasetypes.ReleaseConfig, archive *assettypes.Archive, projectPath, gitTag, eksDReleaseChannel, eksDReleaseNumber, kubeVersion string) (*releasetypes.ArchiveArtifact, error) { os := "linux" arch := "amd64" @@ -181,6 +235,7 @@ func GetArchiveAssets(rc *releasetypes.ReleaseConfig, archive *assettypes.Archiv ProjectPath: projectPath, SourcedFromBranch: sourcedFromBranch, ImageFormat: archive.Format, + Private: archive.Private, } return archiveArtifact, nil diff --git a/release/cli/pkg/assets/config/bundle_release.go b/release/cli/pkg/assets/config/bundle_release.go index e2e78c9adc70..287a878f9aec 100644 --- a/release/cli/pkg/assets/config/bundle_release.go +++ b/release/cli/pkg/assets/config/bundle_release.go @@ -66,6 +66,22 @@ var bundleReleaseAssetsConfigMap = []assettypes.AssetConfig{ }, HasReleaseBranches: true, }, + // Canonical Ubuntu RTOS artifacts + { + ProjectName: "ubuntu-rtos", + ProjectPath: "projects/canonical/ubuntu", + GitTagAssigner: tagger.NonExistentTagAssigner, + Archives: []*assettypes.Archive{ + { + Name: "rtos", + Format: "raw", + OSName: "ubuntu", + OSVersion: "22.04", + ArchiveS3PathGetter: archives.RTOSArtifactPathGetter, + Private: true, + }, + }, + }, // Cert-manager artifacts { ProjectName: "cert-manager", diff --git a/release/cli/pkg/assets/manifests/manifests.go b/release/cli/pkg/assets/manifests/manifests.go index b0333797d91f..6960363c9322 100644 --- a/release/cli/pkg/assets/manifests/manifests.go +++ b/release/cli/pkg/assets/manifests/manifests.go @@ -68,6 +68,7 @@ func GetManifestAssets(rc *releasetypes.ReleaseConfig, manifestComponent *assett ProjectPath: projectPath, SourcedFromBranch: sourcedFromBranch, Component: componentName, + Private: manifestComponent.Private, } return manifestArtifact, nil diff --git a/release/cli/pkg/assets/types/types.go b/release/cli/pkg/assets/types/types.go index 413b7d3add35..cf34c6756a9d 100644 --- a/release/cli/pkg/assets/types/types.go +++ b/release/cli/pkg/assets/types/types.go @@ -23,6 +23,7 @@ type ManifestComponent struct { ReleaseManifestPrefix string ManifestFiles []string NoVersionSuffix bool + Private bool } type ImageTagConfiguration struct { @@ -47,6 +48,7 @@ type Archive struct { OSVersion string ArchitectureOverride string ArchiveS3PathGetter ArchiveS3PathGenerator + Private bool } 
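Note on the additions above (not part of the patch itself): the new Private flag on Archive and ManifestComponent is carried onto the generated artifacts so that the S3 helpers changed later in this patch can choose between anonymous and authenticated access. The naming logic in RTOSArtifactPathGetter is easiest to see with concrete values, so the following is a minimal, self-contained Go sketch that mirrors its dev-release branch for a hypothetical ubuntu raw image; the archive struct and the projectPath, latestPath, channel, devVersion, and arch values are simplified stand-ins, not the real release-tooling types.

package main

import "fmt"

// Simplified stand-in for the assettypes.Archive fields this sketch needs.
// Private is only carried along here; the S3 helpers in this patch consume it.
type archive struct {
	OSName  string
	Format  string
	Private bool
}

func main() {
	a := archive{OSName: "ubuntu", Format: "raw", Private: true}

	// Mirror of the extension lookup in RTOSArtifactPathGetter:
	// ami and raw images ship gzipped, ova images keep their own extension.
	imageExtensions := map[string]string{"ami": "gz", "ova": "ova", "raw": "gz"}
	ext := imageExtensions[a.Format]

	// Dev-release source key and prefix (hypothetical projectPath and latestPath values).
	projectPath, latestPath := "projects/canonical/ubuntu", "latest"
	sourceS3Key := fmt.Sprintf("%s.%s", a.OSName, ext)              // "ubuntu.gz"
	sourceS3Prefix := fmt.Sprintf("%s/%s", projectPath, latestPath) // "projects/canonical/ubuntu/latest"

	// Dev-release destination name and path (hypothetical channel, version, arch).
	channel, devVersion, arch := "1-30", "v0.0.0-dev-build.0", "amd64"
	releaseName := fmt.Sprintf("%s-%s-eks-a-%s-%s.%s", a.OSName, channel, devVersion, arch, ext)
	releaseS3Path := fmt.Sprintf("artifacts/%s/rtos/%s/%s", devVersion, a.Format, channel)

	fmt.Println(sourceS3Key, sourceS3Prefix)
	fmt.Println(releaseName, releaseS3Path)
}

For a non-dev bundle release the same function instead keys off rc.BundleNumber, producing names like ubuntu-<channel>-eks-a-<bundleNumber>-<arch>.gz under releases/bundles/<bundleNumber>/artifacts/rtos/<channel>.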
type AssetConfig struct { diff --git a/release/cli/pkg/aws/s3/s3.go b/release/cli/pkg/aws/s3/s3.go index 34b7eb9d5f81..27e98f7a6444 100644 --- a/release/cli/pkg/aws/s3/s3.go +++ b/release/cli/pkg/aws/s3/s3.go @@ -22,6 +22,8 @@ import ( "path/filepath" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3manager" "github.com/pkg/errors" ) @@ -42,7 +44,7 @@ func Read(bucket, key string) (io.ReadCloser, error) { return resp.Body, nil } -func DownloadFile(filePath, bucket, key string) error { +func DownloadFile(filePath, bucket, key string, s3Downloader *s3manager.Downloader, private bool) error { if err := os.MkdirAll(filepath.Dir(filePath), 0o755); err != nil { return errors.Cause(err) } @@ -53,32 +55,46 @@ func DownloadFile(filePath, bucket, key string) error { } defer fd.Close() - body, err := Read(bucket, key) - if err != nil { - return err - } - - defer body.Close() - - if _, err = io.Copy(fd, body); err != nil { - return err + if private { + _, err = s3Downloader.Download(fd, &s3.GetObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + }) + if err != nil { + return err + } + } else { + body, err := Read(bucket, key) + if err != nil { + return err + } + + defer body.Close() + + if _, err = io.Copy(fd, body); err != nil { + return err + } } return nil } -func UploadFile(filePath string, bucket, key *string, s3Uploader *s3manager.Uploader) error { +func UploadFile(filePath string, bucket, key *string, s3Uploader *s3manager.Uploader, private bool) error { fd, err := os.Open(filePath) if err != nil { return errors.Cause(err) } defer fd.Close() + objectCannedACL := s3.ObjectCannedACLPublicRead + if private { + objectCannedACL = s3.ObjectCannedACLPrivate + } result, err := s3Uploader.Upload(&s3manager.UploadInput{ Bucket: bucket, Key: key, Body: fd, - ACL: aws.String("public-read"), + ACL: aws.String(objectCannedACL), }) if err != nil { return errors.Cause(err) @@ -88,13 +104,30 @@ func UploadFile(filePath string, bucket, key *string, s3Uploader *s3manager.Uplo return nil } -func KeyExists(bucket, key string) bool { - objectUrl := fmt.Sprintf("https://%s.s3.amazonaws.com/%s", bucket, key) - - resp, err := http.Head(objectUrl) - if err != nil || resp.StatusCode != http.StatusOK { - return false +func KeyExists(s3Client *s3.S3, bucket, key string, private bool) (bool, error) { + if private { + _, err := s3Client.HeadObject(&s3.HeadObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + }) + if err != nil { + if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" { + return false, nil + } + return false, fmt.Errorf("calling S3 HeadObject API to check if object is present: %v", err) + } + } else { + objectUrl := fmt.Sprintf("https://%s.s3.amazonaws.com/%s", bucket, key) + + resp, err := http.Head(objectUrl) + if err != nil { + return false, fmt.Errorf("making HTTP HEAD request to check if object is present: %v", err) + } + + if resp.StatusCode != http.StatusOK { + return false, nil + } } - return true + return true, nil } diff --git a/release/cli/pkg/clients/clients.go b/release/cli/pkg/clients/clients.go index ab22cda2d33c..88cbeca53476 100644 --- a/release/cli/pkg/clients/clients.go +++ b/release/cli/pkg/clients/clients.go @@ -41,7 +41,8 @@ type ReleaseClients struct { } type SourceS3Clients struct { - Client *s3.S3 + Client *s3.S3 + Downloader *s3manager.Downloader } type ReleaseS3Clients struct { @@ -88,6 +89,7 @@ func CreateDevReleaseClients(dryRun bool) 
(*SourceClients, *ReleaseClients, erro // S3 client and uploader s3Client := s3.New(pdxSession) + downloader := s3manager.NewDownloader(pdxSession) uploader := s3manager.NewUploader(pdxSession) // Get source ECR auth config @@ -107,7 +109,8 @@ func CreateDevReleaseClients(dryRun bool) (*SourceClients, *ReleaseClients, erro // Constructing source clients sourceClients := &SourceClients{ S3: &SourceS3Clients{ - Client: s3Client, + Client: s3Client, + Downloader: downloader, }, ECR: &SourceECRClient{ EcrClient: ecrClient, @@ -162,6 +165,7 @@ func CreateStagingReleaseClients() (*SourceClients, *ReleaseClients, error) { // Release S3 client and uploader releaseS3Client := s3.New(releaseSession) + downloader := s3manager.NewDownloader(releaseSession) uploader := s3manager.NewUploader(releaseSession) // Get source ECR auth config @@ -181,7 +185,8 @@ func CreateStagingReleaseClients() (*SourceClients, *ReleaseClients, error) { // Constructing source clients sourceClients := &SourceClients{ S3: &SourceS3Clients{ - Client: sourceS3Client, + Client: sourceS3Client, + Downloader: downloader, }, ECR: &SourceECRClient{ EcrClient: ecrClient, @@ -237,6 +242,7 @@ func CreateProdReleaseClients() (*SourceClients, *ReleaseClients, error) { // Release S3 client and uploader releaseS3Client := s3.New(releaseSession) + downloader := s3manager.NewDownloader(releaseSession) uploader := s3manager.NewUploader(releaseSession) // Get source ECR Public auth config @@ -256,7 +262,8 @@ func CreateProdReleaseClients() (*SourceClients, *ReleaseClients, error) { // Constructing release clients sourceClients := &SourceClients{ S3: &SourceS3Clients{ - Client: sourceS3Client, + Client: sourceS3Client, + Downloader: downloader, }, ECR: &SourceECRClient{ EcrPublicClient: sourceEcrPublicClient, diff --git a/release/cli/pkg/filereader/file_reader.go b/release/cli/pkg/filereader/file_reader.go index a38470d1d9a9..077155625970 100644 --- a/release/cli/pkg/filereader/file_reader.go +++ b/release/cli/pkg/filereader/file_reader.go @@ -180,6 +180,9 @@ func GetEksDReleaseManifestUrl(releaseChannel, releaseNumber string, dev bool) s // GetNextEksADevBuildNumber computes next eksa dev build number for the current eks-a dev build func GetNextEksADevBuildNumber(releaseVersion string, r *releasetypes.ReleaseConfig) (int, error) { + if r.DryRun { + return 0, nil + } tempFileName := "latest-dev-release-version" var latestReleaseKey, latestBuildVersion string @@ -189,8 +192,14 @@ func GetNextEksADevBuildNumber(releaseVersion string, r *releasetypes.ReleaseCon } else { latestReleaseKey = fmt.Sprintf("%s/LATEST_RELEASE_VERSION", r.BuildRepoBranchName) } - if s3.KeyExists(r.ReleaseBucket, latestReleaseKey) { - err := s3.DownloadFile(tempFileName, r.ReleaseBucket, latestReleaseKey) + + keyExists, err := s3.KeyExists(r.ReleaseClients.S3.Client, r.ReleaseBucket, latestReleaseKey, false) + if err != nil { + return -1, errors.Cause(err) + } + + if keyExists { + err := s3.DownloadFile(tempFileName, r.ReleaseBucket, latestReleaseKey, r.SourceClients.S3.Downloader, false) if err != nil { return -1, errors.Cause(err) } @@ -286,7 +295,7 @@ func PutEksAReleaseVersion(version string, r *releasetypes.ReleaseConfig) error // Upload the file to S3 fmt.Println("Uploading latest release version file") - err = s3.UploadFile(currentReleaseKey, aws.String(r.ReleaseBucket), aws.String(currentReleaseKey), r.ReleaseClients.S3.Uploader) + err = s3.UploadFile(currentReleaseKey, aws.String(r.ReleaseBucket), aws.String(currentReleaseKey), r.ReleaseClients.S3.Uploader, false) 
if err != nil { return errors.Cause(err) } diff --git a/release/cli/pkg/images/images.go b/release/cli/pkg/images/images.go index f9324355ba76..51e42f68fcf2 100644 --- a/release/cli/pkg/images/images.go +++ b/release/cli/pkg/images/images.go @@ -83,13 +83,13 @@ func PollForExistence(devRelease bool, authConfig *docker.AuthConfiguration, ima bodyStr := string(body) if strings.Contains(bodyStr, "MANIFEST_UNKNOWN") { - return fmt.Errorf("Requested image not found") + return fmt.Errorf("requested image not found: %v", imageUri) } return nil }) if err != nil { - return fmt.Errorf("retries exhausted waiting for source image %s to be available for copy: %v", imageUri, err) + return fmt.Errorf("retries exhausted waiting for source image [%s] to be available for copy: %v", imageUri, err) } return nil @@ -118,7 +118,7 @@ func CopyToDestination(sourceAuthConfig, releaseAuthConfig *docker.AuthConfigura return nil }) if err != nil { - return fmt.Errorf("retries exhausted performing image copy from source to destination: %v", err) + return fmt.Errorf("retries exhausted performing image copy from source [%s] to destination [%s]: %v", sourceImageUri, releaseImageUri, err) } return nil @@ -321,7 +321,12 @@ func GetPreviousReleaseImageSemver(r *releasetypes.ReleaseConfig, releaseImageUr bundles := &anywherev1alpha1.Bundles{} bundleReleaseManifestKey := r.BundlesManifestFilepath() bundleManifestUrl := fmt.Sprintf("https://%s.s3.amazonaws.com/%s", r.ReleaseBucket, bundleReleaseManifestKey) - if s3.KeyExists(r.ReleaseBucket, bundleReleaseManifestKey) { + + keyExists, err := s3.KeyExists(r.ReleaseClients.S3.Client, r.ReleaseBucket, bundleReleaseManifestKey, false) + if err != nil { + return "", fmt.Errorf("checking if object [%s] is present in S3 bucket: %v", bundleReleaseManifestKey, err) + } + if keyExists { contents, err := filereader.ReadHttpFile(bundleManifestUrl) if err != nil { return "", fmt.Errorf("Error reading bundle manifest from S3: %v", err) diff --git a/release/cli/pkg/operations/bundle_release_test.go b/release/cli/pkg/operations/bundle_release_test.go index e7021a85ba3b..411cd3f546a7 100644 --- a/release/cli/pkg/operations/bundle_release_test.go +++ b/release/cli/pkg/operations/bundle_release_test.go @@ -38,7 +38,7 @@ import ( const ( releaseFolder = "release" - testdataFolder = "cli/pkg/test/testdata" + testdataFolder = "cli/pkg/operations/testdata" generatedBundleFolder = "generated-bundles" ) diff --git a/release/cli/pkg/operations/download.go b/release/cli/pkg/operations/download.go index 6708f852d4d2..f3c87ed7eb62 100644 --- a/release/cli/pkg/operations/download.go +++ b/release/cli/pkg/operations/download.go @@ -21,6 +21,8 @@ import ( "strings" "time" + s3sdk "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3manager" "github.com/pkg/errors" "golang.org/x/sync/errgroup" @@ -42,6 +44,12 @@ func DownloadArtifacts(ctx context.Context, r *releasetypes.ReleaseConfig, eksaA } return false, 0 })) + var s3Client *s3sdk.S3 + var s3Downloader *s3manager.Downloader + if !r.DryRun { + s3Client = r.SourceClients.S3.Client + s3Downloader = r.SourceClients.S3.Downloader + } fmt.Println("==========================================================") fmt.Println(" Artifacts Download") fmt.Println("==========================================================") @@ -55,12 +63,12 @@ func DownloadArtifacts(ctx context.Context, r *releasetypes.ReleaseConfig, eksaA errGroup.Go(func() error { // Check if there is an archive to be downloaded if artifact.Archive != nil { - return 
handleArchiveDownload(ctx, r, artifact, s3Retrier) + return handleArchiveDownload(ctx, r, artifact, s3Retrier, s3Client, s3Downloader) } // Check if there is a manifest to be downloaded if artifact.Manifest != nil { - return handleManifestDownload(ctx, r, artifact, s3Retrier) + return handleManifestDownload(ctx, r, artifact, s3Retrier, s3Client, s3Downloader) } return nil @@ -76,7 +84,7 @@ func DownloadArtifacts(ctx context.Context, r *releasetypes.ReleaseConfig, eksaA return nil } -func handleArchiveDownload(_ context.Context, r *releasetypes.ReleaseConfig, artifact releasetypes.Artifact, s3Retrier *retrier.Retrier) error { +func handleArchiveDownload(_ context.Context, r *releasetypes.ReleaseConfig, artifact releasetypes.Artifact, s3Retrier *retrier.Retrier, s3Client *s3sdk.S3, s3Downloader *s3manager.Downloader) error { sourceS3Prefix := artifact.Archive.SourceS3Prefix sourceS3Key := artifact.Archive.SourceS3Key artifactPath := artifact.Archive.ArtifactPath @@ -87,8 +95,13 @@ func handleArchiveDownload(_ context.Context, r *releasetypes.ReleaseConfig, art fmt.Println("Skipping OS image downloads in dry-run mode") } else { err := s3Retrier.Retry(func() error { - if !s3.KeyExists(r.SourceBucket, objectKey) { - return fmt.Errorf("requested object not found") + keyExists, err := s3.KeyExists(s3Client, r.SourceBucket, objectKey, artifact.Archive.Private) + if err != nil { + return fmt.Errorf("checking if object [%s] is present in S3 bucket: %v", objectKey, err) + } + + if !keyExists { + return fmt.Errorf("requested object not found: %v", objectKey) } return nil }) @@ -107,11 +120,11 @@ func handleArchiveDownload(_ context.Context, r *releasetypes.ReleaseConfig, art } objectKey = filepath.Join(latestSourceS3PrefixFromMain, sourceS3Key) } else { - return fmt.Errorf("retries exhausted waiting for archive to be uploaded to source location: %v", err) + return fmt.Errorf("retries exhausted waiting for source archive [%s] to be available for download: %v", objectKey, err) } } - err = s3.DownloadFile(objectLocalFilePath, r.SourceBucket, objectKey) + err = s3.DownloadFile(objectLocalFilePath, r.SourceBucket, objectKey, s3Downloader, artifact.Archive.Private) if err != nil { return errors.Cause(err) } @@ -136,8 +149,13 @@ func handleArchiveDownload(_ context.Context, r *releasetypes.ReleaseConfig, art fmt.Printf("Checksum file - %s\n", objectShasumFileKey) err := s3Retrier.Retry(func() error { - if !s3.KeyExists(r.SourceBucket, objectShasumFileKey) { - return fmt.Errorf("requested object not found") + keyExists, err := s3.KeyExists(s3Client, r.SourceBucket, objectShasumFileKey, artifact.Archive.Private) + if err != nil { + return fmt.Errorf("checking if object [%s] is present in S3 bucket: %v", objectShasumFileKey, err) + } + + if !keyExists { + return fmt.Errorf("requested object not found: %v", objectShasumFileKey) } return nil }) @@ -156,11 +174,11 @@ func handleArchiveDownload(_ context.Context, r *releasetypes.ReleaseConfig, art } objectShasumFileKey = filepath.Join(latestSourceS3PrefixFromMain, objectShasumFileName) } else { - return fmt.Errorf("retries exhausted waiting for checksum file to be uploaded to source location: %v", err) + return fmt.Errorf("retries exhausted waiting for source checksum file [%s] to be available for download: %v", objectShasumFileKey, err) } } - err = s3.DownloadFile(objectShasumFileLocalFilePath, r.SourceBucket, objectShasumFileKey) + err = s3.DownloadFile(objectShasumFileLocalFilePath, r.SourceBucket, objectShasumFileKey, s3Downloader, artifact.Archive.Private) if 
err != nil { return errors.Cause(err) } @@ -170,7 +188,7 @@ func handleArchiveDownload(_ context.Context, r *releasetypes.ReleaseConfig, art return nil } -func handleManifestDownload(_ context.Context, r *releasetypes.ReleaseConfig, artifact releasetypes.Artifact, s3Retrier *retrier.Retrier) error { +func handleManifestDownload(_ context.Context, r *releasetypes.ReleaseConfig, artifact releasetypes.Artifact, s3Retrier *retrier.Retrier, s3Client *s3sdk.S3, s3Downloader *s3manager.Downloader) error { sourceS3Prefix := artifact.Manifest.SourceS3Prefix sourceS3Key := artifact.Manifest.SourceS3Key artifactPath := artifact.Manifest.ArtifactPath @@ -179,8 +197,13 @@ func handleManifestDownload(_ context.Context, r *releasetypes.ReleaseConfig, ar fmt.Printf("Manifest - %s\n", objectKey) err := s3Retrier.Retry(func() error { - if !s3.KeyExists(r.SourceBucket, objectKey) { - return fmt.Errorf("Requested object not found") + keyExists, err := s3.KeyExists(s3Client, r.SourceBucket, objectKey, artifact.Manifest.Private) + if err != nil { + return fmt.Errorf("checking if object [%s] is present in S3 bucket: %v", objectKey, err) + } + + if !keyExists { + return fmt.Errorf("requested object not found: %v", objectKey) } return nil }) @@ -194,11 +217,11 @@ func handleManifestDownload(_ context.Context, r *releasetypes.ReleaseConfig, ar latestSourceS3PrefixFromMain := strings.NewReplacer(r.BuildRepoBranchName, "latest", artifact.Manifest.GitTag, gitTagFromMain).Replace(sourceS3Prefix) objectKey = filepath.Join(latestSourceS3PrefixFromMain, sourceS3Key) } else { - return fmt.Errorf("retries exhausted waiting for archive to be uploaded to source location: %v", err) + return fmt.Errorf("retries exhausted waiting for source manifest [%s] to be available for download: %v", objectKey, err) } } - err = s3.DownloadFile(objectLocalFilePath, r.SourceBucket, objectKey) + err = s3.DownloadFile(objectLocalFilePath, r.SourceBucket, objectKey, s3Downloader, artifact.Manifest.Private) if err != nil { return errors.Cause(err) } diff --git a/release/cli/pkg/test/testdata/main-bundle-release.yaml b/release/cli/pkg/operations/testdata/main-bundle-release.yaml similarity index 70% rename from release/cli/pkg/test/testdata/main-bundle-release.yaml rename to release/cli/pkg/operations/testdata/main-bundle-release.yaml index c7f16ed415ca..f413688bca09 100644 --- a/release/cli/pkg/test/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/operations/testdata/main-bundle-release.yaml @@ -10,7 +10,7 @@ spec: versionsBundles: - bootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.1/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.2/bootstrap-components.yaml controller: arch: - amd64 @@ -19,7 +19,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-bootstrap-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -28,27 +28,27 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: 
public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 bottlerocketHostContainers: admin: arch: - amd64 description: Container image for bottlerocket-admin image - imageDigest: sha256:789cb7d120ac7633bda792660266214a486c182db67d89ee6508216fe2bc7f93 + imageDigest: sha256:2f884ebc34c0f54b6d75493ba2ad645f42525d6c90fe2099ae50708de6f86152 name: bottlerocket-admin os: linux - uri: public.ecr.aws/bottlerocket/bottlerocket-admin:v0.11.4 + uri: public.ecr.aws/bottlerocket/bottlerocket-admin:v0.11.6 control: arch: - amd64 description: Container image for bottlerocket-control image - imageDigest: sha256:29deada0d540dfe6ecb8238e0eb04ed4a9e6b8cf0d5cab232890ef9404643c30 + imageDigest: sha256:0ed73c9a2e35fab4287f3cbf18de084b740cace3234628c5e215adc407e52750 name: bottlerocket-control os: linux - uri: public.ecr.aws/bottlerocket/bottlerocket-control:v0.7.8 + uri: public.ecr.aws/bottlerocket/bottlerocket-control:v0.7.10 kubeadmBootstrap: arch: - amd64 @@ -57,7 +57,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-25-35-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-25-40-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -67,7 +67,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-acmesolver os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.5-eks-a-v0.0.0-dev-build.1 cainjector: arch: - amd64 @@ -76,7 +76,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-cainjector os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.5-eks-a-v0.0.0-dev-build.1 controller: arch: - amd64 @@ -85,7 +85,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-controller os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.5-eks-a-v0.0.0-dev-build.1 ctl: arch: - amd64 @@ -94,10 +94,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-ctl os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.5-eks-a-v0.0.0-dev-build.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.13.2/cert-manager.yaml - version: v1.13.2+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.5/cert-manager.yaml + version: 
v1.14.5+abcdef1 webhook: arch: - amd64 @@ -106,32 +106,32 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-webhook os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.5-eks-a-v0.0.0-dev-build.1 cilium: cilium: arch: - amd64 description: Container image for cilium image - imageDigest: sha256:d9f2b7617a84689087d8e10c75d915018620fcc9e3123a31498296435642cf61 + imageDigest: sha256:8d5876112cd6de92c2d4cf9de3281d942ba3cc7df08b2109f94271bdfcb17f25 name: cilium os: linux - uri: public.ecr.aws/isovalent/cilium:v1.13.12-eksa.1 + uri: public.ecr.aws/isovalent/cilium:v1.13.16-eksa.2 helmChart: description: Helm chart for cilium-chart - imageDigest: sha256:5d1921b071bb9f59bc8d0e6b83fe8579669d0b34d6ffc2432baa1d15adb5872b + imageDigest: sha256:482c1d609f03993d254e5799bb267cfb5b7bf2950760db3709c307d6fc6a2c4c name: cilium-chart - uri: public.ecr.aws/isovalent/cilium:1.13.12-eksa.1 + uri: public.ecr.aws/isovalent/cilium:1.13.16-eksa.2 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.12-eksa.1/cilium.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.16-eksa.2/cilium.yaml operator: arch: - amd64 description: Container image for operator-generic image - imageDigest: sha256:d2c3ac2b5c4fb67a68939642a40032cb0e3a97ce49415d87aa482b9b6d663a07 + imageDigest: sha256:3a4eaf8cf250e816635ae8e49b3b622b9ab7b83af41a00dc811408c347bceb51 name: operator-generic os: linux - uri: public.ecr.aws/isovalent/operator-generic:v1.13.12-eksa.1 - version: v1.13.12-eksa.1 + uri: public.ecr.aws/isovalent/operator-generic:v1.13.16-eksa.2 + version: v1.13.16-eksa.2 cloudStack: clusterAPIController: arch: @@ -152,7 +152,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -161,13 +161,13 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-cloudstack/manifests/infrastructure-cloudstack/v0.4.10-rc1/metadata.yaml version: v0.4.10-rc1+abcdef1 clusterAPI: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.1/core-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.2/core-components.yaml controller: arch: - amd64 @@ -176,7 +176,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -185,13 +185,13 @@ 
spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 controlPlane: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.1/control-plane-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.2/control-plane-components.yaml controller: arch: - amd64 @@ -200,7 +200,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-control-plane-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -209,15 +209,15 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 docker: clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.1/cluster-template-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/cluster-template-development.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.1/infrastructure-components-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/infrastructure-components-development.yaml kubeProxy: arch: - amd64 @@ -226,7 +226,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -235,10 +235,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-docker os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.7.2-eks-a-v0.0.0-dev-build.1 metadata: - uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 eksD: ami: bottlerocket: {} @@ -252,7 +252,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.14/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.17/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz crictl: arch: - amd64 @@ -261,7 +261,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.29.0/cri-tools-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.30.0/cri-tools-v0.0.0-dev-build.0-linux-amd64.tar.gz etcdadm: arch: - amd64 @@ -280,7 +280,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/image-builder/v0.4.1/image-builder-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/image-builder/v0.5.0/image-builder-v0.0.0-dev-build.0-linux-amd64.tar.gz kindNode: arch: - amd64 @@ -289,10 +289,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.25.16-eks-d-1-25-35-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.25.16-eks-d-1-25-40-eks-a-v0.0.0-dev-build.1 kubeVersion: v1.25.16 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-25/kubernetes-1-25-eks-35.yaml - name: kubernetes-1-25-eks-35 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-25/kubernetes-1-25-eks-40.yaml + name: kubernetes-1-25-eks-40 ova: bottlerocket: {} raw: @@ -306,7 +306,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cli-tools os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.6-eks-a-v0.0.0-dev-build.1 clusterController: arch: - amd64 @@ -315,9 +315,9 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cluster-controller os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.6-eks-a-v0.0.0-dev-build.1 components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.1/eksa-components.yaml + uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.6/eksa-components.yaml diagnosticCollector: arch: - amd64 @@ -326,11 +326,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-diagnostic-collector os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.6-eks-a-v0.0.0-dev-build.1 version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.12/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/bootstrap-components.yaml controller: arch: - amd64 @@ -339,7 +339,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: etcdadm-bootstrap-provider os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-bootstrap-provider:v1.0.12-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/etcdadm-bootstrap-provider:v1.0.13-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -348,13 +348,13 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.12/metadata.yaml - version: v1.0.12+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/metadata.yaml + version: v1.0.13+abcdef1 etcdadmController: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.19/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/bootstrap-components.yaml controller: arch: - amd64 @@ -363,7 +363,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: etcdadm-controller os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.19-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.21-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -372,10 +372,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.19/metadata.yaml - version: v1.0.19+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/metadata.yaml + version: v1.0.21+abcdef1 flux: helmController: arch: @@ -385,7 
+385,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: helm-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/helm-controller:v0.37.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/helm-controller:v1.0.1-eks-a-v0.0.0-dev-build.1 kustomizeController: arch: - amd64 @@ -394,7 +394,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kustomize-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/kustomize-controller:v1.2.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/kustomize-controller:v1.3.0-eks-a-v0.0.0-dev-build.1 notificationController: arch: - amd64 @@ -403,7 +403,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: notification-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/notification-controller:v1.2.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/notification-controller:v1.3.0-eks-a-v0.0.0-dev-build.1 sourceController: arch: - amd64 @@ -412,8 +412,8 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: source-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.2.4-eks-a-v0.0.0-dev-build.1 - version: v2.2.3+abcdef1 + uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.3.0-eks-a-v0.0.0-dev-build.1 + version: v2.3.0+abcdef1 haproxy: image: arch: @@ -423,11 +423,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: haproxy os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/haproxy:v0.22.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/haproxy:v0.23.0-eks-a-v0.0.0-dev-build.1 kindnetd: manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/kind/manifests/kindnetd/v0.22.0/kindnetd.yaml - version: v0.22.0+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/kind/manifests/kindnetd/v0.23.0/kindnetd.yaml + version: v0.23.0+abcdef1 kubeVersion: "1.25" nutanix: cloudProvider: @@ -447,11 +447,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-nutanix os: linux - uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.5-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/infrastructure-components.yaml kubeVip: arch: - amd64 @@ -460,10 +460,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: 
public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/metadata.yaml - version: v1.3.2+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/metadata.yaml + version: v1.3.5+abcdef1 packageController: credentialProviderPackage: arch: @@ -473,12 +473,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: credential-provider-package os: linux - uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.3.13-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.3-eks-a-v0.0.0-dev-build.1 helmChart: description: Helm chart for eks-anywhere-packages imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.3.13-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.3-eks-a-v0.0.0-dev-build.1 packageController: arch: - amd64 @@ -487,7 +487,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.3.13-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.3-eks-a-v0.0.0-dev-build.1 tokenRefresher: arch: - amd64 @@ -496,8 +496,8 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: ecr-token-refresher os: linux - uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.3.13-eks-a-v0.0.0-dev-build.1 - version: v0.3.13+abcdef1 + uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.3-eks-a-v0.0.0-dev-build.1 + version: v0.4.3+abcdef1 snow: bottlerocketBootstrapSnow: arch: @@ -507,7 +507,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap-snow os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-25-35-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-25-40-eks-a-v0.0.0-dev-build.1 components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml kubeVip: @@ -518,7 +518,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -562,7 +562,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/metadata.yaml tinkerbellStack: @@ -575,7 +575,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cexec os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/cexec:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/cexec:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 imageToDisk: arch: - amd64 @@ -584,7 +584,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: image2disk os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/image2disk:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/image2disk:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 kexec: arch: - amd64 @@ -593,7 +593,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kexec os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/kexec:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/kexec:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 ociToDisk: arch: - amd64 @@ -602,7 +602,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: oci2disk os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/oci2disk:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/oci2disk:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 reboot: arch: - amd64 @@ -611,7 +611,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: reboot os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/reboot:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/reboot:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 writeFile: arch: - amd64 @@ -620,7 +620,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: writefile os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/writefile:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/writefile:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 boots: arch: - amd64 @@ -638,7 +638,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hegel os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hegel:v0.10.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hegel:v0.12.0-eks-a-v0.0.0-dev-build.1 hook: bootkit: arch: @@ -648,7 +648,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-bootkit os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-bootkit:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-bootkit:v0.8.1-eks-a-v0.0.0-dev-build.1 docker: arch: - amd64 @@ -657,18 +657,18 @@ spec: 
imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-docker os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-docker:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-docker:v0.8.1-eks-a-v0.0.0-dev-build.1 initramfs: amd: description: Tinkerbell operating system installation environment (osie) component name: initramfs-x86_64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/initramfs-x86_64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/initramfs-x86_64 arm: description: Tinkerbell operating system installation environment (osie) component name: initramfs-aarch64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/initramfs-aarch64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/initramfs-aarch64 kernel: arch: - amd64 @@ -677,18 +677,18 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-kernel os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-kernel:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-kernel:v0.8.1-eks-a-v0.0.0-dev-build.1 vmlinuz: amd: description: Tinkerbell operating system installation environment (osie) component name: vmlinuz-x86_64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/vmlinuz-x86_64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/vmlinuz-x86_64 arm: description: Tinkerbell operating system installation environment (osie) component name: vmlinuz-aarch64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/vmlinuz-aarch64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/vmlinuz-aarch64 rufio: arch: - amd64 @@ -697,7 +697,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: rufio os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/rufio:afd7cd82fa08dae8f9f3ffac96eb030176f3abbd-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/rufio:v0.3.3-eks-a-v0.0.0-dev-build.1 tink: tinkController: arch: @@ -707,7 +707,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-controller os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-controller:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-controller:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkServer: arch: - amd64 @@ -716,7 +716,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-server os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-server:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-server:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkWorker: arch: - amd64 @@ -725,12 +725,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-worker os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-worker:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: 
public.ecr.aws/release-container-registry/tinkerbell/tink/tink-worker:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkerbellChart: description: Helm chart for tinkerbell-chart imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tinkerbell-chart - uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.5-eks-a-v0.0.0-dev-build.1 version: v0.5.2+abcdef1 upgrader: upgrader: @@ -741,7 +741,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: upgrader os: linux - uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-25-35-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-25-40-eks-a-v0.0.0-dev-build.1 vSphere: clusterAPIController: arch: @@ -751,11 +751,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-vsphere os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.8.5-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.10.0-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/infrastructure-components.yaml kubeProxy: arch: - amd64 @@ -764,7 +764,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -773,7 +773,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -782,13 +782,13 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cloud-provider-vsphere os: linux - uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.25.1-eks-d-1-25-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.25.3-eks-d-1-25-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/metadata.yaml - version: v1.8.5+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/metadata.yaml + version: v1.10.0+abcdef1 - bootstrap: components: - 
uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.1/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.2/bootstrap-components.yaml controller: arch: - amd64 @@ -797,7 +797,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-bootstrap-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -806,27 +806,27 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 bottlerocketHostContainers: admin: arch: - amd64 description: Container image for bottlerocket-admin image - imageDigest: sha256:789cb7d120ac7633bda792660266214a486c182db67d89ee6508216fe2bc7f93 + imageDigest: sha256:2f884ebc34c0f54b6d75493ba2ad645f42525d6c90fe2099ae50708de6f86152 name: bottlerocket-admin os: linux - uri: public.ecr.aws/bottlerocket/bottlerocket-admin:v0.11.4 + uri: public.ecr.aws/bottlerocket/bottlerocket-admin:v0.11.6 control: arch: - amd64 description: Container image for bottlerocket-control image - imageDigest: sha256:29deada0d540dfe6ecb8238e0eb04ed4a9e6b8cf0d5cab232890ef9404643c30 + imageDigest: sha256:0ed73c9a2e35fab4287f3cbf18de084b740cace3234628c5e215adc407e52750 name: bottlerocket-control os: linux - uri: public.ecr.aws/bottlerocket/bottlerocket-control:v0.7.8 + uri: public.ecr.aws/bottlerocket/bottlerocket-control:v0.7.10 kubeadmBootstrap: arch: - amd64 @@ -835,7 +835,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-26-31-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-26-37-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -845,7 +845,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-acmesolver os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.5-eks-a-v0.0.0-dev-build.1 cainjector: arch: - amd64 @@ -854,7 +854,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-cainjector os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.5-eks-a-v0.0.0-dev-build.1 controller: arch: - amd64 @@ -863,7 +863,7 @@ spec: 
imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-controller os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.5-eks-a-v0.0.0-dev-build.1 ctl: arch: - amd64 @@ -872,10 +872,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-ctl os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.5-eks-a-v0.0.0-dev-build.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.13.2/cert-manager.yaml - version: v1.13.2+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.5/cert-manager.yaml + version: v1.14.5+abcdef1 webhook: arch: - amd64 @@ -884,32 +884,32 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-webhook os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.5-eks-a-v0.0.0-dev-build.1 cilium: cilium: arch: - amd64 description: Container image for cilium image - imageDigest: sha256:d9f2b7617a84689087d8e10c75d915018620fcc9e3123a31498296435642cf61 + imageDigest: sha256:8d5876112cd6de92c2d4cf9de3281d942ba3cc7df08b2109f94271bdfcb17f25 name: cilium os: linux - uri: public.ecr.aws/isovalent/cilium:v1.13.12-eksa.1 + uri: public.ecr.aws/isovalent/cilium:v1.13.16-eksa.2 helmChart: description: Helm chart for cilium-chart - imageDigest: sha256:5d1921b071bb9f59bc8d0e6b83fe8579669d0b34d6ffc2432baa1d15adb5872b + imageDigest: sha256:482c1d609f03993d254e5799bb267cfb5b7bf2950760db3709c307d6fc6a2c4c name: cilium-chart - uri: public.ecr.aws/isovalent/cilium:1.13.12-eksa.1 + uri: public.ecr.aws/isovalent/cilium:1.13.16-eksa.2 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.12-eksa.1/cilium.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.16-eksa.2/cilium.yaml operator: arch: - amd64 description: Container image for operator-generic image - imageDigest: sha256:d2c3ac2b5c4fb67a68939642a40032cb0e3a97ce49415d87aa482b9b6d663a07 + imageDigest: sha256:3a4eaf8cf250e816635ae8e49b3b622b9ab7b83af41a00dc811408c347bceb51 name: operator-generic os: linux - uri: public.ecr.aws/isovalent/operator-generic:v1.13.12-eksa.1 - version: v1.13.12-eksa.1 + uri: public.ecr.aws/isovalent/operator-generic:v1.13.16-eksa.2 + version: v1.13.16-eksa.2 cloudStack: clusterAPIController: arch: @@ -930,7 +930,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -939,13 +939,13 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: 
public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-cloudstack/manifests/infrastructure-cloudstack/v0.4.10-rc1/metadata.yaml version: v0.4.10-rc1+abcdef1 clusterAPI: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.1/core-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.2/core-components.yaml controller: arch: - amd64 @@ -954,7 +954,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -963,13 +963,13 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 controlPlane: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.1/control-plane-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.2/control-plane-components.yaml controller: arch: - amd64 @@ -978,7 +978,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-control-plane-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -987,15 +987,15 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 docker: clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.1/cluster-template-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/cluster-template-development.yaml components: - uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.1/infrastructure-components-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/infrastructure-components-development.yaml kubeProxy: arch: - amd64 @@ -1004,7 +1004,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -1013,10 +1013,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-docker os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.7.2-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 eksD: ami: bottlerocket: {} @@ -1030,7 +1030,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.14/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.17/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz crictl: arch: - amd64 @@ -1039,7 +1039,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.29.0/cri-tools-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.30.0/cri-tools-v0.0.0-dev-build.0-linux-amd64.tar.gz etcdadm: arch: - amd64 @@ -1058,7 +1058,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/image-builder/v0.4.1/image-builder-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/image-builder/v0.5.0/image-builder-v0.0.0-dev-build.0-linux-amd64.tar.gz kindNode: arch: - amd64 @@ -1067,10 +1067,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.26.14-eks-d-1-26-31-eks-a-v0.0.0-dev-build.1 - kubeVersion: v1.26.14 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-26/kubernetes-1-26-eks-31.yaml - name: kubernetes-1-26-eks-31 + uri: 
public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.26.15-eks-d-1-26-37-eks-a-v0.0.0-dev-build.1 + kubeVersion: v1.26.15 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-26/kubernetes-1-26-eks-37.yaml + name: kubernetes-1-26-eks-37 ova: bottlerocket: {} raw: @@ -1084,7 +1084,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cli-tools os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.6-eks-a-v0.0.0-dev-build.1 clusterController: arch: - amd64 @@ -1093,9 +1093,9 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cluster-controller os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.6-eks-a-v0.0.0-dev-build.1 components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.1/eksa-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.6/eksa-components.yaml diagnosticCollector: arch: - amd64 @@ -1104,11 +1104,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-diagnostic-collector os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.6-eks-a-v0.0.0-dev-build.1 version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.12/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/bootstrap-components.yaml controller: arch: - amd64 @@ -1117,7 +1117,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: etcdadm-bootstrap-provider os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-bootstrap-provider:v1.0.12-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/etcdadm-bootstrap-provider:v1.0.13-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -1126,13 +1126,13 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.12/metadata.yaml - version: v1.0.12+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/metadata.yaml + version: v1.0.13+abcdef1 etcdadmController: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.19/bootstrap-components.yaml + uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/bootstrap-components.yaml controller: arch: - amd64 @@ -1141,7 +1141,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: etcdadm-controller os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.19-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.21-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -1150,10 +1150,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.19/metadata.yaml - version: v1.0.19+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/metadata.yaml + version: v1.0.21+abcdef1 flux: helmController: arch: @@ -1163,7 +1163,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: helm-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/helm-controller:v0.37.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/helm-controller:v1.0.1-eks-a-v0.0.0-dev-build.1 kustomizeController: arch: - amd64 @@ -1172,7 +1172,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kustomize-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/kustomize-controller:v1.2.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/kustomize-controller:v1.3.0-eks-a-v0.0.0-dev-build.1 notificationController: arch: - amd64 @@ -1181,7 +1181,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: notification-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/notification-controller:v1.2.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/notification-controller:v1.3.0-eks-a-v0.0.0-dev-build.1 sourceController: arch: - amd64 @@ -1190,8 +1190,8 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: source-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.2.4-eks-a-v0.0.0-dev-build.1 - version: v2.2.3+abcdef1 + uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.3.0-eks-a-v0.0.0-dev-build.1 + version: v2.3.0+abcdef1 haproxy: image: arch: @@ -1201,11 +1201,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: haproxy os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/haproxy:v0.22.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/haproxy:v0.23.0-eks-a-v0.0.0-dev-build.1 kindnetd: manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/kind/manifests/kindnetd/v0.22.0/kindnetd.yaml - version: v0.22.0+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/kind/manifests/kindnetd/v0.23.0/kindnetd.yaml + version: 
v0.23.0+abcdef1 kubeVersion: "1.26" nutanix: cloudProvider: @@ -1225,11 +1225,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-nutanix os: linux - uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.5-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/infrastructure-components.yaml kubeVip: arch: - amd64 @@ -1238,10 +1238,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/metadata.yaml - version: v1.3.2+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/metadata.yaml + version: v1.3.5+abcdef1 packageController: credentialProviderPackage: arch: @@ -1251,12 +1251,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: credential-provider-package os: linux - uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.3.13-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.3-eks-a-v0.0.0-dev-build.1 helmChart: description: Helm chart for eks-anywhere-packages imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.3.13-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.3-eks-a-v0.0.0-dev-build.1 packageController: arch: - amd64 @@ -1265,7 +1265,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.3.13-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.3-eks-a-v0.0.0-dev-build.1 tokenRefresher: arch: - amd64 @@ -1274,8 +1274,8 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: ecr-token-refresher os: linux - uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.3.13-eks-a-v0.0.0-dev-build.1 - version: v0.3.13+abcdef1 + uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.3-eks-a-v0.0.0-dev-build.1 + version: v0.4.3+abcdef1 snow: bottlerocketBootstrapSnow: arch: @@ -1285,7 +1285,7 @@ spec: imageDigest: 
sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap-snow os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-26-31-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-26-37-eks-a-v0.0.0-dev-build.1 components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml kubeVip: @@ -1296,7 +1296,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -1340,7 +1340,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/metadata.yaml tinkerbellStack: @@ -1353,7 +1353,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cexec os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/cexec:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/cexec:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 imageToDisk: arch: - amd64 @@ -1362,7 +1362,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: image2disk os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/image2disk:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/image2disk:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 kexec: arch: - amd64 @@ -1371,7 +1371,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kexec os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/kexec:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/kexec:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 ociToDisk: arch: - amd64 @@ -1380,7 +1380,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: oci2disk os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/oci2disk:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/oci2disk:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 reboot: arch: - amd64 @@ -1389,7 +1389,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: reboot os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/reboot:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/reboot:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 writeFile: 
arch: - amd64 @@ -1398,7 +1398,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: writefile os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/writefile:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/writefile:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 boots: arch: - amd64 @@ -1416,7 +1416,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hegel os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hegel:v0.10.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hegel:v0.12.0-eks-a-v0.0.0-dev-build.1 hook: bootkit: arch: @@ -1426,7 +1426,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-bootkit os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-bootkit:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-bootkit:v0.8.1-eks-a-v0.0.0-dev-build.1 docker: arch: - amd64 @@ -1435,18 +1435,18 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-docker os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-docker:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-docker:v0.8.1-eks-a-v0.0.0-dev-build.1 initramfs: amd: description: Tinkerbell operating system installation environment (osie) component name: initramfs-x86_64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/initramfs-x86_64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/initramfs-x86_64 arm: description: Tinkerbell operating system installation environment (osie) component name: initramfs-aarch64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/initramfs-aarch64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/initramfs-aarch64 kernel: arch: - amd64 @@ -1455,18 +1455,18 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-kernel os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-kernel:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-kernel:v0.8.1-eks-a-v0.0.0-dev-build.1 vmlinuz: amd: description: Tinkerbell operating system installation environment (osie) component name: vmlinuz-x86_64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/vmlinuz-x86_64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/vmlinuz-x86_64 arm: description: Tinkerbell operating system installation environment (osie) component name: vmlinuz-aarch64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/vmlinuz-aarch64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/vmlinuz-aarch64 rufio: arch: - amd64 @@ -1475,7 +1475,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: rufio os: linux - uri: 
public.ecr.aws/release-container-registry/tinkerbell/rufio:afd7cd82fa08dae8f9f3ffac96eb030176f3abbd-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/rufio:v0.3.3-eks-a-v0.0.0-dev-build.1 tink: tinkController: arch: @@ -1485,7 +1485,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-controller os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-controller:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-controller:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkServer: arch: - amd64 @@ -1494,7 +1494,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-server os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-server:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-server:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkWorker: arch: - amd64 @@ -1503,12 +1503,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-worker os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-worker:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-worker:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkerbellChart: description: Helm chart for tinkerbell-chart imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tinkerbell-chart - uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.5-eks-a-v0.0.0-dev-build.1 version: v0.5.2+abcdef1 upgrader: upgrader: @@ -1519,7 +1519,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: upgrader os: linux - uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-26-31-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-26-37-eks-a-v0.0.0-dev-build.1 vSphere: clusterAPIController: arch: @@ -1529,11 +1529,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-vsphere os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.8.5-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.10.0-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/infrastructure-components.yaml kubeProxy: arch: - amd64 @@ -1542,7 +1542,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: 
public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -1551,7 +1551,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -1560,13 +1560,13 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cloud-provider-vsphere os: linux - uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.26.0-eks-d-1-26-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.26.2-eks-d-1-26-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/metadata.yaml - version: v1.8.5+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/metadata.yaml + version: v1.10.0+abcdef1 - bootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.1/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.2/bootstrap-components.yaml controller: arch: - amd64 @@ -1575,7 +1575,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-bootstrap-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -1584,27 +1584,27 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 bottlerocketHostContainers: admin: arch: - amd64 description: Container image for bottlerocket-admin image - imageDigest: sha256:789cb7d120ac7633bda792660266214a486c182db67d89ee6508216fe2bc7f93 + imageDigest: sha256:2f884ebc34c0f54b6d75493ba2ad645f42525d6c90fe2099ae50708de6f86152 name: bottlerocket-admin os: linux - uri: public.ecr.aws/bottlerocket/bottlerocket-admin:v0.11.4 + uri: public.ecr.aws/bottlerocket/bottlerocket-admin:v0.11.6 control: arch: - amd64 description: Container image for bottlerocket-control image - imageDigest: sha256:29deada0d540dfe6ecb8238e0eb04ed4a9e6b8cf0d5cab232890ef9404643c30 + imageDigest: sha256:0ed73c9a2e35fab4287f3cbf18de084b740cace3234628c5e215adc407e52750 name: bottlerocket-control os: linux - uri: 
public.ecr.aws/bottlerocket/bottlerocket-control:v0.7.8 + uri: public.ecr.aws/bottlerocket/bottlerocket-control:v0.7.10 kubeadmBootstrap: arch: - amd64 @@ -1613,7 +1613,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-27-25-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-27-31-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -1623,7 +1623,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-acmesolver os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.5-eks-a-v0.0.0-dev-build.1 cainjector: arch: - amd64 @@ -1632,7 +1632,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-cainjector os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.5-eks-a-v0.0.0-dev-build.1 controller: arch: - amd64 @@ -1641,7 +1641,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-controller os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.5-eks-a-v0.0.0-dev-build.1 ctl: arch: - amd64 @@ -1650,10 +1650,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-ctl os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.5-eks-a-v0.0.0-dev-build.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.13.2/cert-manager.yaml - version: v1.13.2+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.5/cert-manager.yaml + version: v1.14.5+abcdef1 webhook: arch: - amd64 @@ -1662,32 +1662,32 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-webhook os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.5-eks-a-v0.0.0-dev-build.1 cilium: cilium: arch: - amd64 description: Container image for cilium image - imageDigest: sha256:d9f2b7617a84689087d8e10c75d915018620fcc9e3123a31498296435642cf61 + imageDigest: sha256:8d5876112cd6de92c2d4cf9de3281d942ba3cc7df08b2109f94271bdfcb17f25 name: cilium os: linux - uri: public.ecr.aws/isovalent/cilium:v1.13.12-eksa.1 + uri: public.ecr.aws/isovalent/cilium:v1.13.16-eksa.2 helmChart: description: Helm chart for cilium-chart - imageDigest: sha256:5d1921b071bb9f59bc8d0e6b83fe8579669d0b34d6ffc2432baa1d15adb5872b + imageDigest: sha256:482c1d609f03993d254e5799bb267cfb5b7bf2950760db3709c307d6fc6a2c4c name: cilium-chart - uri: public.ecr.aws/isovalent/cilium:1.13.12-eksa.1 + uri: 
public.ecr.aws/isovalent/cilium:1.13.16-eksa.2 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.12-eksa.1/cilium.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.16-eksa.2/cilium.yaml operator: arch: - amd64 description: Container image for operator-generic image - imageDigest: sha256:d2c3ac2b5c4fb67a68939642a40032cb0e3a97ce49415d87aa482b9b6d663a07 + imageDigest: sha256:3a4eaf8cf250e816635ae8e49b3b622b9ab7b83af41a00dc811408c347bceb51 name: operator-generic os: linux - uri: public.ecr.aws/isovalent/operator-generic:v1.13.12-eksa.1 - version: v1.13.12-eksa.1 + uri: public.ecr.aws/isovalent/operator-generic:v1.13.16-eksa.2 + version: v1.13.16-eksa.2 cloudStack: clusterAPIController: arch: @@ -1708,7 +1708,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -1717,13 +1717,13 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-cloudstack/manifests/infrastructure-cloudstack/v0.4.10-rc1/metadata.yaml version: v0.4.10-rc1+abcdef1 clusterAPI: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.1/core-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.2/core-components.yaml controller: arch: - amd64 @@ -1732,7 +1732,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -1741,13 +1741,13 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 controlPlane: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.1/control-plane-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.2/control-plane-components.yaml controller: arch: - amd64 @@ -1756,7 +1756,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: 
kubeadm-control-plane-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -1765,15 +1765,15 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 docker: clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.1/cluster-template-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/cluster-template-development.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.1/infrastructure-components-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/infrastructure-components-development.yaml kubeProxy: arch: - amd64 @@ -1782,7 +1782,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -1791,10 +1791,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-docker os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.7.2-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 eksD: ami: bottlerocket: {} @@ -1808,7 +1808,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.14/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.17/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz crictl: arch: - amd64 @@ -1817,7 +1817,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 
0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.29.0/cri-tools-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.30.0/cri-tools-v0.0.0-dev-build.0-linux-amd64.tar.gz etcdadm: arch: - amd64 @@ -1836,7 +1836,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/image-builder/v0.4.1/image-builder-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/image-builder/v0.5.0/image-builder-v0.0.0-dev-build.0-linux-amd64.tar.gz kindNode: arch: - amd64 @@ -1845,10 +1845,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.27.11-eks-d-1-27-25-eks-a-v0.0.0-dev-build.1 - kubeVersion: v1.27.11 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-27/kubernetes-1-27-eks-25.yaml - name: kubernetes-1-27-eks-25 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.27.14-eks-d-1-27-31-eks-a-v0.0.0-dev-build.1 + kubeVersion: v1.27.14 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-27/kubernetes-1-27-eks-31.yaml + name: kubernetes-1-27-eks-31 ova: bottlerocket: {} raw: @@ -1862,7 +1862,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cli-tools os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.6-eks-a-v0.0.0-dev-build.1 clusterController: arch: - amd64 @@ -1871,9 +1871,9 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cluster-controller os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.6-eks-a-v0.0.0-dev-build.1 components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.1/eksa-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.6/eksa-components.yaml diagnosticCollector: arch: - amd64 @@ -1882,11 +1882,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-diagnostic-collector os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.6-eks-a-v0.0.0-dev-build.1 version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.12/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/bootstrap-components.yaml controller: arch: - amd64 @@ -1895,7 
+1895,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: etcdadm-bootstrap-provider os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-bootstrap-provider:v1.0.12-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/etcdadm-bootstrap-provider:v1.0.13-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -1904,13 +1904,13 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.12/metadata.yaml - version: v1.0.12+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/metadata.yaml + version: v1.0.13+abcdef1 etcdadmController: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.19/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/bootstrap-components.yaml controller: arch: - amd64 @@ -1919,7 +1919,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: etcdadm-controller os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.19-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.21-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -1928,10 +1928,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.19/metadata.yaml - version: v1.0.19+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/metadata.yaml + version: v1.0.21+abcdef1 flux: helmController: arch: @@ -1941,7 +1941,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: helm-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/helm-controller:v0.37.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/helm-controller:v1.0.1-eks-a-v0.0.0-dev-build.1 kustomizeController: arch: - amd64 @@ -1950,7 +1950,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kustomize-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/kustomize-controller:v1.2.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/kustomize-controller:v1.3.0-eks-a-v0.0.0-dev-build.1 notificationController: arch: - amd64 @@ -1959,7 +1959,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: notification-controller os: linux - uri: 
public.ecr.aws/release-container-registry/fluxcd/notification-controller:v1.2.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/notification-controller:v1.3.0-eks-a-v0.0.0-dev-build.1 sourceController: arch: - amd64 @@ -1968,8 +1968,8 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: source-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.2.4-eks-a-v0.0.0-dev-build.1 - version: v2.2.3+abcdef1 + uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.3.0-eks-a-v0.0.0-dev-build.1 + version: v2.3.0+abcdef1 haproxy: image: arch: @@ -1979,11 +1979,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: haproxy os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/haproxy:v0.22.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/haproxy:v0.23.0-eks-a-v0.0.0-dev-build.1 kindnetd: manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/kind/manifests/kindnetd/v0.22.0/kindnetd.yaml - version: v0.22.0+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/kind/manifests/kindnetd/v0.23.0/kindnetd.yaml + version: v0.23.0+abcdef1 kubeVersion: "1.27" nutanix: cloudProvider: @@ -2003,11 +2003,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-nutanix os: linux - uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.5-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/infrastructure-components.yaml kubeVip: arch: - amd64 @@ -2016,10 +2016,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/metadata.yaml - version: v1.3.2+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/metadata.yaml + version: v1.3.5+abcdef1 packageController: credentialProviderPackage: arch: @@ -2029,12 +2029,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: credential-provider-package os: linux - uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.3.13-eks-a-v0.0.0-dev-build.1 + uri: 
public.ecr.aws/release-container-registry/credential-provider-package:v0.4.3-eks-a-v0.0.0-dev-build.1 helmChart: description: Helm chart for eks-anywhere-packages imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.3.13-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.3-eks-a-v0.0.0-dev-build.1 packageController: arch: - amd64 @@ -2043,7 +2043,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.3.13-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.3-eks-a-v0.0.0-dev-build.1 tokenRefresher: arch: - amd64 @@ -2052,8 +2052,8 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: ecr-token-refresher os: linux - uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.3.13-eks-a-v0.0.0-dev-build.1 - version: v0.3.13+abcdef1 + uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.3-eks-a-v0.0.0-dev-build.1 + version: v0.4.3+abcdef1 snow: bottlerocketBootstrapSnow: arch: @@ -2063,7 +2063,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap-snow os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-27-25-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-27-31-eks-a-v0.0.0-dev-build.1 components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml kubeVip: @@ -2074,7 +2074,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -2118,7 +2118,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/metadata.yaml tinkerbellStack: @@ -2131,7 +2131,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cexec os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/cexec:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/cexec:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 imageToDisk: arch: - amd64 @@ -2140,7 +2140,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: image2disk os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/image2disk:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: 
public.ecr.aws/release-container-registry/tinkerbell/hub/image2disk:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 kexec: arch: - amd64 @@ -2149,7 +2149,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kexec os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/kexec:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/kexec:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 ociToDisk: arch: - amd64 @@ -2158,7 +2158,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: oci2disk os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/oci2disk:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/oci2disk:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 reboot: arch: - amd64 @@ -2167,7 +2167,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: reboot os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/reboot:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/reboot:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 writeFile: arch: - amd64 @@ -2176,7 +2176,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: writefile os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/writefile:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/writefile:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 boots: arch: - amd64 @@ -2194,7 +2194,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hegel os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hegel:v0.10.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hegel:v0.12.0-eks-a-v0.0.0-dev-build.1 hook: bootkit: arch: @@ -2204,7 +2204,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-bootkit os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-bootkit:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-bootkit:v0.8.1-eks-a-v0.0.0-dev-build.1 docker: arch: - amd64 @@ -2213,18 +2213,18 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-docker os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-docker:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-docker:v0.8.1-eks-a-v0.0.0-dev-build.1 initramfs: amd: description: Tinkerbell operating system installation environment (osie) component name: initramfs-x86_64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/initramfs-x86_64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/initramfs-x86_64 arm: description: Tinkerbell operating system installation environment (osie) component name: initramfs-aarch64 - uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/initramfs-aarch64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/initramfs-aarch64 kernel: arch: - amd64 @@ -2233,18 +2233,18 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-kernel os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-kernel:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-kernel:v0.8.1-eks-a-v0.0.0-dev-build.1 vmlinuz: amd: description: Tinkerbell operating system installation environment (osie) component name: vmlinuz-x86_64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/vmlinuz-x86_64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/vmlinuz-x86_64 arm: description: Tinkerbell operating system installation environment (osie) component name: vmlinuz-aarch64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/vmlinuz-aarch64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/vmlinuz-aarch64 rufio: arch: - amd64 @@ -2253,7 +2253,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: rufio os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/rufio:afd7cd82fa08dae8f9f3ffac96eb030176f3abbd-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/rufio:v0.3.3-eks-a-v0.0.0-dev-build.1 tink: tinkController: arch: @@ -2263,7 +2263,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-controller os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-controller:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-controller:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkServer: arch: - amd64 @@ -2272,7 +2272,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-server os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-server:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-server:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkWorker: arch: - amd64 @@ -2281,12 +2281,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-worker os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-worker:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-worker:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkerbellChart: description: Helm chart for tinkerbell-chart imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tinkerbell-chart - uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.5-eks-a-v0.0.0-dev-build.1 version: v0.5.2+abcdef1 upgrader: upgrader: @@ -2297,7 +2297,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: upgrader os: linux - uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-27-25-eks-a-v0.0.0-dev-build.1 + uri: 
public.ecr.aws/release-container-registry/aws/upgrader:v1-27-31-eks-a-v0.0.0-dev-build.1 vSphere: clusterAPIController: arch: @@ -2307,11 +2307,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-vsphere os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.8.5-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.10.0-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/infrastructure-components.yaml kubeProxy: arch: - amd64 @@ -2320,7 +2320,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -2329,7 +2329,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -2340,11 +2340,11 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.27.0-eks-d-1-27-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/metadata.yaml - version: v1.8.5+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/metadata.yaml + version: v1.10.0+abcdef1 - bootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.1/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.2/bootstrap-components.yaml controller: arch: - amd64 @@ -2353,7 +2353,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-bootstrap-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -2362,27 +2362,27 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: 
public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 bottlerocketHostContainers: admin: arch: - amd64 description: Container image for bottlerocket-admin image - imageDigest: sha256:789cb7d120ac7633bda792660266214a486c182db67d89ee6508216fe2bc7f93 + imageDigest: sha256:2f884ebc34c0f54b6d75493ba2ad645f42525d6c90fe2099ae50708de6f86152 name: bottlerocket-admin os: linux - uri: public.ecr.aws/bottlerocket/bottlerocket-admin:v0.11.4 + uri: public.ecr.aws/bottlerocket/bottlerocket-admin:v0.11.6 control: arch: - amd64 description: Container image for bottlerocket-control image - imageDigest: sha256:29deada0d540dfe6ecb8238e0eb04ed4a9e6b8cf0d5cab232890ef9404643c30 + imageDigest: sha256:0ed73c9a2e35fab4287f3cbf18de084b740cace3234628c5e215adc407e52750 name: bottlerocket-control os: linux - uri: public.ecr.aws/bottlerocket/bottlerocket-control:v0.7.8 + uri: public.ecr.aws/bottlerocket/bottlerocket-control:v0.7.10 kubeadmBootstrap: arch: - amd64 @@ -2391,7 +2391,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-28-18-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-28-24-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -2401,7 +2401,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-acmesolver os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.5-eks-a-v0.0.0-dev-build.1 cainjector: arch: - amd64 @@ -2410,7 +2410,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-cainjector os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.5-eks-a-v0.0.0-dev-build.1 controller: arch: - amd64 @@ -2419,7 +2419,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-controller os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.5-eks-a-v0.0.0-dev-build.1 ctl: arch: - amd64 @@ -2428,10 +2428,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-ctl os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.5-eks-a-v0.0.0-dev-build.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.13.2/cert-manager.yaml - version: v1.13.2+abcdef1 + uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.5/cert-manager.yaml + version: v1.14.5+abcdef1 webhook: arch: - amd64 @@ -2440,32 +2440,32 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-webhook os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.5-eks-a-v0.0.0-dev-build.1 cilium: cilium: arch: - amd64 description: Container image for cilium image - imageDigest: sha256:d9f2b7617a84689087d8e10c75d915018620fcc9e3123a31498296435642cf61 + imageDigest: sha256:8d5876112cd6de92c2d4cf9de3281d942ba3cc7df08b2109f94271bdfcb17f25 name: cilium os: linux - uri: public.ecr.aws/isovalent/cilium:v1.13.12-eksa.1 + uri: public.ecr.aws/isovalent/cilium:v1.13.16-eksa.2 helmChart: description: Helm chart for cilium-chart - imageDigest: sha256:5d1921b071bb9f59bc8d0e6b83fe8579669d0b34d6ffc2432baa1d15adb5872b + imageDigest: sha256:482c1d609f03993d254e5799bb267cfb5b7bf2950760db3709c307d6fc6a2c4c name: cilium-chart - uri: public.ecr.aws/isovalent/cilium:1.13.12-eksa.1 + uri: public.ecr.aws/isovalent/cilium:1.13.16-eksa.2 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.12-eksa.1/cilium.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.16-eksa.2/cilium.yaml operator: arch: - amd64 description: Container image for operator-generic image - imageDigest: sha256:d2c3ac2b5c4fb67a68939642a40032cb0e3a97ce49415d87aa482b9b6d663a07 + imageDigest: sha256:3a4eaf8cf250e816635ae8e49b3b622b9ab7b83af41a00dc811408c347bceb51 name: operator-generic os: linux - uri: public.ecr.aws/isovalent/operator-generic:v1.13.12-eksa.1 - version: v1.13.12-eksa.1 + uri: public.ecr.aws/isovalent/operator-generic:v1.13.16-eksa.2 + version: v1.13.16-eksa.2 cloudStack: clusterAPIController: arch: @@ -2486,7 +2486,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -2495,13 +2495,13 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-cloudstack/manifests/infrastructure-cloudstack/v0.4.10-rc1/metadata.yaml version: v0.4.10-rc1+abcdef1 clusterAPI: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.1/core-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.2/core-components.yaml controller: arch: - amd64 @@ -2510,7 +2510,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: 
public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -2519,13 +2519,13 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 controlPlane: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.1/control-plane-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.2/control-plane-components.yaml controller: arch: - amd64 @@ -2534,7 +2534,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-control-plane-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -2543,15 +2543,15 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 docker: clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.1/cluster-template-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/cluster-template-development.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.1/infrastructure-components-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/infrastructure-components-development.yaml kubeProxy: arch: - amd64 @@ -2560,7 +2560,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -2569,10 +2569,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-docker os: linux - uri: 
public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.7.2-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 eksD: ami: bottlerocket: {} @@ -2586,7 +2586,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.14/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.17/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz crictl: arch: - amd64 @@ -2595,7 +2595,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.29.0/cri-tools-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.30.0/cri-tools-v0.0.0-dev-build.0-linux-amd64.tar.gz etcdadm: arch: - amd64 @@ -2614,7 +2614,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/image-builder/v0.4.1/image-builder-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/image-builder/v0.5.0/image-builder-v0.0.0-dev-build.0-linux-amd64.tar.gz kindNode: arch: - amd64 @@ -2623,10 +2623,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.28.7-eks-d-1-28-18-eks-a-v0.0.0-dev-build.1 - kubeVersion: v1.28.7 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-28/kubernetes-1-28-eks-18.yaml - name: kubernetes-1-28-eks-18 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.28.10-eks-d-1-28-24-eks-a-v0.0.0-dev-build.1 + kubeVersion: v1.28.10 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-28/kubernetes-1-28-eks-24.yaml + name: kubernetes-1-28-eks-24 ova: bottlerocket: {} raw: @@ -2640,7 +2640,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cli-tools os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.6-eks-a-v0.0.0-dev-build.1 clusterController: arch: - amd64 @@ -2649,9 +2649,9 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cluster-controller os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.1-eks-a-v0.0.0-dev-build.1 + uri: 
public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.6-eks-a-v0.0.0-dev-build.1 components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.1/eksa-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.6/eksa-components.yaml diagnosticCollector: arch: - amd64 @@ -2660,11 +2660,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-diagnostic-collector os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.6-eks-a-v0.0.0-dev-build.1 version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.12/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/bootstrap-components.yaml controller: arch: - amd64 @@ -2673,7 +2673,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: etcdadm-bootstrap-provider os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-bootstrap-provider:v1.0.12-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/etcdadm-bootstrap-provider:v1.0.13-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -2682,13 +2682,13 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.12/metadata.yaml - version: v1.0.12+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/metadata.yaml + version: v1.0.13+abcdef1 etcdadmController: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.19/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/bootstrap-components.yaml controller: arch: - amd64 @@ -2697,7 +2697,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: etcdadm-controller os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.19-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.21-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -2706,10 +2706,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.19/metadata.yaml - version: v1.0.19+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/metadata.yaml + version: v1.0.21+abcdef1 flux: helmController: arch: @@ -2719,7 +2719,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: helm-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/helm-controller:v0.37.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/helm-controller:v1.0.1-eks-a-v0.0.0-dev-build.1 kustomizeController: arch: - amd64 @@ -2728,7 +2728,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kustomize-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/kustomize-controller:v1.2.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/kustomize-controller:v1.3.0-eks-a-v0.0.0-dev-build.1 notificationController: arch: - amd64 @@ -2737,7 +2737,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: notification-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/notification-controller:v1.2.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/notification-controller:v1.3.0-eks-a-v0.0.0-dev-build.1 sourceController: arch: - amd64 @@ -2746,8 +2746,8 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: source-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.2.4-eks-a-v0.0.0-dev-build.1 - version: v2.2.3+abcdef1 + uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.3.0-eks-a-v0.0.0-dev-build.1 + version: v2.3.0+abcdef1 haproxy: image: arch: @@ -2757,11 +2757,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: haproxy os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/haproxy:v0.22.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/haproxy:v0.23.0-eks-a-v0.0.0-dev-build.1 kindnetd: manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/kind/manifests/kindnetd/v0.22.0/kindnetd.yaml - version: v0.22.0+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/kind/manifests/kindnetd/v0.23.0/kindnetd.yaml + version: v0.23.0+abcdef1 kubeVersion: "1.28" nutanix: cloudProvider: @@ -2781,11 +2781,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-nutanix os: linux - uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.5-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/cluster-template.yaml components: - uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/infrastructure-components.yaml kubeVip: arch: - amd64 @@ -2794,10 +2794,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/metadata.yaml - version: v1.3.2+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/metadata.yaml + version: v1.3.5+abcdef1 packageController: credentialProviderPackage: arch: @@ -2807,12 +2807,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: credential-provider-package os: linux - uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.3.13-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.3-eks-a-v0.0.0-dev-build.1 helmChart: description: Helm chart for eks-anywhere-packages imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.3.13-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.3-eks-a-v0.0.0-dev-build.1 packageController: arch: - amd64 @@ -2821,7 +2821,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.3.13-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.3-eks-a-v0.0.0-dev-build.1 tokenRefresher: arch: - amd64 @@ -2830,8 +2830,8 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: ecr-token-refresher os: linux - uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.3.13-eks-a-v0.0.0-dev-build.1 - version: v0.3.13+abcdef1 + uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.3-eks-a-v0.0.0-dev-build.1 + version: v0.4.3+abcdef1 snow: bottlerocketBootstrapSnow: arch: @@ -2841,7 +2841,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap-snow os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-28-18-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-28-24-eks-a-v0.0.0-dev-build.1 components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml kubeVip: @@ -2852,7 +2852,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: 
public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -2896,7 +2896,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/metadata.yaml tinkerbellStack: @@ -2909,7 +2909,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cexec os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/cexec:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/cexec:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 imageToDisk: arch: - amd64 @@ -2918,7 +2918,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: image2disk os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/image2disk:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/image2disk:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 kexec: arch: - amd64 @@ -2927,7 +2927,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kexec os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/kexec:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/kexec:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 ociToDisk: arch: - amd64 @@ -2936,7 +2936,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: oci2disk os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/oci2disk:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/oci2disk:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 reboot: arch: - amd64 @@ -2945,7 +2945,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: reboot os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/reboot:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/reboot:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 writeFile: arch: - amd64 @@ -2954,7 +2954,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: writefile os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/writefile:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/writefile:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 boots: arch: - amd64 @@ -2972,7 +2972,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hegel os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hegel:v0.10.1-eks-a-v0.0.0-dev-build.1 + uri: 
public.ecr.aws/release-container-registry/tinkerbell/hegel:v0.12.0-eks-a-v0.0.0-dev-build.1 hook: bootkit: arch: @@ -2982,7 +2982,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-bootkit os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-bootkit:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-bootkit:v0.8.1-eks-a-v0.0.0-dev-build.1 docker: arch: - amd64 @@ -2991,18 +2991,18 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-docker os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-docker:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-docker:v0.8.1-eks-a-v0.0.0-dev-build.1 initramfs: amd: description: Tinkerbell operating system installation environment (osie) component name: initramfs-x86_64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/initramfs-x86_64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/initramfs-x86_64 arm: description: Tinkerbell operating system installation environment (osie) component name: initramfs-aarch64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/initramfs-aarch64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/initramfs-aarch64 kernel: arch: - amd64 @@ -3011,18 +3011,18 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-kernel os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-kernel:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-kernel:v0.8.1-eks-a-v0.0.0-dev-build.1 vmlinuz: amd: description: Tinkerbell operating system installation environment (osie) component name: vmlinuz-x86_64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/vmlinuz-x86_64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/vmlinuz-x86_64 arm: description: Tinkerbell operating system installation environment (osie) component name: vmlinuz-aarch64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/vmlinuz-aarch64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/vmlinuz-aarch64 rufio: arch: - amd64 @@ -3031,7 +3031,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: rufio os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/rufio:afd7cd82fa08dae8f9f3ffac96eb030176f3abbd-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/rufio:v0.3.3-eks-a-v0.0.0-dev-build.1 tink: tinkController: arch: @@ -3041,7 +3041,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-controller os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-controller:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-controller:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkServer: arch: - amd64 @@ -3050,7 +3050,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-server 
os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-server:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-server:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkWorker: arch: - amd64 @@ -3059,12 +3059,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-worker os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-worker:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-worker:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkerbellChart: description: Helm chart for tinkerbell-chart imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tinkerbell-chart - uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.5-eks-a-v0.0.0-dev-build.1 version: v0.5.2+abcdef1 upgrader: upgrader: @@ -3075,7 +3075,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: upgrader os: linux - uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-28-18-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-28-24-eks-a-v0.0.0-dev-build.1 vSphere: clusterAPIController: arch: @@ -3085,11 +3085,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-vsphere os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.8.5-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.10.0-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/infrastructure-components.yaml kubeProxy: arch: - amd64 @@ -3098,7 +3098,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -3107,7 +3107,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -3118,11 +3118,11 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.28.0-eks-d-1-28-eks-a-v0.0.0-dev-build.1 metadata: - uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/metadata.yaml - version: v1.8.5+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/metadata.yaml + version: v1.10.0+abcdef1 - bootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.1/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.2/bootstrap-components.yaml controller: arch: - amd64 @@ -3131,7 +3131,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-bootstrap-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -3140,27 +3140,27 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 bottlerocketHostContainers: admin: arch: - amd64 description: Container image for bottlerocket-admin image - imageDigest: sha256:789cb7d120ac7633bda792660266214a486c182db67d89ee6508216fe2bc7f93 + imageDigest: sha256:2f884ebc34c0f54b6d75493ba2ad645f42525d6c90fe2099ae50708de6f86152 name: bottlerocket-admin os: linux - uri: public.ecr.aws/bottlerocket/bottlerocket-admin:v0.11.4 + uri: public.ecr.aws/bottlerocket/bottlerocket-admin:v0.11.6 control: arch: - amd64 description: Container image for bottlerocket-control image - imageDigest: sha256:29deada0d540dfe6ecb8238e0eb04ed4a9e6b8cf0d5cab232890ef9404643c30 + imageDigest: sha256:0ed73c9a2e35fab4287f3cbf18de084b740cace3234628c5e215adc407e52750 name: bottlerocket-control os: linux - uri: public.ecr.aws/bottlerocket/bottlerocket-control:v0.7.8 + uri: public.ecr.aws/bottlerocket/bottlerocket-control:v0.7.10 kubeadmBootstrap: arch: - amd64 @@ -3169,7 +3169,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-29-7-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-29-13-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -3179,7 +3179,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-acmesolver os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.5-eks-a-v0.0.0-dev-build.1 cainjector: arch: - amd64 @@ -3188,7 +3188,7 @@ spec: imageDigest: 
sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-cainjector os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.5-eks-a-v0.0.0-dev-build.1 controller: arch: - amd64 @@ -3197,7 +3197,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-controller os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.5-eks-a-v0.0.0-dev-build.1 ctl: arch: - amd64 @@ -3206,10 +3206,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-ctl os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.5-eks-a-v0.0.0-dev-build.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.13.2/cert-manager.yaml - version: v1.13.2+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.5/cert-manager.yaml + version: v1.14.5+abcdef1 webhook: arch: - amd64 @@ -3218,32 +3218,32 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-webhook os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.5-eks-a-v0.0.0-dev-build.1 cilium: cilium: arch: - amd64 description: Container image for cilium image - imageDigest: sha256:d9f2b7617a84689087d8e10c75d915018620fcc9e3123a31498296435642cf61 + imageDigest: sha256:8d5876112cd6de92c2d4cf9de3281d942ba3cc7df08b2109f94271bdfcb17f25 name: cilium os: linux - uri: public.ecr.aws/isovalent/cilium:v1.13.12-eksa.1 + uri: public.ecr.aws/isovalent/cilium:v1.13.16-eksa.2 helmChart: description: Helm chart for cilium-chart - imageDigest: sha256:5d1921b071bb9f59bc8d0e6b83fe8579669d0b34d6ffc2432baa1d15adb5872b + imageDigest: sha256:482c1d609f03993d254e5799bb267cfb5b7bf2950760db3709c307d6fc6a2c4c name: cilium-chart - uri: public.ecr.aws/isovalent/cilium:1.13.12-eksa.1 + uri: public.ecr.aws/isovalent/cilium:1.13.16-eksa.2 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.12-eksa.1/cilium.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.16-eksa.2/cilium.yaml operator: arch: - amd64 description: Container image for operator-generic image - imageDigest: sha256:d2c3ac2b5c4fb67a68939642a40032cb0e3a97ce49415d87aa482b9b6d663a07 + imageDigest: sha256:3a4eaf8cf250e816635ae8e49b3b622b9ab7b83af41a00dc811408c347bceb51 name: operator-generic os: linux - uri: public.ecr.aws/isovalent/operator-generic:v1.13.12-eksa.1 - version: v1.13.12-eksa.1 + uri: public.ecr.aws/isovalent/operator-generic:v1.13.16-eksa.2 + version: v1.13.16-eksa.2 cloudStack: clusterAPIController: arch: @@ -3264,7 +3264,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: 
public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -3273,13 +3273,13 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-cloudstack/manifests/infrastructure-cloudstack/v0.4.10-rc1/metadata.yaml version: v0.4.10-rc1+abcdef1 clusterAPI: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.1/core-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.2/core-components.yaml controller: arch: - amd64 @@ -3288,7 +3288,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -3297,13 +3297,13 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 controlPlane: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.1/control-plane-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.2/control-plane-components.yaml controller: arch: - amd64 @@ -3312,7 +3312,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-control-plane-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -3321,15 +3321,15 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 docker: clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.1/cluster-template-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/cluster-template-development.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.1/infrastructure-components-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/infrastructure-components-development.yaml kubeProxy: arch: - amd64 @@ -3338,7 +3338,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -3347,10 +3347,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-docker os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.7.2-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 eksD: ami: bottlerocket: {} @@ -3364,7 +3364,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.14/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.17/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz crictl: arch: - amd64 @@ -3373,7 +3373,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.29.0/cri-tools-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.30.0/cri-tools-v0.0.0-dev-build.0-linux-amd64.tar.gz etcdadm: arch: - amd64 @@ -3392,7 +3392,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/image-builder/v0.4.1/image-builder-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/image-builder/v0.5.0/image-builder-v0.0.0-dev-build.0-linux-amd64.tar.gz kindNode: arch: - amd64 @@ -3401,10 +3401,10 @@ spec: imageDigest: 
sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.29.1-eks-d-1-29-7-eks-a-v0.0.0-dev-build.1 - kubeVersion: v1.29.1 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-29/kubernetes-1-29-eks-7.yaml - name: kubernetes-1-29-eks-7 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.29.5-eks-d-1-29-13-eks-a-v0.0.0-dev-build.1 + kubeVersion: v1.29.5 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-29/kubernetes-1-29-eks-13.yaml + name: kubernetes-1-29-eks-13 ova: bottlerocket: {} raw: @@ -3418,7 +3418,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cli-tools os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.6-eks-a-v0.0.0-dev-build.1 clusterController: arch: - amd64 @@ -3427,9 +3427,9 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cluster-controller os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.6-eks-a-v0.0.0-dev-build.1 components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.1/eksa-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.6/eksa-components.yaml diagnosticCollector: arch: - amd64 @@ -3438,11 +3438,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-diagnostic-collector os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.6-eks-a-v0.0.0-dev-build.1 version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.12/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/bootstrap-components.yaml controller: arch: - amd64 @@ -3451,7 +3451,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: etcdadm-bootstrap-provider os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-bootstrap-provider:v1.0.12-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/etcdadm-bootstrap-provider:v1.0.13-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -3460,13 +3460,13 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.12/metadata.yaml - version: v1.0.12+abcdef1 + uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/metadata.yaml + version: v1.0.13+abcdef1 etcdadmController: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.19/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/bootstrap-components.yaml controller: arch: - amd64 @@ -3475,7 +3475,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: etcdadm-controller os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.19-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.21-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -3484,10 +3484,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.19/metadata.yaml - version: v1.0.19+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/metadata.yaml + version: v1.0.21+abcdef1 flux: helmController: arch: @@ -3497,7 +3497,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: helm-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/helm-controller:v0.37.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/helm-controller:v1.0.1-eks-a-v0.0.0-dev-build.1 kustomizeController: arch: - amd64 @@ -3506,7 +3506,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kustomize-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/kustomize-controller:v1.2.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/kustomize-controller:v1.3.0-eks-a-v0.0.0-dev-build.1 notificationController: arch: - amd64 @@ -3515,7 +3515,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: notification-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/notification-controller:v1.2.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/notification-controller:v1.3.0-eks-a-v0.0.0-dev-build.1 sourceController: arch: - amd64 @@ -3524,8 +3524,8 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: source-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.2.4-eks-a-v0.0.0-dev-build.1 - version: v2.2.3+abcdef1 + uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.3.0-eks-a-v0.0.0-dev-build.1 + version: v2.3.0+abcdef1 haproxy: image: arch: @@ -3535,11 +3535,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: haproxy os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/haproxy:v0.22.0-eks-a-v0.0.0-dev-build.1 + uri: 
public.ecr.aws/release-container-registry/kubernetes-sigs/kind/haproxy:v0.23.0-eks-a-v0.0.0-dev-build.1 kindnetd: manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/kind/manifests/kindnetd/v0.22.0/kindnetd.yaml - version: v0.22.0+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/kind/manifests/kindnetd/v0.23.0/kindnetd.yaml + version: v0.23.0+abcdef1 kubeVersion: "1.29" nutanix: cloudProvider: @@ -3559,11 +3559,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-nutanix os: linux - uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.5-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/infrastructure-components.yaml kubeVip: arch: - amd64 @@ -3572,10 +3572,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/metadata.yaml - version: v1.3.2+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/metadata.yaml + version: v1.3.5+abcdef1 packageController: credentialProviderPackage: arch: @@ -3585,12 +3585,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: credential-provider-package os: linux - uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.3.13-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.3-eks-a-v0.0.0-dev-build.1 helmChart: description: Helm chart for eks-anywhere-packages imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.3.13-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.3-eks-a-v0.0.0-dev-build.1 packageController: arch: - amd64 @@ -3599,7 +3599,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.3.13-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.3-eks-a-v0.0.0-dev-build.1 tokenRefresher: arch: - amd64 @@ -3608,8 +3608,8 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef 
name: ecr-token-refresher os: linux - uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.3.13-eks-a-v0.0.0-dev-build.1 - version: v0.3.13+abcdef1 + uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.3-eks-a-v0.0.0-dev-build.1 + version: v0.4.3+abcdef1 snow: bottlerocketBootstrapSnow: arch: @@ -3619,7 +3619,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap-snow os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-29-7-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-29-13-eks-a-v0.0.0-dev-build.1 components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml kubeVip: @@ -3630,7 +3630,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -3674,7 +3674,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/metadata.yaml tinkerbellStack: @@ -3687,7 +3687,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cexec os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/cexec:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/cexec:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 imageToDisk: arch: - amd64 @@ -3696,7 +3696,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: image2disk os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/image2disk:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/image2disk:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 kexec: arch: - amd64 @@ -3705,7 +3705,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kexec os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/kexec:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/kexec:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 ociToDisk: arch: - amd64 @@ -3714,7 +3714,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: oci2disk os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/oci2disk:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/oci2disk:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 reboot: arch: - amd64 @@ -3723,7 +3723,7 @@ spec: imageDigest: 
sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: reboot os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/reboot:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/reboot:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 writeFile: arch: - amd64 @@ -3732,7 +3732,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: writefile os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/writefile:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/writefile:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 boots: arch: - amd64 @@ -3750,7 +3750,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hegel os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hegel:v0.10.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hegel:v0.12.0-eks-a-v0.0.0-dev-build.1 hook: bootkit: arch: @@ -3760,7 +3760,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-bootkit os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-bootkit:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-bootkit:v0.8.1-eks-a-v0.0.0-dev-build.1 docker: arch: - amd64 @@ -3769,18 +3769,18 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-docker os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-docker:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-docker:v0.8.1-eks-a-v0.0.0-dev-build.1 initramfs: amd: description: Tinkerbell operating system installation environment (osie) component name: initramfs-x86_64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/initramfs-x86_64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/initramfs-x86_64 arm: description: Tinkerbell operating system installation environment (osie) component name: initramfs-aarch64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/initramfs-aarch64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/initramfs-aarch64 kernel: arch: - amd64 @@ -3789,18 +3789,18 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-kernel os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-kernel:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-kernel:v0.8.1-eks-a-v0.0.0-dev-build.1 vmlinuz: amd: description: Tinkerbell operating system installation environment (osie) component name: vmlinuz-x86_64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/vmlinuz-x86_64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/vmlinuz-x86_64 arm: description: Tinkerbell operating system installation environment (osie) component name: vmlinuz-aarch64 - uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/vmlinuz-aarch64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/vmlinuz-aarch64 rufio: arch: - amd64 @@ -3809,7 +3809,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: rufio os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/rufio:afd7cd82fa08dae8f9f3ffac96eb030176f3abbd-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/rufio:v0.3.3-eks-a-v0.0.0-dev-build.1 tink: tinkController: arch: @@ -3819,7 +3819,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-controller os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-controller:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-controller:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkServer: arch: - amd64 @@ -3828,7 +3828,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-server os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-server:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-server:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkWorker: arch: - amd64 @@ -3837,12 +3837,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-worker os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-worker:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-worker:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkerbellChart: description: Helm chart for tinkerbell-chart imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tinkerbell-chart - uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.5-eks-a-v0.0.0-dev-build.1 version: v0.5.2+abcdef1 upgrader: upgrader: @@ -3853,7 +3853,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: upgrader os: linux - uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-29-7-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-29-13-eks-a-v0.0.0-dev-build.1 vSphere: clusterAPIController: arch: @@ -3863,11 +3863,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-vsphere os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.8.5-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.10.0-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/infrastructure-components.yaml + uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/infrastructure-components.yaml kubeProxy: arch: - amd64 @@ -3876,7 +3876,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -3885,7 +3885,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -3896,6 +3896,784 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.29.0-eks-d-1-29-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/metadata.yaml - version: v1.8.5+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/metadata.yaml + version: v1.10.0+abcdef1 + - bootstrap: + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.2/bootstrap-components.yaml + controller: + arch: + - amd64 + - arm64 + description: Container image for kubeadm-bootstrap-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kubeadm-bootstrap-controller + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 + kubeProxy: + arch: + - amd64 + - arm64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 + bottlerocketHostContainers: + admin: + arch: + - amd64 + description: Container image for bottlerocket-admin image + imageDigest: sha256:2f884ebc34c0f54b6d75493ba2ad645f42525d6c90fe2099ae50708de6f86152 + name: bottlerocket-admin + os: linux + uri: public.ecr.aws/bottlerocket/bottlerocket-admin:v0.11.6 + control: + arch: + - amd64 + description: Container image for bottlerocket-control image + imageDigest: sha256:0ed73c9a2e35fab4287f3cbf18de084b740cace3234628c5e215adc407e52750 + name: bottlerocket-control + os: linux + uri: public.ecr.aws/bottlerocket/bottlerocket-control:v0.7.10 + kubeadmBootstrap: + arch: + - amd64 + - arm64 + description: Container image for bottlerocket-bootstrap image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: bottlerocket-bootstrap + os: linux + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-30-6-eks-a-v0.0.0-dev-build.1 + certManager: + acmesolver: + arch: + - amd64 + - arm64 + description: Container image for cert-manager-acmesolver image + imageDigest: 
sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cert-manager-acmesolver + os: linux + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.5-eks-a-v0.0.0-dev-build.1 + cainjector: + arch: + - amd64 + - arm64 + description: Container image for cert-manager-cainjector image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cert-manager-cainjector + os: linux + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.5-eks-a-v0.0.0-dev-build.1 + controller: + arch: + - amd64 + - arm64 + description: Container image for cert-manager-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cert-manager-controller + os: linux + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.5-eks-a-v0.0.0-dev-build.1 + ctl: + arch: + - amd64 + - arm64 + description: Container image for cert-manager-ctl image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cert-manager-ctl + os: linux + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.5-eks-a-v0.0.0-dev-build.1 + manifest: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.5/cert-manager.yaml + version: v1.14.5+abcdef1 + webhook: + arch: + - amd64 + - arm64 + description: Container image for cert-manager-webhook image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cert-manager-webhook + os: linux + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.5-eks-a-v0.0.0-dev-build.1 + cilium: + cilium: + arch: + - amd64 + description: Container image for cilium image + imageDigest: sha256:8d5876112cd6de92c2d4cf9de3281d942ba3cc7df08b2109f94271bdfcb17f25 + name: cilium + os: linux + uri: public.ecr.aws/isovalent/cilium:v1.13.16-eksa.2 + helmChart: + description: Helm chart for cilium-chart + imageDigest: sha256:482c1d609f03993d254e5799bb267cfb5b7bf2950760db3709c307d6fc6a2c4c + name: cilium-chart + uri: public.ecr.aws/isovalent/cilium:1.13.16-eksa.2 + manifest: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.16-eksa.2/cilium.yaml + operator: + arch: + - amd64 + description: Container image for operator-generic image + imageDigest: sha256:3a4eaf8cf250e816635ae8e49b3b622b9ab7b83af41a00dc811408c347bceb51 + name: operator-generic + os: linux + uri: public.ecr.aws/isovalent/operator-generic:v1.13.16-eksa.2 + version: v1.13.16-eksa.2 + cloudStack: + clusterAPIController: + arch: + - amd64 + - arm64 + description: Container image for cluster-api-provider-cloudstack image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cluster-api-provider-cloudstack + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-cloudstack/release/manager:v0.4.10-rc1-eks-a-v0.0.0-dev-build.1 + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-cloudstack/manifests/infrastructure-cloudstack/v0.4.10-rc1/infrastructure-components.yaml + kubeRbacProxy: + arch: + - amd64 + - arm64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: 
public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 + kubeVip: + arch: + - amd64 + - arm64 + description: Container image for kube-vip image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-vip + os: linux + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-cloudstack/manifests/infrastructure-cloudstack/v0.4.10-rc1/metadata.yaml + version: v0.4.10-rc1+abcdef1 + clusterAPI: + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.2/core-components.yaml + controller: + arch: + - amd64 + - arm64 + description: Container image for cluster-api-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cluster-api-controller + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 + kubeProxy: + arch: + - amd64 + - arm64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 + controlPlane: + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.2/control-plane-components.yaml + controller: + arch: + - amd64 + - arm64 + description: Container image for kubeadm-control-plane-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kubeadm-control-plane-controller + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 + kubeProxy: + arch: + - amd64 + - arm64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 + docker: + clusterTemplate: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/cluster-template-development.yaml + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/infrastructure-components-development.yaml + kubeProxy: + arch: + - amd64 + - arm64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 + manager: + arch: + - amd64 + - arm64 + description: Container image for cluster-api-provider-docker image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cluster-api-provider-docker + os: 
linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.7.2-eks-a-v0.0.0-dev-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 + eksD: + ami: + bottlerocket: {} + channel: 1-30 + components: https://distro.eks.amazonaws.com/crds/releases.distro.eks.amazonaws.com-v1alpha1.yaml + containerd: + arch: + - amd64 + description: containerd tarball for linux/amd64 + name: containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz + os: linux + sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.17/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz + crictl: + arch: + - amd64 + description: cri-tools tarball for linux/amd64 + name: cri-tools-v0.0.0-dev-build.0-linux-amd64.tar.gz + os: linux + sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.30.0/cri-tools-v0.0.0-dev-build.0-linux-amd64.tar.gz + etcdadm: + arch: + - amd64 + description: etcdadm tarball for linux/amd64 + name: etcdadm-v0.0.0-dev-build.0-linux-amd64.tar.gz + os: linux + sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm/f089d308442c18f487a52d09fd067ae9ac7cd8f2/etcdadm-v0.0.0-dev-build.0-linux-amd64.tar.gz + gitCommit: 0123456789abcdef0123456789abcdef01234567 + imagebuilder: + arch: + - amd64 + description: image-builder tarball for linux/amd64 + name: image-builder-v0.0.0-dev-build.0-linux-amd64.tar.gz + os: linux + sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/image-builder/v0.5.0/image-builder-v0.0.0-dev-build.0-linux-amd64.tar.gz + kindNode: + arch: + - amd64 + - arm64 + description: Container image for kind-node image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kind-node + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.30.1-eks-d-1-30-6-eks-a-v0.0.0-dev-build.1 + kubeVersion: v1.30.1 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-30/kubernetes-1-30-eks-6.yaml + name: kubernetes-1-30-eks-6 + ova: + bottlerocket: {} + raw: + bottlerocket: {} + eksa: + cliTools: + arch: + - amd64 + - arm64 + description: Container image for eks-anywhere-cli-tools image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: eks-anywhere-cli-tools + os: linux + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.6-eks-a-v0.0.0-dev-build.1 + clusterController: + arch: + - amd64 + - arm64 + description: Container image for eks-anywhere-cluster-controller image + imageDigest: 
sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: eks-anywhere-cluster-controller + os: linux + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.6-eks-a-v0.0.0-dev-build.1 + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.6/eksa-components.yaml + diagnosticCollector: + arch: + - amd64 + - arm64 + description: Container image for eks-anywhere-diagnostic-collector image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: eks-anywhere-diagnostic-collector + os: linux + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.6-eks-a-v0.0.0-dev-build.1 + version: v0.0.0-dev+build.0+abcdef1 + etcdadmBootstrap: + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/bootstrap-components.yaml + controller: + arch: + - amd64 + - arm64 + description: Container image for etcdadm-bootstrap-provider image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: etcdadm-bootstrap-provider + os: linux + uri: public.ecr.aws/release-container-registry/aws/etcdadm-bootstrap-provider:v1.0.13-eks-a-v0.0.0-dev-build.1 + kubeProxy: + arch: + - amd64 + - arm64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/metadata.yaml + version: v1.0.13+abcdef1 + etcdadmController: + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/bootstrap-components.yaml + controller: + arch: + - amd64 + - arm64 + description: Container image for etcdadm-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: etcdadm-controller + os: linux + uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.21-eks-a-v0.0.0-dev-build.1 + kubeProxy: + arch: + - amd64 + - arm64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/metadata.yaml + version: v1.0.21+abcdef1 + flux: + helmController: + arch: + - amd64 + - arm64 + description: Container image for helm-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: helm-controller + os: linux + uri: public.ecr.aws/release-container-registry/fluxcd/helm-controller:v1.0.1-eks-a-v0.0.0-dev-build.1 + kustomizeController: + arch: + - amd64 + - arm64 + description: Container image for kustomize-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kustomize-controller + os: linux + uri: 
public.ecr.aws/release-container-registry/fluxcd/kustomize-controller:v1.3.0-eks-a-v0.0.0-dev-build.1 + notificationController: + arch: + - amd64 + - arm64 + description: Container image for notification-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: notification-controller + os: linux + uri: public.ecr.aws/release-container-registry/fluxcd/notification-controller:v1.3.0-eks-a-v0.0.0-dev-build.1 + sourceController: + arch: + - amd64 + - arm64 + description: Container image for source-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: source-controller + os: linux + uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.3.0-eks-a-v0.0.0-dev-build.1 + version: v2.3.0+abcdef1 + haproxy: + image: + arch: + - amd64 + - arm64 + description: Container image for haproxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: haproxy + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/haproxy:v0.23.0-eks-a-v0.0.0-dev-build.1 + kindnetd: + manifest: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/kind/manifests/kindnetd/v0.23.0/kindnetd.yaml + version: v0.23.0+abcdef1 + kubeVersion: "1.30" + nutanix: + cloudProvider: + arch: + - amd64 + - arm64 + description: Container image for cloud-provider-nutanix image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cloud-provider-nutanix + os: linux + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cloud-provider-nutanix/controller:v0.3.2-eks-a-v0.0.0-dev-build.1 + clusterAPIController: + arch: + - amd64 + - arm64 + description: Container image for cluster-api-provider-nutanix image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cluster-api-provider-nutanix + os: linux + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.5-eks-a-v0.0.0-dev-build.1 + clusterTemplate: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/cluster-template.yaml + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/infrastructure-components.yaml + kubeVip: + arch: + - amd64 + - arm64 + description: Container image for kube-vip image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-vip + os: linux + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/metadata.yaml + version: v1.3.5+abcdef1 + packageController: + credentialProviderPackage: + arch: + - amd64 + - arm64 + description: Container image for credential-provider-package image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: credential-provider-package + os: linux + uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.3-eks-a-v0.0.0-dev-build.1 + helmChart: + description: Helm chart for eks-anywhere-packages + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: eks-anywhere-packages + uri: 
public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.3-eks-a-v0.0.0-dev-build.1 + packageController: + arch: + - amd64 + - arm64 + description: Container image for eks-anywhere-packages image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: eks-anywhere-packages + os: linux + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.3-eks-a-v0.0.0-dev-build.1 + tokenRefresher: + arch: + - amd64 + - arm64 + description: Container image for ecr-token-refresher image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: ecr-token-refresher + os: linux + uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.3-eks-a-v0.0.0-dev-build.1 + version: v0.4.3+abcdef1 + snow: + bottlerocketBootstrapSnow: + arch: + - amd64 + - arm64 + description: Container image for bottlerocket-bootstrap-snow image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: bottlerocket-bootstrap-snow + os: linux + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-30-6-eks-a-v0.0.0-dev-build.1 + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml + kubeVip: + arch: + - amd64 + - arm64 + description: Container image for kube-vip image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-vip + os: linux + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 + manager: + arch: + - amd64 + - arm64 + description: Container image for cluster-api-snow-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cluster-api-snow-controller + os: linux + uri: public.ecr.aws/release-container-registry/aws/cluster-api-provider-aws-snow/manager:v0.1.27-eks-a-v0.0.0-dev-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/metadata.yaml + version: v0.1.27+abcdef1 + tinkerbell: + clusterAPIController: + arch: + - amd64 + - arm64 + description: Container image for cluster-api-provider-tinkerbell image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cluster-api-provider-tinkerbell + os: linux + uri: public.ecr.aws/release-container-registry/tinkerbell/cluster-api-provider-tinkerbell:v0.5.2-eks-a-v0.0.0-dev-build.1 + clusterTemplate: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/cluster-template.yaml + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/infrastructure-components.yaml + envoy: + arch: + - amd64 + - arm64 + description: Container image for envoy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: envoy + os: linux + uri: public.ecr.aws/release-container-registry/envoyproxy/envoy:v1.22.2.0-prod-eks-a-v0.0.0-dev-build.1 + kubeVip: + arch: + - amd64 + - arm64 + description: Container image for kube-vip image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-vip + os: linux + uri: 
public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/metadata.yaml + tinkerbellStack: + actions: + cexec: + arch: + - amd64 + - arm64 + description: Container image for cexec image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cexec + os: linux + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/cexec:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 + imageToDisk: + arch: + - amd64 + - arm64 + description: Container image for image2disk image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: image2disk + os: linux + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/image2disk:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 + kexec: + arch: + - amd64 + - arm64 + description: Container image for kexec image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kexec + os: linux + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/kexec:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 + ociToDisk: + arch: + - amd64 + - arm64 + description: Container image for oci2disk image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: oci2disk + os: linux + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/oci2disk:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 + reboot: + arch: + - amd64 + - arm64 + description: Container image for reboot image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: reboot + os: linux + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/reboot:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 + writeFile: + arch: + - amd64 + - arm64 + description: Container image for writefile image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: writefile + os: linux + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/writefile:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 + boots: + arch: + - amd64 + - arm64 + description: Container image for boots image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: boots + os: linux + uri: public.ecr.aws/release-container-registry/tinkerbell/boots:v0.8.1-eks-a-v0.0.0-dev-build.1 + hegel: + arch: + - amd64 + - arm64 + description: Container image for hegel image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: hegel + os: linux + uri: public.ecr.aws/release-container-registry/tinkerbell/hegel:v0.12.0-eks-a-v0.0.0-dev-build.1 + hook: + bootkit: + arch: + - amd64 + - arm64 + description: Container image for hook-bootkit image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: hook-bootkit + os: linux + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-bootkit:v0.8.1-eks-a-v0.0.0-dev-build.1 + docker: + arch: + - amd64 + - arm64 + description: Container image for hook-docker image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: hook-docker + os: linux + uri: 
public.ecr.aws/release-container-registry/tinkerbell/hook-docker:v0.8.1-eks-a-v0.0.0-dev-build.1 + initramfs: + amd: + description: Tinkerbell operating system installation environment (osie) + component + name: initramfs-x86_64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/initramfs-x86_64 + arm: + description: Tinkerbell operating system installation environment (osie) + component + name: initramfs-aarch64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/initramfs-aarch64 + kernel: + arch: + - amd64 + - arm64 + description: Container image for hook-kernel image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: hook-kernel + os: linux + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-kernel:v0.8.1-eks-a-v0.0.0-dev-build.1 + vmlinuz: + amd: + description: Tinkerbell operating system installation environment (osie) + component + name: vmlinuz-x86_64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/vmlinuz-x86_64 + arm: + description: Tinkerbell operating system installation environment (osie) + component + name: vmlinuz-aarch64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/vmlinuz-aarch64 + rufio: + arch: + - amd64 + - arm64 + description: Container image for rufio image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: rufio + os: linux + uri: public.ecr.aws/release-container-registry/tinkerbell/rufio:v0.3.3-eks-a-v0.0.0-dev-build.1 + tink: + tinkController: + arch: + - amd64 + - arm64 + description: Container image for tink-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: tink-controller + os: linux + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-controller:v0.10.0-eks-a-v0.0.0-dev-build.1 + tinkServer: + arch: + - amd64 + - arm64 + description: Container image for tink-server image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: tink-server + os: linux + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-server:v0.10.0-eks-a-v0.0.0-dev-build.1 + tinkWorker: + arch: + - amd64 + - arm64 + description: Container image for tink-worker image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: tink-worker + os: linux + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-worker:v0.10.0-eks-a-v0.0.0-dev-build.1 + tinkerbellChart: + description: Helm chart for tinkerbell-chart + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: tinkerbell-chart + uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.5-eks-a-v0.0.0-dev-build.1 + version: v0.5.2+abcdef1 + upgrader: + upgrader: + arch: + - amd64 + - arm64 + description: Container image for upgrader image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: upgrader + os: linux + uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-30-6-eks-a-v0.0.0-dev-build.1 + vSphere: + clusterAPIController: + arch: + - amd64 + - arm64 + description: Container image for cluster-api-provider-vsphere image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cluster-api-provider-vsphere + os: linux + uri: 
public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.10.0-eks-a-v0.0.0-dev-build.1 + clusterTemplate: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/cluster-template.yaml + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/infrastructure-components.yaml + kubeProxy: + arch: + - amd64 + - arm64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 + kubeVip: + arch: + - amd64 + - arm64 + description: Container image for kube-vip image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-vip + os: linux + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 + manager: + arch: + - amd64 + - arm64 + description: Container image for cloud-provider-vsphere image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cloud-provider-vsphere + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.30.0-eks-d-1-30-eks-a-v0.0.0-dev-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/metadata.yaml + version: v1.10.0+abcdef1 status: {} diff --git a/release/cli/pkg/operations/upload.go b/release/cli/pkg/operations/upload.go index 20926d419a7b..d5fa633a1f28 100644 --- a/release/cli/pkg/operations/upload.go +++ b/release/cli/pkg/operations/upload.go @@ -92,7 +92,7 @@ func handleArchiveUpload(_ context.Context, r *releasetypes.ReleaseConfig, artif archiveFile := filepath.Join(artifact.Archive.ArtifactPath, artifact.Archive.ReleaseName) fmt.Printf("Archive - %s\n", archiveFile) key := filepath.Join(artifact.Archive.ReleaseS3Path, artifact.Archive.ReleaseName) - err := s3.UploadFile(archiveFile, aws.String(r.ReleaseBucket), aws.String(key), r.ReleaseClients.S3.Uploader) + err := s3.UploadFile(archiveFile, aws.String(r.ReleaseBucket), aws.String(key), r.ReleaseClients.S3.Uploader, artifact.Archive.Private) if err != nil { return errors.Cause(err) } @@ -109,7 +109,7 @@ func handleArchiveUpload(_ context.Context, r *releasetypes.ReleaseConfig, artif checksumFile := filepath.Join(artifact.Archive.ArtifactPath, artifact.Archive.ReleaseName) + extension fmt.Printf("Checksum - %s\n", checksumFile) key := filepath.Join(artifact.Archive.ReleaseS3Path, artifact.Archive.ReleaseName) + extension - err := s3.UploadFile(checksumFile, aws.String(r.ReleaseBucket), aws.String(key), r.ReleaseClients.S3.Uploader) + err := s3.UploadFile(checksumFile, aws.String(r.ReleaseBucket), aws.String(key), r.ReleaseClients.S3.Uploader, artifact.Archive.Private) if err != nil { return errors.Cause(err) } @@ -122,7 +122,7 @@ func handleManifestUpload(_ context.Context, r *releasetypes.ReleaseConfig, arti manifestFile := filepath.Join(artifact.Manifest.ArtifactPath, artifact.Manifest.ReleaseName) fmt.Printf("Manifest - %s\n", manifestFile) key := filepath.Join(artifact.Manifest.ReleaseS3Path, artifact.Manifest.ReleaseName) - err := s3.UploadFile(manifestFile, aws.String(r.ReleaseBucket), aws.String(key), 
r.ReleaseClients.S3.Uploader) + err := s3.UploadFile(manifestFile, aws.String(r.ReleaseBucket), aws.String(key), r.ReleaseClients.S3.Uploader, artifact.Manifest.Private) if err != nil { return errors.Cause(err) } diff --git a/release/cli/pkg/types/types.go b/release/cli/pkg/types/types.go index ab136dc4ac1d..245ed7feb1c3 100644 --- a/release/cli/pkg/types/types.go +++ b/release/cli/pkg/types/types.go @@ -77,6 +77,7 @@ type ArchiveArtifact struct { ProjectPath string SourcedFromBranch string ImageFormat string + Private bool } type ImageArtifact struct { @@ -102,6 +103,7 @@ type ManifestArtifact struct { ProjectPath string SourcedFromBranch string Component string + Private bool } type Artifact struct { diff --git a/release/cli/pkg/util/artifacts/artifacts.go b/release/cli/pkg/util/artifacts/artifacts.go index afbbe2a01d2d..7281ae7c0a57 100644 --- a/release/cli/pkg/util/artifacts/artifacts.go +++ b/release/cli/pkg/util/artifacts/artifacts.go @@ -23,11 +23,11 @@ import ( ) func IsObjectNotFoundError(err error) bool { - return err.Error() == "Requested object not found" + return strings.Contains(err.Error(), "requested object not found") } func IsImageNotFoundError(err error) bool { - return err.Error() == "Requested image not found" + return strings.Contains(err.Error(), "requested image not found") } func GetFakeSHA(hashType int) (string, error) { diff --git a/release/cli/pkg/util/release/release.go b/release/cli/pkg/util/release/release.go index 48f74b2d80ce..1d8842a3e0e0 100644 --- a/release/cli/pkg/util/release/release.go +++ b/release/cli/pkg/util/release/release.go @@ -40,7 +40,12 @@ func GetPreviousReleaseIfExists(r *releasetypes.ReleaseConfig) (*anywherev1alpha release := &anywherev1alpha1.Release{} eksAReleaseManifestKey := r.ReleaseManifestFilepath() - if !s3.KeyExists(r.ReleaseBucket, eksAReleaseManifestKey) { + keyExists, err := s3.KeyExists(r.ReleaseClients.S3.Client, r.ReleaseBucket, eksAReleaseManifestKey, false) + if err != nil { + return nil, fmt.Errorf("checking if object [%s] is present in S3 bucket: %v", eksAReleaseManifestKey, err) + } + + if !keyExists { return emptyRelease, nil } diff --git a/release/triggers/brew-version-release/CLI_RELEASE_VERSION b/release/triggers/brew-version-release/CLI_RELEASE_VERSION index d94dbfeb9d69..b3f4adba7b0f 100644 --- a/release/triggers/brew-version-release/CLI_RELEASE_VERSION +++ b/release/triggers/brew-version-release/CLI_RELEASE_VERSION @@ -1 +1 @@ -v0.19.1 \ No newline at end of file +v0.19.3 \ No newline at end of file diff --git a/scripts/e2e_test_docker.sh b/scripts/e2e_test_docker.sh index 61fd597e863b..278a15872126 100755 --- a/scripts/e2e_test_docker.sh +++ b/scripts/e2e_test_docker.sh @@ -36,7 +36,7 @@ fi REPO_ROOT=$(git rev-parse --show-toplevel) BIN_FOLDER=$REPO_ROOT/bin -TEST_REGEX="${1:-TestDockerKubernetes125SimpleFlow}" +TEST_REGEX="${1:-TestDockerKubernetes130SimpleFlow}" BRANCH_NAME="${2:-main}" diff --git a/scripts/golden_create_pr.sh b/scripts/golden_create_pr.sh index 2164c826df05..4032953c0f6a 100755 --- a/scripts/golden_create_pr.sh +++ b/scripts/golden_create_pr.sh @@ -43,7 +43,7 @@ git remote add upstream git@github.com:${UPSTREAM_ORG}/${REPO}.git git checkout -b $PR_BRANCH git diff -git add release/cli/pkg/test/testdata/*.yaml +git add release/cli/pkg/operations/testdata/*.yaml # If some other files get modified, the changes should be ignored git restore . 
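The S3-related hunks above thread a per-artifact `Private` flag into `s3.UploadFile`, make the key-existence check return an error instead of collapsing failures into a boolean, and switch the not-found helpers to substring matching so wrapped errors still match. As a rough, self-contained sketch of that calling pattern (the `Uploader`, `keyExists`, and `isObjectNotFoundError` names below are hypothetical stand-ins for illustration, not the repository's actual S3 helper package):

```go
// Illustrative sketch only: hypothetical stand-ins showing how a per-artifact
// Private flag and an error-aware existence check could be threaded through an
// upload step, plus substring-based not-found matching.
package main

import (
	"fmt"
	"strings"
)

// Uploader is a stand-in for the real S3 upload client.
type Uploader struct{}

// UploadFile uploads a local file to bucket/key, choosing the object ACL from
// the private flag instead of assuming every artifact is public.
func (u *Uploader) UploadFile(file, bucket, key string, private bool) error {
	acl := "public-read"
	if private {
		acl = "private"
	}
	fmt.Printf("uploading %s to s3://%s/%s with ACL %q\n", file, bucket, key, acl)
	return nil
}

// keyExists reports whether the key is present in the bucket, surfacing
// transport or permission errors to the caller rather than treating them
// as "object not found".
func keyExists(u *Uploader, bucket, key string) (bool, error) {
	// A real implementation would issue a HEAD-object request here.
	_ = u
	_, _ = bucket, key
	return false, nil
}

// isObjectNotFoundError matches on a lower-cased substring so wrapped or
// annotated errors are still recognized.
func isObjectNotFoundError(err error) bool {
	return err != nil && strings.Contains(err.Error(), "requested object not found")
}

func main() {
	u := &Uploader{}

	exists, err := keyExists(u, "release-bucket", "releases/eks-a-release.yaml")
	if err != nil {
		// Surface the failure instead of silently treating it as "object absent".
		fmt.Printf("checking if release manifest exists: %v\n", err)
		return
	}

	if !exists {
		// No previous release manifest: upload one, keeping the object private.
		if err := u.UploadFile("eks-a-release.yaml", "release-bucket", "releases/eks-a-release.yaml", true); err != nil {
			fmt.Printf("uploading release manifest: %v\n", err)
		}
	}
	_ = isObjectNotFoundError(err)
}
```

Under these assumptions, callers that previously branched only on a boolean now have to decide between "the object is genuinely absent" and "the existence check itself failed", which is what the `GetPreviousReleaseIfExists` change above does by returning a wrapped error.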
FILES_ADDED=$(git diff --staged --name-only) diff --git a/test/e2e/QUICK_TESTS.yaml b/test/e2e/QUICK_TESTS.yaml index 023b7d23ca41..d3ff8a0272ed 100644 --- a/test/e2e/QUICK_TESTS.yaml +++ b/test/e2e/QUICK_TESTS.yaml @@ -1,30 +1,30 @@ quick_tests: # Docker -- TestDocker.*128 +- TestDocker.*130 # vSphere -- ^TestVSphereKubernetes128To129RedHatUpgrade$ -- TestVSphereKubernetes128To129StackedEtcdRedHatUpgrade -- ^TestVSphereKubernetes128UbuntuTo129Upgrade$ -- TestVSphereKubernetes128UbuntuTo129StackedEtcdUpgrade -- TestVSphereKubernetes128To129Ubuntu2204Upgrade -- TestVSphereKubernetes128To129Ubuntu2204StackedEtcdUpgrade -- TestVSphereKubernetes129Ubuntu2004To2204Upgrade -- TestVSphereKubernetes128BottlerocketTo129Upgrade -- TestVSphereKubernetes128BottlerocketTo129StackedEtcdUpgrade +- ^TestVSphereKubernetes129To130RedHatUpgrade$ +- TestVSphereKubernetes129To130StackedEtcdRedHatUpgrade +- ^TestVSphereKubernetes129UbuntuTo130Upgrade$ +- TestVSphereKubernetes129UbuntuTo130StackedEtcdUpgrade +- TestVSphereKubernetes129To130Ubuntu2204Upgrade +- TestVSphereKubernetes129To130Ubuntu2204StackedEtcdUpgrade +- TestVSphereKubernetes130Ubuntu2004To2204Upgrade +- TestVSphereKubernetes129BottlerocketTo130Upgrade +- TestVSphereKubernetes129BottlerocketTo130StackedEtcdUpgrade # CloudStack -- TestCloudStackKubernetes128To129RedhatMultipleFieldsUpgrade -- TestCloudStackKubernetes128To129StackedEtcdRedhatMultipleFieldsUpgrade +- TestCloudStackKubernetes129To130RedhatMultipleFieldsUpgrade +- TestCloudStackKubernetes129To130StackedEtcdRedhatMultipleFieldsUpgrade # Nutanix -- TestNutanixKubernetes128to129RedHat9Upgrade -- TestNutanixKubernetes128to129StackedEtcdRedHat9Upgrade -- TestNutanixKubernetes128to129RedHat8Upgrade -- TestNutanixKubernetes128to129StackedEtcdRedHat8Upgrade -- TestNutanixKubernetes128To129UbuntuUpgrade -- TestNutanixKubernetes128To129StackedEtcdUbuntuUpgrade +- TestNutanixKubernetes129to130RedHat9Upgrade +- TestNutanixKubernetes129to130StackedEtcdRedHat9Upgrade +- TestNutanixKubernetes129to130RedHat8Upgrade +- TestNutanixKubernetes129to130StackedEtcdRedHat8Upgrade +- TestNutanixKubernetes129To130UbuntuUpgrade +- TestNutanixKubernetes129To130StackedEtcdUbuntuUpgrade # Snow # - TestSnowKubernetes128SimpleFlow # - TestSnowKubernetes128StackedEtcdSimpleFlow # Tinkerbell -- ^TestTinkerbellKubernetes128UbuntuTo129Upgrade$ -- TestTinkerbellKubernetes129Ubuntu2004To2204Upgrade -- TestTinkerbellKubernetes128To129Ubuntu2204Upgrade +- ^TestTinkerbellKubernetes129UbuntuTo130Upgrade$ +- TestTinkerbellKubernetes130Ubuntu2004To2204Upgrade +- TestTinkerbellKubernetes129To130Ubuntu2204Upgrade \ No newline at end of file diff --git a/test/e2e/README.md b/test/e2e/README.md index 41e3eb87fd20..553475361c03 100644 --- a/test/e2e/README.md +++ b/test/e2e/README.md @@ -11,8 +11,8 @@ or # # The makefile will include the .env file and export all the vars to the environment for you # -# By default the local-e2e target will run TestDockerKubernetes125SimpleFlow. You can either -# override LOCAL_E2E_TESTS in your .env file or pass it on the cli every time (i.e LOCAL_E2E_TESTS=TestDockerKubernetes125SimpleFlow) +# By default the local-e2e target will run TestDockerKubernetes130SimpleFlow. 
You can either +# override LOCAL_E2E_TESTS in your .env file or pass it on the cli every time (i.e LOCAL_E2E_TESTS=TestDockerKubernetes130SimpleFlow) make local-e2e ``` or @@ -45,7 +45,7 @@ In order to use bundle overrides, take your bundle overrides yaml file and move You will also need to set the environment variable `T_BUNDLES_OVERRIDE=true` ### Cleaning up VM's after a test run -In order to clean up VM's after a test runs automatically, set `T_CLEANUP_VMS=true` +In order to clean up VM's after a test runs automatically, set `T_CLEANUP_RESOURCES=true` ## VSphere tests requisites The following env variables need to be set: diff --git a/test/e2e/SKIPPED_TESTS.yaml b/test/e2e/SKIPPED_TESTS.yaml index 869f4df94866..28b47ffd9bb0 100644 --- a/test/e2e/SKIPPED_TESTS.yaml +++ b/test/e2e/SKIPPED_TESTS.yaml @@ -1,21 +1,30 @@ skipped_tests: +# Docker +# Skipping 1.30 curated packages tests until we add support +- TestDockerKubernetes130CuratedPackagesSimpleFlow +- TestDockerKubernetes130CuratedPackagesAdotSimpleFlow +- TestDockerKubernetes130CuratedPackagesEmissarySimpleFlow +- TestDockerKubernetes130CuratedPackagesHarborSimpleFlow +- TestDockerKubernetes130CuratedPackagesPrometheusSimpleFlow +- TestDockerKubernetes130CuratedPackagesMetalLB + # CloudStack #Airgapped tests -- TestCloudStackKubernetes125RedhatAirgappedRegistryMirror - TestCloudStackKubernetes126RedhatAirgappedRegistryMirror +- TestCloudStackKubernetes128RedhatAirgappedProxy # Proxy tests -- TestCloudStackKubernetes125RedhatProxyConfigAPI - TestCloudStackKubernetes126RedhatProxyConfigAPI - TestCloudStackKubernetes127RedhatProxyConfigAPI - TestCloudStackKubernetes128RedhatProxyConfigAPI +- TestCloudStackKubernetes130RedhatProxyConfigAPI # MultiEndpoint -- TestCloudStackKubernetes125MultiEndpointSimpleFlow - TestCloudStackKubernetes126MultiEndpointSimpleFlow - TestCloudStackKubernetes127MultiEndpointSimpleFlow - TestCloudStackKubernetes128MultiEndpointSimpleFlow - TestCloudStackKubernetes129MultiEndpointSimpleFlow +- TestCloudStackKubernetes130MultiEndpointSimpleFlow # Side effects - TestCloudStackKubernetes129WithOIDCManagementClusterUpgradeFromLatestSideEffects @@ -25,6 +34,47 @@ skipped_tests: # Nutanix +# Curated packages test for new K8s version +- TestVSphereKubernetes130CuratedPackagesSimpleFlow +- TestVSphereKubernetes130BottleRocketCuratedPackagesSimpleFlow +- TestVSphereKubernetes130CuratedPackagesEmissarySimpleFlow +- TestVSphereKubernetes130BottleRocketCuratedPackagesEmissarySimpleFlow +- TestVSphereKubernetes130CuratedPackagesHarborSimpleFlow +- TestVSphereKubernetes130BottleRocketCuratedPackagesHarborSimpleFlow +- TestVSphereKubernetes130CuratedPackagesAdotUpdateFlow +- TestVSphereKubernetes130BottleRocketCuratedPackagesAdotUpdateFlow +- TestVSphereKubernetes130UbuntuCuratedPackagesClusterAutoscalerSimpleFlow +- TestVSphereKubernetes130UbuntuCuratedPackagesPrometheusSimpleFlow +- TestVSphereKubernetes130UbuntuWorkloadClusterCuratedPackagesSimpleFlow +- TestVSphereKubernetes130BottleRocketWorkloadClusterCuratedPackagesCertManagerSimpleFlow +- TestVSphereKubernetes130UbuntuWorkloadClusterCuratedPackagesCertManagerSimpleFlow +- TestVSphereKubernetes130UbuntuWorkloadClusterCuratedPackagesEmissarySimpleFlow +- TestVSphereKubernetes130BottleRocketCuratedPackagesClusterAutoscalerSimpleFlow +- TestVSphereKubernetes130BottleRocketCuratedPackagesPrometheusSimpleFlow +- TestVSphereKubernetes130BottleRocketWorkloadClusterCuratedPackagesSimpleFlow +- TestVSphereKubernetes130BottleRocketWorkloadClusterCuratedPackagesEmissarySimpleFlow 
+- TestCloudStackKubernetes130RedhatCuratedPackagesSimpleFlow +- TestCloudStackKubernetes130RedhatCuratedPackagesEmissarySimpleFlow +- TestCloudStackKubernetes130RedhatCuratedPackagesHarborSimpleFlow +- TestCloudStackKubernetes130RedhatWorkloadClusterCuratedPackagesSimpleFlow +- TestCloudStackKubernetes130RedhatCuratedPackagesPrometheusSimpleFlow +- TestCloudStackKubernetes130RedhatCuratedPackagesAdotUpdateFlow +- TestCloudStackKubernetes130RedHatCuratedPackagesClusterAutoscalerSimpleFlow +- TestCloudStackKubernetes130RedhatCuratedPackagesAdotSimpleFlow +- TestCloudStackKubernetes130RedhatCuratedPackagesCertManagerSimpleFlow +- TestCloudStackKubernetes130RedhatWorkloadClusterCuratedPackagesEmissarySimpleFlow +- TestNutanixKubernetes130UbuntuCuratedPackagesSimpleFlow +- TestNutanixKubernetes130UbuntuCuratedPackagesEmissarySimpleFlow +- TestNutanixKubernetes130UbuntuCuratedPackagesHarborSimpleFlow +- TestNutanixKubernetes130UbuntuCuratedPackagesAdotSimpleFlow +- TestNutanixKubernetes130UbuntuCuratedPackagesPrometheusSimpleFlow +- TestNutanixKubernetes130UbuntuCuratedPackagesClusterAutoscalerSimpleFlow + +# UpgradeFromLatestTests for new K8s version (expected to work only after the release is out) +- TestDockerKubernetes130AirgappedUpgradeFromLatestRegistryMirrorAndCert +- TestDockerKubernetes129to130UpgradeFromLatestMinorReleaseAPI +- TestCloudStackKubernetes130WithOIDCManagementClusterUpgradeFromLatestSideEffects + # Snow - TestSnowKubernetes125SimpleFlow - TestSnowKubernetes126SimpleFlow @@ -65,21 +115,25 @@ skipped_tests: - TestTinkerbellKubernetes126UbuntuExternalEtcdSimpleFlow # Skipping skip power action tests - Not going to work because e2e test powers on CP and worker node at the same time and worker node times out early waiting for ipxe # Skipping a few redundant tests -- TestTinkerbellKubernetes125RedHatSimpleFlow - TestTinkerbellKubernetes126RedHatSimpleFlow - TestTinkerbellKubernetes127RedHatSimpleFlow - TestTinkerbellKubernetes128RedHatSimpleFlow - TestTinkerbellKubernetes129RedHatSimpleFlow -- TestTinkerbellKubernetes125UbuntuSimpleFlow +- TestTinkerbellKubernetes130RedHatSimpleFlow - TestTinkerbellKubernetes126UbuntuSimpleFlow -- TestTinkerbellKubernetes125Ubuntu2204SimpleFlow +- TestTinkerbellKubernetes127UbuntuSimpleFlow - TestTinkerbellKubernetes126Ubuntu2204SimpleFlow -- TestTinkerbellKubernetes125To126Ubuntu2204Upgrade +- TestTinkerbellKubernetes127Ubuntu2204SimpleFlow +- TestTinkerbellKubernetes126To127Ubuntu2204Upgrade - TestTinkerbellKubernetes126Ubuntu2004To2204Upgrade -- TestTinkerbellKubernetes126UbuntuThreeWorkersSimpleFlow -- TestTinkerbellKubernetes125UbuntuWorkerNodeScaleUpWithAPI +- TestTinkerbellKubernetes127Ubuntu2004To2204Upgrade - TestTinkerbellKubernetes125UbuntuAddWorkerNodeGroupWithAPI - TestTinkerbellSingleNode125ManagementScaleupWorkloadWithAPI +#Skip single K8s version upgrade tests as the same is covered by multiple K8s version upgrade from 1.25 to 1.29 to save on hardware resources and running time +- TestTinkerbellKubernetes125UbuntuTo126Upgrade +- TestTinkerbellKubernetes126UbuntuTo127Upgrade +- TestTinkerbellKubernetes127UbuntuTo128Upgrade +- TestTinkerbellKubernetes128UbuntuTo129Upgrade # Tinkerbell Packages # Skip test cases for packages other than hello-eks-anywhere and not for K 1.28. 
- TestTinkerbellKubernetes126UbuntuSingleNodeCuratedPackagesEmissaryFlow @@ -94,9 +148,18 @@ skipped_tests: - TestTinkerbellKubernetes127UbuntuSingleNodeCuratedPackagesHarborFlow - TestTinkerbellKubernetes127UbuntuCuratedPackagesAdotSimpleFlow - TestTinkerbellKubernetes127UbuntuCuratedPackagesPrometheusSimpleFlow -- TestTinkerbellKubernetes129UbuntuSingleNodeCuratedPackagesFlow -- TestTinkerbellKubernetes129UbuntuSingleNodeCuratedPackagesEmissaryFlow -- TestTinkerbellKubernetes129UbuntuSingleNodeCuratedPackagesHarborFlow -- TestTinkerbellKubernetes129UbuntuCuratedPackagesAdotSimpleFlow -- TestTinkerbellKubernetes129UbuntuCuratedPackagesPrometheusSimpleFlow -- TestTinkerbellKubernetes129UbuntuCuratedPackagesClusterAutoscalerSimpleFlow +- TestTinkerbellKubernetes130UbuntuSingleNodeCuratedPackagesFlow +- TestTinkerbellKubernetes130UbuntuSingleNodeCuratedPackagesEmissaryFlow +- TestTinkerbellKubernetes130UbuntuSingleNodeCuratedPackagesHarborFlow +- TestTinkerbellKubernetes130UbuntuCuratedPackagesAdotSimpleFlow +- TestTinkerbellKubernetes130UbuntuCuratedPackagesPrometheusSimpleFlow +- TestTinkerbellKubernetes130UbuntuCuratedPackagesClusterAutoscalerSimpleFlow + +# Tinkerbell conformance +- TestTinkerbellKubernetes125BottleRocketThreeReplicasTwoWorkersConformanceFlow +- TestTinkerbellKubernetes126BottleRocketThreeReplicasTwoWorkersConformanceFlow +- TestTinkerbellKubernetes125ThreeReplicasTwoWorkersConformanceFlow +- TestTinkerbellKubernetes126ThreeReplicasTwoWorkersConformanceFlow +- TestTinkerbellKubernetes127ThreeReplicasTwoWorkersConformanceFlow +- TestTinkerbellKubernetes128ThreeReplicasTwoWorkersConformanceFlow +- TestTinkerbellKubernetes129ThreeReplicasTwoWorkersConformanceFlow diff --git a/test/e2e/TINKERBELL_HARDWARE_COUNT.yaml b/test/e2e/TINKERBELL_HARDWARE_COUNT.yaml index 4cae24499de4..74e723771283 100644 --- a/test/e2e/TINKERBELL_HARDWARE_COUNT.yaml +++ b/test/e2e/TINKERBELL_HARDWARE_COUNT.yaml @@ -1,7 +1,7 @@ -TestTinkerbellKubernetes129AWSIamAuth: 2 +TestTinkerbellKubernetes130AWSIamAuth: 2 TestTinkerbellKubernetes128BottleRocketAWSIamAuth: 2 -TestTinkerbellKubernetes129UbuntuWorkerNodeUpgrade: 3 -TestTinkerbellKubernetes129UbuntuWorkerNodeScaleUpWithAPI: 3 +TestTinkerbellKubernetes130UbuntuWorkerNodeUpgrade: 3 +TestTinkerbellKubernetes130UbuntuWorkerNodeScaleUpWithAPI: 3 TestTinkerbellKubernetes125UbuntuAddWorkerNodeGroupWithAPI: 2 TestTinkerbellKubernetes127UbuntuSingleNodeCuratedPackagesFlow: 1 TestTinkerbellKubernetes127BottleRocketSingleNodeCuratedPackagesFlow: 1 @@ -34,96 +34,101 @@ TestTinkerbellKubernetes125UbuntuCuratedPackagesAdotSimpleFlow: 1 TestTinkerbellKubernetes125BottleRocketCuratedPackagesAdotSimpleFlow: 1 TestTinkerbellKubernetes125UbuntuCuratedPackagesPrometheusSimpleFlow: 1 TestTinkerbellKubernetes125BottleRocketCuratedPackagesPrometheusSimpleFlow: 1 -TestTinkerbellKubernetes129UbuntuSingleNodeCuratedPackagesFlow: 1 +TestTinkerbellKubernetes130UbuntuSingleNodeCuratedPackagesFlow: 1 TestTinkerbellKubernetes128BottleRocketSingleNodeCuratedPackagesFlow: 1 -TestTinkerbellKubernetes129UbuntuSingleNodeCuratedPackagesEmissaryFlow: 1 +TestTinkerbellKubernetes130UbuntuSingleNodeCuratedPackagesEmissaryFlow: 1 TestTinkerbellKubernetes128BottleRocketSingleNodeCuratedPackagesEmissaryFlow: 1 -TestTinkerbellKubernetes129UbuntuSingleNodeCuratedPackagesHarborFlow: 1 +TestTinkerbellKubernetes130UbuntuSingleNodeCuratedPackagesHarborFlow: 1 TestTinkerbellKubernetes128BottleRocketSingleNodeCuratedPackagesHarborFlow: 1 
-TestTinkerbellKubernetes129UbuntuCuratedPackagesAdotSimpleFlow: 1 +TestTinkerbellKubernetes130UbuntuCuratedPackagesAdotSimpleFlow: 1 TestTinkerbellKubernetes128BottleRocketCuratedPackagesAdotSimpleFlow: 1 -TestTinkerbellKubernetes129UbuntuCuratedPackagesPrometheusSimpleFlow: 1 +TestTinkerbellKubernetes130UbuntuCuratedPackagesPrometheusSimpleFlow: 1 TestTinkerbellKubernetes128BottleRocketCuratedPackagesPrometheusSimpleFlow: 1 -TestTinkerbellKubernetes129UbuntuCuratedPackagesClusterAutoscalerSimpleFlow: 3 +TestTinkerbellKubernetes130UbuntuCuratedPackagesClusterAutoscalerSimpleFlow: 3 TestTinkerbellKubernetes128BottleRocketSingleNodeSimpleFlow: 1 -TestTinkerbellKubernetes129UbuntuSingleNodeSimpleFlow: 1 -TestTinkerbellKubernetes129UbuntuWorkloadCluster: 4 -TestTinkerbellKubernetes129UbuntuWorkloadClusterWithAPI: 4 -TestTinkerbellKubernetes129UbuntuWorkloadClusterGitFluxWithAPI: 4 +TestTinkerbellKubernetes130UbuntuSingleNodeSimpleFlow: 1 +TestTinkerbellKubernetes130UbuntuWorkloadCluster: 4 +TestTinkerbellKubernetes130UbuntuWorkloadClusterWithAPI: 4 +TestTinkerbellKubernetes130UbuntuWorkloadClusterGitFluxWithAPI: 4 TestTinkerbellKubernetes128BottlerocketWorkloadClusterSimpleFlow: 4 TestTinkerbellKubernetes128BottlerocketWorkloadClusterWithAPI: 4 -TestTinkerbellKubernetes129UbuntuSingleNodeWorkloadCluster: 2 -TestTinkerbellKubernetes129UbuntuSingleNodeWorkloadClusterWithAPI: 2 +TestTinkerbellKubernetes130UbuntuSingleNodeWorkloadCluster: 2 +TestTinkerbellKubernetes130UbuntuSingleNodeWorkloadClusterWithAPI: 2 TestTinkerbellKubernetes128BottlerocketSingleNodeWorkloadCluster: 2 TestTinkerbellKubernetes128BottlerocketSingleNodeWorkloadClusterWithAPI: 2 TestTinkerbellKubernetes128BottlerocketWorkloadClusterSkipPowerActions: 4 -TestTinkerbellUpgrade129MulticlusterWorkloadClusterWorkerScaleup: 5 +TestTinkerbellUpgrade130MulticlusterWorkloadClusterWorkerScaleup: 5 TestTinkerbellSingleNode125ManagementScaleupWorkloadWithAPI: 4 -TestTinkerbellKubernetes125UbuntuTo126Upgrade: 4 TestTinkerbellKubernetes126UbuntuTo127Upgrade: 4 TestTinkerbellKubernetes127UbuntuTo128Upgrade: 4 TestTinkerbellKubernetes128UbuntuTo129Upgrade: 4 -TestTinkerbellKubernetes125To126Ubuntu2204Upgrade: 4 +TestTinkerbellKubernetes129UbuntuTo130Upgrade: 4 TestTinkerbellKubernetes126To127Ubuntu2204Upgrade: 4 TestTinkerbellKubernetes127To128Ubuntu2204Upgrade: 4 TestTinkerbellKubernetes128To129Ubuntu2204Upgrade: 4 +TestTinkerbellKubernetes128To129Ubuntu2204RTOSUpgrade: 4 +TestTinkerbellKubernetes129To130Ubuntu2204Upgrade: 4 TestTinkerbellKubernetes126Ubuntu2004To2204Upgrade: 4 TestTinkerbellKubernetes127Ubuntu2004To2204Upgrade: 4 TestTinkerbellKubernetes128Ubuntu2004To2204Upgrade: 4 TestTinkerbellKubernetes129Ubuntu2004To2204Upgrade: 4 -TestTinkerbellUpgrade129MulticlusterWorkloadClusterWorkerScaleupGitFluxWithAPI: 4 -TestTinkerbellUpgrade129MulticlusterWorkloadClusterCPScaleup: 6 -TestTinkerbellUpgradeMulticlusterWorkloadClusterK8sUpgrade128To129: 6 +TestTinkerbellKubernetes129Ubuntu2004To2204RTOSUpgrade: 4 +TestTinkerbellKubernetes130Ubuntu2004To2204Upgrade: 4 +TestTinkerbellUpgrade130MulticlusterWorkloadClusterWorkerScaleupGitFluxWithAPI: 4 +TestTinkerbellUpgrade130MulticlusterWorkloadClusterCPScaleup: 6 +TestTinkerbellUpgradeMulticlusterWorkloadClusterK8sUpgrade129To130: 6 TestTinkerbellUpgradeMulticlusterWorkloadClusterK8sUpgrade125To126WithAPI: 4 -TestTinkerbellKubernetes129OIDC: 2 -TestTinkerbellKubernetes129UbuntuRegistryMirror: 2 -TestTinkerbellKubernetes129UbuntuInsecureSkipVerifyRegistryMirror: 2 +TestTinkerbellKubernetes130OIDC: 
2 +TestTinkerbellKubernetes130UbuntuRegistryMirror: 2 +TestTinkerbellKubernetes130UbuntuInsecureSkipVerifyRegistryMirror: 2 TestTinkerbellKubernetes128BottlerocketRegistryMirror: 2 -TestTinkerbellKubernetes129UbuntuAuthenticatedRegistryMirror: 2 +TestTinkerbellKubernetes130UbuntuAuthenticatedRegistryMirror: 2 TestTinkerbellKubernetes128BottlerocketAuthenticatedRegistryMirror: 2 -TestTinkerbellKubernetes125UbuntuSimpleFlow: 2 TestTinkerbellKubernetes126UbuntuSimpleFlow: 2 TestTinkerbellKubernetes127UbuntuSimpleFlow: 2 TestTinkerbellKubernetes128UbuntuSimpleFlow: 2 TestTinkerbellKubernetes129UbuntuSimpleFlow: 2 -TestTinkerbellKubernetes125Ubuntu2204SimpleFlow: 2 +TestTinkerbellKubernetes130UbuntuSimpleFlow: 2 TestTinkerbellKubernetes126Ubuntu2204SimpleFlow: 2 TestTinkerbellKubernetes127Ubuntu2204SimpleFlow: 2 TestTinkerbellKubernetes128Ubuntu2204SimpleFlow: 2 TestTinkerbellKubernetes129Ubuntu2204SimpleFlow: 2 -TestTinkerbellKubernetes125RedHatSimpleFlow: 2 +TestTinkerbellKubernetes129Ubuntu2204RTOSSimpleFlow: 2 +TestTinkerbellKubernetes130Ubuntu2204SimpleFlow: 2 TestTinkerbellKubernetes126RedHatSimpleFlow: 2 TestTinkerbellKubernetes127RedHatSimpleFlow: 2 TestTinkerbellKubernetes128RedHatSimpleFlow: 2 TestTinkerbellKubernetes129RedHatSimpleFlow: 2 +TestTinkerbellKubernetes130RedHatSimpleFlow: 2 TestTinkerbellKubernetes125BottleRocketSimpleFlow: 2 TestTinkerbellKubernetes126BottleRocketSimpleFlow: 2 TestTinkerbellKubernetes127BottleRocketSimpleFlow: 2 TestTinkerbellKubernetes128BottleRocketSimpleFlow: 2 -TestTinkerbellKubernetes129UbuntuThreeControlPlaneReplicasSimpleFlow: 4 +TestTinkerbellKubernetes130UbuntuThreeControlPlaneReplicasSimpleFlow: 4 TestTinkerbellKubernetes128BottleRocketThreeControlPlaneReplicasSimpleFlow: 4 -TestTinkerbellKubernetes129UbuntuThreeWorkersSimpleFlow: 4 +TestTinkerbellKubernetes130UbuntuThreeWorkersSimpleFlow: 4 TestTinkerbellKubernetes128BottleRocketThreeWorkersSimpleFlow: 4 -TestTinkerbellKubernetes129UbuntuControlPlaneScaleUp: 4 -TestTinkerbellKubernetes129UbuntuWorkerNodeScaleUp: 3 -TestTinkerbellKubernetes129UbuntuWorkerNodeScaleDown: 3 -TestTinkerbellKubernetes129UbuntuControlPlaneScaleDown: 4 -TestTinkerbellKubernetes129UbuntuWorkerNodeGroupsTaintsAndLabels: 3 +TestTinkerbellKubernetes130UbuntuControlPlaneScaleUp: 4 +TestTinkerbellKubernetes130UbuntuWorkerNodeScaleUp: 3 +TestTinkerbellKubernetes130UbuntuWorkerNodeScaleDown: 3 +TestTinkerbellKubernetes130UbuntuControlPlaneScaleDown: 4 +TestTinkerbellKubernetes130UbuntuWorkerNodeGroupsTaintsAndLabels: 3 TestTinkerbellKubernetes128BottlerocketWorkerNodeGroupsTaintsAndLabels: 3 TestTinkerbellAirgappedKubernetes128BottleRocketRegistryMirror: 2 TestTinkerbellAirgappedKubernetes128BottlerocketProxyConfigFlow: 2 -TestTinkerbellAirgappedKubernetes129UbuntuProxyConfigFlow: 2 -TestTinkerbellKubernetes129UbuntuOOB: 2 -TestTinkerbellK8sUpgrade128to129WithUbuntuOOB: 4 -TestTinkerbellKubernetes128UbuntuTo129UpgradeCPOnly: 3 -TestTinkerbellKubernetes127UbuntuTo128UpgradeWorkerOnly: 3 -TestTinkerbellSingleNode128To129UbuntuManagementCPUpgradeAPI: 4 -TestTinkerbellKubernetes125UbuntuTo126InPlaceUpgrade_1CP_1Worker: 2 +TestTinkerbellAirgappedKubernetes130UbuntuProxyConfigFlow: 2 +TestTinkerbellKubernetes130UbuntuOOB: 2 +TestTinkerbellK8sUpgrade129to130WithUbuntuOOB: 4 +TestTinkerbellKubernetes129UbuntuTo130UpgradeCPOnly: 3 +TestTinkerbellKubernetes129UbuntuTo130UpgradeWorkerOnly: 3 +TestTinkerbellSingleNode129To130UbuntuManagementCPUpgradeAPI: 4 TestTinkerbellKubernetes126UbuntuTo127InPlaceUpgrade_1CP_2Worker: 3 
TestTinkerbellKubernetes127UbuntuTo128InPlaceUpgrade_3CP_1Worker: 4 TestTinkerbellKubernetes128UbuntuTo129InPlaceUpgrade_1CP_1Worker: 2 -TestTinkerbellKubernetes125UbuntuTo126SingleNodeInPlaceUpgrade: 1 +TestTinkerbellKubernetes129UbuntuTo130InPlaceUpgrade_1CP_1Worker: 2 TestTinkerbellKubernetes126UbuntuTo127SingleNodeInPlaceUpgrade: 1 TestTinkerbellKubernetes127UbuntuTo128SingleNodeInPlaceUpgrade: 1 TestTinkerbellKubernetes128UbuntuTo129SingleNodeInPlaceUpgrade: 1 -TestTinkerbellKubernetes128UpgradeManagementComponents: 2 \ No newline at end of file +TestTinkerbellKubernetes129UbuntuTo130SingleNodeInPlaceUpgrade: 1 +TestTinkerbellKubernetes128UpgradeManagementComponents: 2 +TestTinkerbellKubernetes125UbuntuTo129MultipleUpgrade: 4 \ No newline at end of file diff --git a/test/e2e/airgap.go b/test/e2e/airgap.go index 470653ded6fd..f0ee7d286849 100644 --- a/test/e2e/airgap.go +++ b/test/e2e/airgap.go @@ -22,7 +22,8 @@ const ( bundleReleasePathFromArtifacts = "./eks-anywhere-downloads/bundle-release.yaml" ) -// runAirgapConfigFlow run airgap deployment but allow bootstrap cluster to access local peers. +// runAirgapConfigFlow runs an airgap deployment workflow with a registry mirror configuration, +// and allows bootstrap cluster to access local peers func runAirgapConfigFlow(test *framework.ClusterE2ETest, localCIDRs string) { test.GenerateClusterConfig() test.DownloadArtifacts() @@ -39,6 +40,22 @@ func runAirgapConfigFlow(test *framework.ClusterE2ETest, localCIDRs string) { test.DeleteCluster() } +// runAirgapConfigProxyFlow runs an airgapped deployment workflow with a proxy configuration, +// and allows bootstrap cluster to access local peers. +func runAirgapConfigProxyFlow(test *framework.ClusterE2ETest, localCIDRs string) { + test.GenerateClusterConfig() + test.DownloadArtifacts() + test.ExtractDownloadedArtifacts() + test.AirgapDockerContainers(localCIDRs) + test.CreateAirgappedUser(localCIDRs) + test.AssertAirgappedNetwork() + test.CreateCluster( + framework.WithSudo(airgapUsername), + framework.WithBundlesOverride(bundleReleasePathFromArtifacts), // generated by ExtractDownloadArtifacts + ) + test.DeleteCluster() +} + func runTinkerbellAirgapConfigFlow(test *framework.ClusterE2ETest, localCIDRs, kubeVersion string) { test.DownloadArtifacts() test.ExtractDownloadedArtifacts() diff --git a/test/e2e/autoscaler.go b/test/e2e/autoscaler.go index cf62a65ad926..f521c17431f0 100644 --- a/test/e2e/autoscaler.go +++ b/test/e2e/autoscaler.go @@ -13,7 +13,7 @@ func runAutoscalerWithMetricsServerSimpleFlow(test *framework.ClusterE2ETest) { metricServerName := "metrics-server" targetNamespace := "eksa-packages" test.InstallAutoScalerWithMetricServer(targetNamespace) - test.CombinedAutoScalerMetricServerTest(autoscalerName, metricServerName, targetNamespace, withMgmtCluster(test)) + test.CombinedAutoScalerMetricServerTest(autoscalerName, metricServerName, targetNamespace, withCluster(test)) }) } @@ -25,7 +25,7 @@ func runAutoscalerWithMetricsServerTinkerbellSimpleFlow(test *framework.ClusterE metricServerName := "metrics-server" targetNamespace := "eksa-packages" test.InstallAutoScalerWithMetricServer(targetNamespace) - test.CombinedAutoScalerMetricServerTest(autoscalerName, metricServerName, targetNamespace, withMgmtCluster(test)) + test.CombinedAutoScalerMetricServerTest(autoscalerName, metricServerName, targetNamespace, withCluster(test)) test.DeleteCluster() test.ValidateHardwareDecommissioned() } diff --git a/test/e2e/certmanager.go b/test/e2e/certmanager.go index 4fd44856c7c4..0efe4c92fb08 
100644 --- a/test/e2e/certmanager.go +++ b/test/e2e/certmanager.go @@ -4,12 +4,9 @@ package e2e import ( - "fmt" - "path/filepath" "time" "github.com/aws/eks-anywhere/pkg/kubeconfig" - "github.com/aws/eks-anywhere/pkg/types" "github.com/aws/eks-anywhere/test/framework" ) @@ -30,18 +27,11 @@ func runCertManagerRemoteClusterInstallSimpleFlow(test *framework.MulticlusterE2 packagePrefix := "test" packageFile := e.BuildPackageConfigFile(packageName, packagePrefix, EksaPackagesNamespace) test.ManagementCluster.InstallCuratedPackageFile(packageFile, kubeconfig.FromClusterName(test.ManagementCluster.ClusterName)) - e.VerifyCertManagerPackageInstalled(packagePrefix, EksaPackagesNamespace, cmPackageName, withMgmtClusterSetup(test.ManagementCluster)) - e.CleanupCerts(withMgmtClusterSetup(test.ManagementCluster)) + e.VerifyCertManagerPackageInstalled(packagePrefix, EksaPackagesNamespace, cmPackageName, withCluster(test.ManagementCluster)) + e.CleanupCerts(withCluster(test.ManagementCluster)) e.DeleteClusterWithKubectl() e.ValidateClusterDelete() }) time.Sleep(5 * time.Minute) test.DeleteManagementCluster() } - -func withMgmtClusterSetup(cluster *framework.ClusterE2ETest) *types.Cluster { - return &types.Cluster{ - Name: cluster.ClusterName, - KubeconfigFile: filepath.Join(cluster.ClusterName, fmt.Sprintf("%s-eks-a-cluster.kubeconfig", cluster.ClusterName)), - } -} diff --git a/test/e2e/cloudstack_test.go b/test/e2e/cloudstack_test.go index 560a4f96b3b6..4c0842220670 100644 --- a/test/e2e/cloudstack_test.go +++ b/test/e2e/cloudstack_test.go @@ -18,14 +18,14 @@ import ( ) // APIServerExtraArgs -func TestCloudStackKubernetes129RedHat8APIServerExtraArgsSimpleFlow(t *testing.T) { +func TestCloudStackKubernetes130RedHat8APIServerExtraArgsSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat129()), + framework.NewCloudStack(t, framework.WithCloudStackRedhat130()), framework.WithEnvVar(features.APIServerExtraArgsEnabledEnvVar, "true"), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneAPIServerExtraArgs(), ), ) @@ -33,13 +33,13 @@ func TestCloudStackKubernetes129RedHat8APIServerExtraArgsSimpleFlow(t *testing.T } // TODO: Investigate why this test takes long time to pass with service-account-issuer flag -func TestCloudStackKubernetes129Redhat8APIServerExtraArgsUpgradeFlow(t *testing.T) { +func TestCloudStackKubernetes130Redhat8APIServerExtraArgsUpgradeFlow(t *testing.T) { var addAPIServerExtraArgsclusterOpts []framework.ClusterE2ETestOpt var removeAPIServerExtraArgsclusterOpts []framework.ClusterE2ETestOpt test := framework.NewClusterE2ETest( t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat129()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.NewCloudStack(t, framework.WithCloudStackRedhat130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithEnvVar(features.APIServerExtraArgsEnabledEnvVar, "true"), ) addAPIServerExtraArgsclusterOpts = append( @@ -62,16 +62,6 @@ func TestCloudStackKubernetes129Redhat8APIServerExtraArgsUpgradeFlow(t *testing. 
} // AWS IAM Auth -func TestCloudStackKubernetes125AWSIamAuth(t *testing.T) { - test := framework.NewClusterE2ETest( - t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithAWSIam(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - ) - runAWSIamAuthFlow(test) -} - func TestCloudStackKubernetes126AWSIamAuth(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -112,20 +102,14 @@ func TestCloudStackKubernetes129AWSIamAuth(t *testing.T) { runAWSIamAuthFlow(test) } -func TestCloudStackKubernetes125to126AWSIamAuthUpgrade(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()) +func TestCloudStackKubernetes130AWSIamAuth(t *testing.T) { test := framework.NewClusterE2ETest( t, - provider, + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), framework.WithAWSIam(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - ) - runUpgradeFlowWithAWSIamAuth( - test, - v1alpha1.Kube126, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), - provider.WithProviderUpgrade(provider.Redhat9Kubernetes126Template()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), ) + runAWSIamAuthFlow(test) } func TestCloudStackKubernetes126to127AWSIamAuthUpgrade(t *testing.T) { @@ -176,20 +160,23 @@ func TestCloudStackKubernetes128to129AWSIamAuthUpgrade(t *testing.T) { ) } -// Curated packages test -func TestCloudStackKubernetes125RedhatCuratedPackagesSimpleFlow(t *testing.T) { - framework.CheckCuratedPackagesCredentials(t) +func TestCloudStackKubernetes129to130AWSIamAuthUpgrade(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes129()) test := framework.NewClusterE2ETest( t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), - "my-packages-test", EksaPackageControllerHelmURI, - EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), + provider, + framework.WithAWSIam(), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + ) + runUpgradeFlowWithAWSIamAuth( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Redhat9Kubernetes130Template()), ) - runCuratedPackageInstallSimpleFlow(test) } +// Curated packages test func TestCloudStackKubernetes126RedhatCuratedPackagesSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest( @@ -242,17 +229,17 @@ func TestCloudStackKubernetes129RedhatCuratedPackagesSimpleFlow(t *testing.T) { runCuratedPackageInstallSimpleFlow(test) } -func TestCloudStackKubernetes125RedhatCuratedPackagesEmissarySimpleFlow(t *testing.T) { +func TestCloudStackKubernetes130RedhatCuratedPackagesSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest( t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + 
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), "my-packages-test", EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runCuratedPackageEmissaryInstallSimpleFlow(test) + runCuratedPackageInstallSimpleFlow(test) } func TestCloudStackKubernetes126RedhatCuratedPackagesEmissarySimpleFlow(t *testing.T) { @@ -307,17 +294,17 @@ func TestCloudStackKubernetes129RedhatCuratedPackagesEmissarySimpleFlow(t *testi runCuratedPackageEmissaryInstallSimpleFlow(test) } -func TestCloudStackKubernetes125RedhatCuratedPackagesHarborSimpleFlow(t *testing.T) { +func TestCloudStackKubernetes130RedhatCuratedPackagesEmissarySimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest( t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), "my-packages-test", EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test) + runCuratedPackageEmissaryInstallSimpleFlow(test) } func TestCloudStackKubernetes126RedhatCuratedPackagesHarborSimpleFlow(t *testing.T) { @@ -372,11 +359,17 @@ func TestCloudStackKubernetes129RedhatCuratedPackagesHarborSimpleFlow(t *testing runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test) } -func TestCloudStackKubernetes125RedhatWorkloadClusterCuratedPackagesSimpleFlow(t *testing.T) { +func TestCloudStackKubernetes130RedhatCuratedPackagesHarborSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()) - test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube125) - runCuratedPackageRemoteClusterInstallSimpleFlow(test) + test := framework.NewClusterE2ETest( + t, + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), + "my-packages-test", EksaPackageControllerHelmURI, + EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), + ) + runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test) } func TestCloudStackKubernetes126RedhatWorkloadClusterCuratedPackagesSimpleFlow(t *testing.T) { @@ -407,11 +400,11 @@ func TestCloudStackKubernetes129RedhatWorkloadClusterCuratedPackagesSimpleFlow(t runCuratedPackageRemoteClusterInstallSimpleFlow(test) } -func TestCloudStackKubernetes125RedhatWorkloadClusterCuratedPackagesEmissarySimpleFlow(t *testing.T) { +func TestCloudStackKubernetes130RedhatWorkloadClusterCuratedPackagesSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()) - test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube125) - runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test) + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()) + test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube130) + 
runCuratedPackageRemoteClusterInstallSimpleFlow(test) } func TestCloudStackKubernetes126RedhatWorkloadClusterCuratedPackagesEmissarySimpleFlow(t *testing.T) { @@ -442,12 +435,11 @@ func TestCloudStackKubernetes129RedhatWorkloadClusterCuratedPackagesEmissarySimp runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test) } -func TestCloudStackKubernetes125RedhatCuratedPackagesCertManagerSimpleFlow(t *testing.T) { +func TestCloudStackKubernetes130RedhatWorkloadClusterCuratedPackagesEmissarySimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) - framework.CheckCertManagerCredentials(t) - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()) - test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube125) - runCertManagerRemoteClusterInstallSimpleFlow(test) + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()) + test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube130) + runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test) } func TestCloudStackKubernetes126RedhatCuratedPackagesCertManagerSimpleFlow(t *testing.T) { @@ -482,16 +474,12 @@ func TestCloudStackKubernetes129RedhatCuratedPackagesCertManagerSimpleFlow(t *te runCertManagerRemoteClusterInstallSimpleFlow(test) } -func TestCloudStackKubernetes125RedhatCuratedPackagesAdotSimpleFlow(t *testing.T) { +func TestCloudStackKubernetes130RedhatCuratedPackagesCertManagerSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) - test := framework.NewClusterE2ETest(t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), - "my-packages-test", EksaPackageControllerHelmURI, - EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), - ) - runCuratedPackagesAdotInstallSimpleFlow(test) + framework.CheckCertManagerCredentials(t) + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()) + test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube130) + runCertManagerRemoteClusterInstallSimpleFlow(test) } func TestCloudStackKubernetes126RedhatCuratedPackagesAdotSimpleFlow(t *testing.T) { @@ -542,16 +530,16 @@ func TestCloudStackKubernetes129RedhatCuratedPackagesAdotSimpleFlow(t *testing.T runCuratedPackagesAdotInstallSimpleFlow(test) } -func TestCloudStackKubernetes125RedhatCuratedPackagesAdotUpdateFlow(t *testing.T) { +func TestCloudStackKubernetes130RedhatCuratedPackagesAdotSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), "my-packages-test", EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runCuratedPackagesAdotInstallUpdateFlow(test) + runCuratedPackagesAdotInstallSimpleFlow(test) } func TestCloudStackKubernetes126RedhatCuratedPackagesAdotUpdateFlow(t *testing.T) { @@ -602,19 +590,16 @@ func TestCloudStackKubernetes129RedhatCuratedPackagesAdotUpdateFlow(t *testing.T 
runCuratedPackagesAdotInstallUpdateFlow(test) } -func TestCloudStackKubernetes125RedHatCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) { - minNodes := 1 - maxNodes := 2 +func TestCloudStackKubernetes130RedhatCuratedPackagesAdotUpdateFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) - test := framework.NewClusterE2ETest( - t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), - EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, + test := framework.NewClusterE2ETest(t, + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), + "my-packages-test", EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runAutoscalerWithMetricsServerSimpleFlow(test) + runCuratedPackagesAdotInstallUpdateFlow(test) } func TestCloudStackKubernetes126RedHatCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) { @@ -677,16 +662,19 @@ func TestCloudStackKubernetes129RedHatCuratedPackagesClusterAutoscalerSimpleFlow runAutoscalerWithMetricsServerSimpleFlow(test) } -func TestCloudStackKubernetes125RedhatCuratedPackagesPrometheusSimpleFlow(t *testing.T) { +func TestCloudStackKubernetes130RedHatCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) { + minNodes := 1 + maxNodes := 2 framework.CheckCuratedPackagesCredentials(t) - test := framework.NewClusterE2ETest(t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), - "my-packages-test", EksaPackageControllerHelmURI, + test := framework.NewClusterE2ETest( + t, + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), + EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runCuratedPackagesPrometheusInstallSimpleFlow(test) + runAutoscalerWithMetricsServerSimpleFlow(test) } func TestCloudStackKubernetes126RedhatCuratedPackagesPrometheusSimpleFlow(t *testing.T) { @@ -737,15 +725,27 @@ func TestCloudStackKubernetes129RedhatCuratedPackagesPrometheusSimpleFlow(t *tes runCuratedPackagesPrometheusInstallSimpleFlow(test) } +func TestCloudStackKubernetes130RedhatCuratedPackagesPrometheusSimpleFlow(t *testing.T) { + framework.CheckCuratedPackagesCredentials(t) + test := framework.NewClusterE2ETest(t, + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), + "my-packages-test", EksaPackageControllerHelmURI, + EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), + ) + runCuratedPackagesPrometheusInstallSimpleFlow(test) +} + // Download artifacts func TestCloudStackDownloadArtifacts(t *testing.T) { test := framework.NewClusterE2ETest( t, - 
framework.NewCloudStack(t, framework.WithCloudStackRedhat128()), + framework.NewCloudStack(t, framework.WithCloudStackRedhat130()), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), ) runDownloadArtifactsFlow(test) } @@ -753,27 +753,15 @@ func TestCloudStackDownloadArtifacts(t *testing.T) { func TestCloudStackRedhat9DownloadArtifacts(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes128()), + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), ) runDownloadArtifactsFlow(test) } -func TestCloudStackKubernetes125GithubFlux(t *testing.T) { - test := framework.NewClusterE2ETest(t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithFluxGithub(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), - framework.WithClusterFiller(api.WithControlPlaneCount(1)), - framework.WithClusterFiller(api.WithWorkerNodeCount(1)), - ) - runFluxFlow(test) -} - func TestCloudStackKubernetes126GithubFlux(t *testing.T) { test := framework.NewClusterE2ETest(t, framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes126()), @@ -822,11 +810,11 @@ func TestCloudStackKubernetes129GithubFlux(t *testing.T) { runFluxFlow(test) } -func TestCloudStackKubernetes125GitFlux(t *testing.T) { +func TestCloudStackKubernetes130GithubFlux(t *testing.T) { test := framework.NewClusterE2ETest(t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithFluxGit(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), + framework.WithFluxGithub(), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), @@ -882,22 +870,16 @@ func TestCloudStackKubernetes129GitFlux(t *testing.T) { runFluxFlow(test) } -func TestCloudStackKubernetes125To126GitFluxUpgrade(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()) +func TestCloudStackKubernetes130GitFlux(t *testing.T) { test := framework.NewClusterE2ETest(t, - provider, + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), framework.WithFluxGit(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), ) - runUpgradeFlowWithFlux( - test, - v1alpha1.Kube126, - 
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), - provider.WithProviderUpgrade(provider.Redhat9Kubernetes126Template()), - ) + runFluxFlow(test) } func TestCloudStackKubernetes126To127GitFluxUpgrade(t *testing.T) { @@ -936,20 +918,21 @@ func TestCloudStackKubernetes127To128GitFluxUpgrade(t *testing.T) { ) } -func TestCloudStackKubernetes125InstallGitFluxDuringUpgrade(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()) +func TestCloudStackKubernetes129To130GitFluxUpgrade(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes129()) test := framework.NewClusterE2ETest(t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithFluxGit(), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), ) runUpgradeFlowWithFlux( test, - v1alpha1.Kube125, - framework.WithFluxGit(), - framework.WithClusterUpgrade(api.WithGitOpsRef(framework.DefaultFluxConfigName, v1alpha1.FluxConfigKind)), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Redhat9Kubernetes130Template()), ) } @@ -1021,6 +1004,23 @@ func TestCloudStackKubernetes129InstallGitFluxDuringUpgrade(t *testing.T) { ) } +func TestCloudStackKubernetes130InstallGitFluxDuringUpgrade(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()) + test := framework.NewClusterE2ETest(t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + ) + runUpgradeFlowWithFlux( + test, + v1alpha1.Kube130, + framework.WithFluxGit(), + framework.WithClusterUpgrade(api.WithGitOpsRef(framework.DefaultFluxConfigName, v1alpha1.FluxConfigKind)), + ) +} + func TestCloudStackKubernetes128UpgradeManagementComponents(t *testing.T) { release := latestMinorRelease(t) provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes128()) @@ -1028,14 +1028,14 @@ func TestCloudStackKubernetes128UpgradeManagementComponents(t *testing.T) { } // Labels -func TestCloudStackKubernetes125LabelsAndNodeNameRedhat(t *testing.T) { +func TestCloudStackKubernetes126LabelsAndNodeNameRedhat(t *testing.T) { test := framework.NewClusterE2ETest( t, framework.NewCloudStack(t, - framework.WithCloudStackRedhat9Kubernetes125(), + framework.WithCloudStackRedhat9Kubernetes126(), ), framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube125), + api.WithKubernetesVersion(v1alpha1.Kube126), api.WithControlPlaneLabel(constants.FailureDomainLabelName, constants.CloudstackFailureDomainPlaceholder), api.WithWorkerNodeGroup(constants.DefaultWorkerNodeGroupName, api.WithCount(1), @@ -1050,14 +1050,14 @@ func TestCloudStackKubernetes125LabelsAndNodeNameRedhat(t *testing.T) { test.DeleteCluster() } -func TestCloudStackKubernetes126LabelsAndNodeNameRedhat(t *testing.T) { +func TestCloudStackKubernetes127LabelsAndNodeNameRedhat(t *testing.T) { test := framework.NewClusterE2ETest( t, framework.NewCloudStack(t, - framework.WithCloudStackRedhat9Kubernetes126(), + 
framework.WithCloudStackRedhat9Kubernetes127(), ), framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube126), + api.WithKubernetesVersion(v1alpha1.Kube127), api.WithControlPlaneLabel(constants.FailureDomainLabelName, constants.CloudstackFailureDomainPlaceholder), api.WithWorkerNodeGroup(constants.DefaultWorkerNodeGroupName, api.WithCount(1), @@ -1072,14 +1072,14 @@ func TestCloudStackKubernetes126LabelsAndNodeNameRedhat(t *testing.T) { test.DeleteCluster() } -func TestCloudStackKubernetes127LabelsAndNodeNameRedhat(t *testing.T) { +func TestCloudStackKubernetes128LabelsAndNodeNameRedhat(t *testing.T) { test := framework.NewClusterE2ETest( t, framework.NewCloudStack(t, - framework.WithCloudStackRedhat9Kubernetes127(), + framework.WithCloudStackRedhat9Kubernetes128(), ), framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube128), api.WithControlPlaneLabel(constants.FailureDomainLabelName, constants.CloudstackFailureDomainPlaceholder), api.WithWorkerNodeGroup(constants.DefaultWorkerNodeGroupName, api.WithCount(1), @@ -1094,14 +1094,14 @@ func TestCloudStackKubernetes127LabelsAndNodeNameRedhat(t *testing.T) { test.DeleteCluster() } -func TestCloudStackKubernetes128LabelsAndNodeNameRedhat(t *testing.T) { +func TestCloudStackKubernetes129LabelsAndNodeNameRedhat(t *testing.T) { test := framework.NewClusterE2ETest( t, framework.NewCloudStack(t, - framework.WithCloudStackRedhat9Kubernetes128(), + framework.WithCloudStackRedhat9Kubernetes129(), ), framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithControlPlaneLabel(constants.FailureDomainLabelName, constants.CloudstackFailureDomainPlaceholder), api.WithWorkerNodeGroup(constants.DefaultWorkerNodeGroupName, api.WithCount(1), @@ -1116,14 +1116,14 @@ func TestCloudStackKubernetes128LabelsAndNodeNameRedhat(t *testing.T) { test.DeleteCluster() } -func TestCloudStackKubernetes129LabelsAndNodeNameRedhat(t *testing.T) { +func TestCloudStackKubernetes130LabelsAndNodeNameRedhat(t *testing.T) { test := framework.NewClusterE2ETest( t, framework.NewCloudStack(t, - framework.WithCloudStackRedhat9Kubernetes129(), + framework.WithCloudStackRedhat9Kubernetes130(), ), framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneLabel(constants.FailureDomainLabelName, constants.CloudstackFailureDomainPlaceholder), api.WithWorkerNodeGroup(constants.DefaultWorkerNodeGroupName, api.WithCount(1), @@ -1138,14 +1138,14 @@ func TestCloudStackKubernetes129LabelsAndNodeNameRedhat(t *testing.T) { test.DeleteCluster() } -func TestCloudStackKubernetes125RedhatLabelsUpgradeFlow(t *testing.T) { - provider := redhat125ProviderWithLabels(t) +func TestCloudStackKubernetes126RedhatLabelsUpgradeFlow(t *testing.T) { + provider := redhat126ProviderWithLabels(t) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube125), + api.WithKubernetesVersion(v1alpha1.Kube126), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -1154,7 +1154,7 @@ func TestCloudStackKubernetes125RedhatLabelsUpgradeFlow(t *testing.T) { runLabelsUpgradeFlow( test, - v1alpha1.Kube125, + v1alpha1.Kube126, framework.WithClusterUpgrade( api.WithWorkerNodeGroup(worker0, api.WithLabel(key1, val1)), api.WithWorkerNodeGroup(worker1, 
api.WithLabel(key2, val2)), @@ -1164,14 +1164,14 @@ func TestCloudStackKubernetes125RedhatLabelsUpgradeFlow(t *testing.T) { ) } -func TestCloudStackKubernetes126RedhatLabelsUpgradeFlow(t *testing.T) { - provider := redhat126ProviderWithLabels(t) +func TestCloudStackKubernetes127RedhatLabelsUpgradeFlow(t *testing.T) { + provider := redhat127ProviderWithLabels(t) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube126), + api.WithKubernetesVersion(v1alpha1.Kube127), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -1180,7 +1180,7 @@ func TestCloudStackKubernetes126RedhatLabelsUpgradeFlow(t *testing.T) { runLabelsUpgradeFlow( test, - v1alpha1.Kube126, + v1alpha1.Kube127, framework.WithClusterUpgrade( api.WithWorkerNodeGroup(worker0, api.WithLabel(key1, val1)), api.WithWorkerNodeGroup(worker1, api.WithLabel(key2, val2)), @@ -1190,14 +1190,14 @@ func TestCloudStackKubernetes126RedhatLabelsUpgradeFlow(t *testing.T) { ) } -func TestCloudStackKubernetes127RedhatLabelsUpgradeFlow(t *testing.T) { - provider := redhat127ProviderWithLabels(t) +func TestCloudStackKubernetes128RedhatLabelsUpgradeFlow(t *testing.T) { + provider := redhat128ProviderWithLabels(t) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube128), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -1206,7 +1206,7 @@ func TestCloudStackKubernetes127RedhatLabelsUpgradeFlow(t *testing.T) { runLabelsUpgradeFlow( test, - v1alpha1.Kube127, + v1alpha1.Kube128, framework.WithClusterUpgrade( api.WithWorkerNodeGroup(worker0, api.WithLabel(key1, val1)), api.WithWorkerNodeGroup(worker1, api.WithLabel(key2, val2)), @@ -1216,14 +1216,14 @@ func TestCloudStackKubernetes127RedhatLabelsUpgradeFlow(t *testing.T) { ) } -func TestCloudStackKubernetes128RedhatLabelsUpgradeFlow(t *testing.T) { - provider := redhat128ProviderWithLabels(t) +func TestCloudStackKubernetes130RedhatLabelsUpgradeFlow(t *testing.T) { + provider := redhat130ProviderWithLabels(t) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -1232,7 +1232,7 @@ func TestCloudStackKubernetes128RedhatLabelsUpgradeFlow(t *testing.T) { runLabelsUpgradeFlow( test, - v1alpha1.Kube128, + v1alpha1.Kube130, framework.WithClusterUpgrade( api.WithWorkerNodeGroup(worker0, api.WithLabel(key1, val1)), api.WithWorkerNodeGroup(worker1, api.WithLabel(key2, val2)), @@ -1322,16 +1322,36 @@ func redhat128ProviderWithLabels(t *testing.T) *framework.CloudStack { ) } +func redhat130ProviderWithLabels(t *testing.T) *framework.CloudStack { + return framework.NewCloudStack(t, + framework.WithCloudStackWorkerNodeGroup( + worker0, + framework.WithWorkerNodeGroup(worker0, api.WithCount(2), + api.WithLabel(key1, val2)), + ), + framework.WithCloudStackWorkerNodeGroup( + worker1, + framework.WithWorkerNodeGroup(worker1, api.WithCount(1)), + ), + framework.WithCloudStackWorkerNodeGroup( + worker2, + framework.WithWorkerNodeGroup(worker2, api.WithCount(1), + api.WithLabel(key2, val2)), + ), + framework.WithCloudStackRedhat9Kubernetes130(), + ) 
+} + // Multicluster -func TestCloudStackKubernetes125MulticlusterWorkloadCluster(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()) +func TestCloudStackKubernetes126MulticlusterWorkloadCluster(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes126()) test := framework.NewMulticlusterE2ETest( t, framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube125), + api.WithKubernetesVersion(v1alpha1.Kube126), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithStackedEtcdTopology(), @@ -1341,7 +1361,7 @@ func TestCloudStackKubernetes125MulticlusterWorkloadCluster(t *testing.T) { t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube125), + api.WithKubernetesVersion(v1alpha1.Kube126), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithStackedEtcdTopology(), @@ -1351,15 +1371,15 @@ func TestCloudStackKubernetes125MulticlusterWorkloadCluster(t *testing.T) { runWorkloadClusterFlow(test) } -func TestCloudStackKubernetes126MulticlusterWorkloadCluster(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes126()) +func TestCloudStackKubernetes127MulticlusterWorkloadCluster(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes127()) test := framework.NewMulticlusterE2ETest( t, framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube126), + api.WithKubernetesVersion(v1alpha1.Kube127), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithStackedEtcdTopology(), @@ -1369,7 +1389,7 @@ func TestCloudStackKubernetes126MulticlusterWorkloadCluster(t *testing.T) { t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube126), + api.WithKubernetesVersion(v1alpha1.Kube127), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithStackedEtcdTopology(), @@ -1379,15 +1399,15 @@ func TestCloudStackKubernetes126MulticlusterWorkloadCluster(t *testing.T) { runWorkloadClusterFlow(test) } -func TestCloudStackKubernetes127MulticlusterWorkloadCluster(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes127()) +func TestCloudStackKubernetes128MulticlusterWorkloadCluster(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes128()) test := framework.NewMulticlusterE2ETest( t, framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube128), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithStackedEtcdTopology(), @@ -1397,7 +1417,7 @@ func TestCloudStackKubernetes127MulticlusterWorkloadCluster(t *testing.T) { t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube128), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithStackedEtcdTopology(), @@ -1407,15 +1427,15 @@ func TestCloudStackKubernetes127MulticlusterWorkloadCluster(t *testing.T) { runWorkloadClusterFlow(test) } -func TestCloudStackKubernetes128MulticlusterWorkloadCluster(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes128()) +func TestCloudStackKubernetes129MulticlusterWorkloadCluster(t *testing.T) { + provider := 
framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes129()) test := framework.NewMulticlusterE2ETest( t, framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithStackedEtcdTopology(), @@ -1425,7 +1445,7 @@ func TestCloudStackKubernetes128MulticlusterWorkloadCluster(t *testing.T) { t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithStackedEtcdTopology(), @@ -1435,15 +1455,15 @@ func TestCloudStackKubernetes128MulticlusterWorkloadCluster(t *testing.T) { runWorkloadClusterFlow(test) } -func TestCloudStackKubernetes129MulticlusterWorkloadCluster(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes129()) +func TestCloudStackKubernetes130MulticlusterWorkloadCluster(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()) test := framework.NewMulticlusterE2ETest( t, framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithStackedEtcdTopology(), @@ -1453,7 +1473,7 @@ func TestCloudStackKubernetes129MulticlusterWorkloadCluster(t *testing.T) { t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithStackedEtcdTopology(), @@ -1583,9 +1603,44 @@ func TestCloudStackUpgradeKubernetes128MulticlusterWorkloadClusterWithGithubFlux ) } -func TestCloudStackKubernetes125WithOIDCManagementClusterUpgradeFromLatestSideEffects(t *testing.T) { - cloudstack := framework.NewCloudStack(t) - runTestManagementClusterUpgradeSideEffects(t, cloudstack, framework.RedHat9, anywherev1.Kube125) +func TestCloudStackUpgradeKubernetes130MulticlusterWorkloadClusterWithGithubFlux(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes129()) + test := framework.NewMulticlusterE2ETest( + t, + framework.NewClusterE2ETest( + t, + provider, + framework.WithFluxGithub(), + framework.WithClusterFiller( + api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithControlPlaneCount(1), + api.WithWorkerNodeCount(1), + api.WithStackedEtcdTopology(), + ), + ), + framework.NewClusterE2ETest( + t, + provider, + framework.WithFluxGithub(), + framework.WithClusterFiller( + api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithControlPlaneCount(1), + api.WithWorkerNodeCount(1), + api.WithStackedEtcdTopology(), + ), + ), + ) + runWorkloadClusterFlowWithGitOps( + test, + framework.WithClusterUpgradeGit( + api.WithKubernetesVersion(v1alpha1.Kube130), + api.WithControlPlaneCount(3), + api.WithWorkerNodeCount(3), + ), + provider.WithProviderUpgradeGit( + provider.Redhat9Kubernetes130Template(), + ), + ) } func TestCloudStackKubernetes126WithOIDCManagementClusterUpgradeFromLatestSideEffects(t *testing.T) { @@ -1608,20 +1663,12 @@ func TestCloudStackKubernetes129WithOIDCManagementClusterUpgradeFromLatestSideEf runTestManagementClusterUpgradeSideEffects(t, cloudstack, framework.RedHat9, anywherev1.Kube129) } -// OIDC -func TestCloudStackKubernetes125OIDC(t *testing.T) { - test 
:= framework.NewClusterE2ETest( - t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithOIDC(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), - framework.WithClusterFiller(api.WithControlPlaneCount(1)), - framework.WithClusterFiller(api.WithWorkerNodeCount(1)), - ) - runOIDCFlow(test) +func TestCloudStackKubernetes130WithOIDCManagementClusterUpgradeFromLatestSideEffects(t *testing.T) { + cloudstack := framework.NewCloudStack(t) + runTestManagementClusterUpgradeSideEffects(t, cloudstack, framework.RedHat9, anywherev1.Kube130) } +// OIDC func TestCloudStackKubernetes126OIDC(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -1674,23 +1721,17 @@ func TestCloudStackKubernetes129OIDC(t *testing.T) { runOIDCFlow(test) } -func TestCloudStackKubernetes125To126OIDCUpgrade(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()) +func TestCloudStackKubernetes130OIDC(t *testing.T) { test := framework.NewClusterE2ETest( t, - provider, + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), framework.WithOIDC(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), ) - runUpgradeFlowWithOIDC( - test, - v1alpha1.Kube126, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), - provider.WithProviderUpgrade(provider.Redhat9Kubernetes126Template()), - ) + runOIDCFlow(test) } func TestCloudStackKubernetes126To127OIDCUpgrade(t *testing.T) { @@ -1712,20 +1753,26 @@ func TestCloudStackKubernetes126To127OIDCUpgrade(t *testing.T) { ) } -// Proxy config -func TestCloudStackKubernetes125RedhatProxyConfig(t *testing.T) { +func TestCloudStackKubernetes129To130OIDCUpgrade(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes129()) test := framework.NewClusterE2ETest( t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), + provider, + framework.WithOIDC(), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithProxy(framework.CloudstackProxyRequiredEnvVars), ) - runProxyConfigFlow(test) + runUpgradeFlowWithOIDC( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Redhat9Kubernetes130Template()), + ) } +// Proxy config func TestCloudStackKubernetes126RedhatProxyConfig(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -1778,8 +1825,21 @@ func TestCloudStackKubernetes129RedhatProxyConfig(t *testing.T) { runProxyConfigFlow(test) } +func TestCloudStackKubernetes130RedhatProxyConfig(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + 
framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithProxy(framework.CloudstackProxyRequiredEnvVars), + ) + runProxyConfigFlow(test) +} + // Proxy config multicluster -func TestCloudStackKubernetes125RedhatProxyConfigAPI(t *testing.T) { +func TestCloudStackKubernetes126RedhatProxyConfigAPI(t *testing.T) { cloudstack := framework.NewCloudStack(t) managementCluster := framework.NewClusterE2ETest( t, @@ -1789,7 +1849,7 @@ func TestCloudStackKubernetes125RedhatProxyConfigAPI(t *testing.T) { api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), ), - cloudstack.WithRedhat9Kubernetes125(), + cloudstack.WithRedhat9Kubernetes126(), ) test := framework.NewMulticlusterE2ETest(t, managementCluster) @@ -1803,7 +1863,7 @@ func TestCloudStackKubernetes125RedhatProxyConfigAPI(t *testing.T) { api.ClusterToConfigFiller( api.WithManagementCluster(managementCluster.ClusterName), ), - cloudstack.WithRedhat9Kubernetes125(), + cloudstack.WithRedhat9Kubernetes126(), ), ) @@ -1823,7 +1883,7 @@ func TestCloudStackKubernetes125RedhatProxyConfigAPI(t *testing.T) { test.DeleteManagementCluster() } -func TestCloudStackKubernetes126RedhatProxyConfigAPI(t *testing.T) { +func TestCloudStackKubernetes127RedhatProxyConfigAPI(t *testing.T) { cloudstack := framework.NewCloudStack(t) managementCluster := framework.NewClusterE2ETest( t, @@ -1833,7 +1893,7 @@ func TestCloudStackKubernetes126RedhatProxyConfigAPI(t *testing.T) { api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), ), - cloudstack.WithRedhat9Kubernetes126(), + cloudstack.WithRedhat9Kubernetes127(), ) test := framework.NewMulticlusterE2ETest(t, managementCluster) @@ -1847,7 +1907,7 @@ func TestCloudStackKubernetes126RedhatProxyConfigAPI(t *testing.T) { api.ClusterToConfigFiller( api.WithManagementCluster(managementCluster.ClusterName), ), - cloudstack.WithRedhat9Kubernetes126(), + cloudstack.WithRedhat9Kubernetes127(), ), ) @@ -1867,7 +1927,7 @@ func TestCloudStackKubernetes126RedhatProxyConfigAPI(t *testing.T) { test.DeleteManagementCluster() } -func TestCloudStackKubernetes127RedhatProxyConfigAPI(t *testing.T) { +func TestCloudStackKubernetes128RedhatProxyConfigAPI(t *testing.T) { cloudstack := framework.NewCloudStack(t) managementCluster := framework.NewClusterE2ETest( t, @@ -1877,7 +1937,7 @@ func TestCloudStackKubernetes127RedhatProxyConfigAPI(t *testing.T) { api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), ), - cloudstack.WithRedhat9Kubernetes127(), + cloudstack.WithRedhat9Kubernetes128(), ) test := framework.NewMulticlusterE2ETest(t, managementCluster) @@ -1891,7 +1951,7 @@ func TestCloudStackKubernetes127RedhatProxyConfigAPI(t *testing.T) { api.ClusterToConfigFiller( api.WithManagementCluster(managementCluster.ClusterName), ), - cloudstack.WithRedhat9Kubernetes127(), + cloudstack.WithRedhat9Kubernetes128(), ), ) @@ -1911,7 +1971,7 @@ func TestCloudStackKubernetes127RedhatProxyConfigAPI(t *testing.T) { test.DeleteManagementCluster() } -func TestCloudStackKubernetes128RedhatProxyConfigAPI(t *testing.T) { +func TestCloudStackKubernetes130RedhatProxyConfigAPI(t *testing.T) { cloudstack := framework.NewCloudStack(t) managementCluster := framework.NewClusterE2ETest( t, @@ -1921,7 +1981,7 @@ func TestCloudStackKubernetes128RedhatProxyConfigAPI(t *testing.T) { api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), ), - cloudstack.WithRedhat9Kubernetes128(), + cloudstack.WithRedhat9Kubernetes130(), ) test := framework.NewMulticlusterE2ETest(t, 
managementCluster) @@ -1935,7 +1995,7 @@ func TestCloudStackKubernetes128RedhatProxyConfigAPI(t *testing.T) { api.ClusterToConfigFiller( api.WithManagementCluster(managementCluster.ClusterName), ), - cloudstack.WithRedhat9Kubernetes128(), + cloudstack.WithRedhat9Kubernetes130(), ), ) @@ -1956,19 +2016,6 @@ func TestCloudStackKubernetes128RedhatProxyConfigAPI(t *testing.T) { } // Registry mirror -func TestCloudStackKubernetes125RedhatRegistryMirrorInsecureSkipVerify(t *testing.T) { - test := framework.NewClusterE2ETest( - t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithClusterFiller(api.WithControlPlaneCount(1)), - framework.WithClusterFiller(api.WithWorkerNodeCount(1)), - framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithRegistryMirrorInsecureSkipVerify(constants.CloudStackProviderName), - ) - runRegistryMirrorConfigFlow(test) -} - func TestCloudStackKubernetes126RedhatRegistryMirrorInsecureSkipVerify(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -2008,15 +2055,15 @@ func TestCloudStackKubernetes128RedhatRegistryMirrorInsecureSkipVerify(t *testin runRegistryMirrorConfigFlow(test) } -func TestCloudStackKubernetes125RedhatRegistryMirrorAndCert(t *testing.T) { +func TestCloudStackKubernetes130RedhatRegistryMirrorInsecureSkipVerify(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithRegistryMirrorEndpointAndCert(constants.CloudStackProviderName), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithRegistryMirrorInsecureSkipVerify(constants.CloudStackProviderName), ) runRegistryMirrorConfigFlow(test) } @@ -2073,6 +2120,19 @@ func TestCloudStackKubernetes129RedhatRegistryMirrorAndCert(t *testing.T) { runRegistryMirrorConfigFlow(test) } +func TestCloudStackKubernetes130RedhatRegistryMirrorAndCert(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithRegistryMirrorEndpointAndCert(constants.CloudStackProviderName), + ) + runRegistryMirrorConfigFlow(test) +} + func TestCloudStackKubernetes125RedhatAuthenticatedRegistryMirror(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -2126,15 +2186,6 @@ func TestCloudStackKubernetes128RedhatAuthenticatedRegistryMirror(t *testing.T) } // Simpleflow -func TestCloudStackKubernetes125RedHat8SimpleFlow(t *testing.T) { - test := framework.NewClusterE2ETest( - t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - ) - runSimpleFlow(test) -} - func TestCloudStackKubernetes126RedHat8SimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -2171,11 +2222,11 @@ func 
TestCloudStackKubernetes129RedHat8SimpleFlow(t *testing.T) { runSimpleFlow(test) } -func TestCloudStackKubernetes125RedHat9SimpleFlow(t *testing.T) { +func TestCloudStackKubernetes130RedHat8SimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.NewCloudStack(t, framework.WithCloudStackRedhat130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), ) runSimpleFlow(test) } @@ -2216,13 +2267,11 @@ func TestCloudStackKubernetes129RedHat9SimpleFlow(t *testing.T) { runSimpleFlow(test) } -func TestCloudStackKubernetes125ThreeReplicasFiveWorkersSimpleFlow(t *testing.T) { +func TestCloudStackKubernetes130RedHat9SimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithClusterFiller(api.WithControlPlaneCount(3)), - framework.WithClusterFiller(api.WithWorkerNodeCount(5)), + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), ) runSimpleFlow(test) } @@ -2271,12 +2320,13 @@ func TestCloudStackKubernetes129ThreeReplicasFiveWorkersSimpleFlow(t *testing.T) runSimpleFlow(test) } -func TestCloudStackKubernetes125MultiEndpointSimpleFlow(t *testing.T) { +func TestCloudStackKubernetes130ThreeReplicasFiveWorkersSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125(), - framework.WithCloudStackFillers(framework.UpdateAddCloudStackAz2())), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(3)), + framework.WithClusterFiller(api.WithWorkerNodeCount(5)), ) runSimpleFlow(test) } @@ -2320,15 +2370,13 @@ func TestCloudStackKubernetes129MultiEndpointSimpleFlow(t *testing.T) { ) runSimpleFlow(test) } - -func TestCloudStackKubernetes125DifferentNamespaceSimpleFlow(t *testing.T) { - test := framework.NewClusterE2ETest( - t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125(), - framework.WithCloudStackFillers(api.WithCloudStackConfigNamespace(clusterNamespace), - api.WithCloudStackConfigNamespaceForAllMachinesAndDatacenter(clusterNamespace))), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithClusterFiller(api.WithClusterNamespace(clusterNamespace)), + +func TestCloudStackKubernetes130MultiEndpointSimpleFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130(), + framework.WithCloudStackFillers(framework.UpdateAddCloudStackAz2())), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), ) runSimpleFlow(test) } @@ -2369,17 +2417,19 @@ func TestCloudStackKubernetes128DifferentNamespaceSimpleFlow(t *testing.T) { runSimpleFlow(test) } -// Cilium Policy -func TestCloudStackKubernetes125CiliumAlwaysPolicyEnforcementModeSimpleFlow(t *testing.T) { +func TestCloudStackKubernetes130DifferentNamespaceSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, - 
framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithClusterFiller(api.WithCiliumPolicyEnforcementMode(v1alpha1.CiliumPolicyModeAlways)), + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130(), + framework.WithCloudStackFillers(api.WithCloudStackConfigNamespace(clusterNamespace), + api.WithCloudStackConfigNamespaceForAllMachinesAndDatacenter(clusterNamespace))), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithClusterNamespace(clusterNamespace)), ) runSimpleFlow(test) } +// Cilium Policy func TestCloudStackKubernetes126CiliumAlwaysPolicyEnforcementModeSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -2410,6 +2460,16 @@ func TestCloudStackKubernetes128CiliumAlwaysPolicyEnforcementModeSimpleFlow(t *t runSimpleFlow(test) } +func TestCloudStackKubernetes130CiliumAlwaysPolicyEnforcementModeSimpleFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithCiliumPolicyEnforcementMode(v1alpha1.CiliumPolicyModeAlways)), + ) + runSimpleFlow(test) +} + func TestCloudStackKubernetes125RedhatTo126UpgradeCiliumPolicyEnforcementMode(t *testing.T) { provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()) test := framework.NewClusterE2ETest( @@ -2449,15 +2509,6 @@ func TestCloudStackKubernetes126RedhatTo127UpgradeCiliumPolicyEnforcementMode(t } // Stacked etcd -func TestCloudStackKubernetes125StackedEtcdRedhat(t *testing.T) { - test := framework.NewClusterE2ETest(t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithClusterFiller(api.WithControlPlaneCount(1)), - framework.WithClusterFiller(api.WithStackedEtcdTopology())) - runStackedEtcdFlow(test) -} - func TestCloudStackKubernetes126StackedEtcdRedhat(t *testing.T) { test := framework.NewClusterE2ETest(t, framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes126()), @@ -2494,15 +2545,24 @@ func TestCloudStackKubernetes129StackedEtcdRedhat(t *testing.T) { runStackedEtcdFlow(test) } +func TestCloudStackKubernetes130StackedEtcdRedhat(t *testing.T) { + test := framework.NewClusterE2ETest(t, + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithStackedEtcdTopology())) + runStackedEtcdFlow(test) +} + // Taints -func TestCloudStackKubernetes125RedhatTaintsUpgradeFlow(t *testing.T) { - provider := redhat125ProviderWithTaints(t) +func TestCloudStackKubernetes126RedhatTaintsUpgradeFlow(t *testing.T) { + provider := redhat126ProviderWithTaints(t) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube125), + api.WithKubernetesVersion(v1alpha1.Kube126), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -2511,7 +2571,7 @@ func TestCloudStackKubernetes125RedhatTaintsUpgradeFlow(t *testing.T) { runTaintsUpgradeFlow( test, - v1alpha1.Kube125, + v1alpha1.Kube126, 
framework.WithClusterUpgrade( api.WithWorkerNodeGroup(worker0, api.WithTaint(framework.NoExecuteTaint())), api.WithWorkerNodeGroup(worker1, api.WithTaint(framework.NoExecuteTaint())), @@ -2521,14 +2581,14 @@ func TestCloudStackKubernetes125RedhatTaintsUpgradeFlow(t *testing.T) { ) } -func TestCloudStackKubernetes126RedhatTaintsUpgradeFlow(t *testing.T) { - provider := redhat126ProviderWithTaints(t) +func TestCloudStackKubernetes127RedhatTaintsUpgradeFlow(t *testing.T) { + provider := redhat127ProviderWithTaints(t) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube126), + api.WithKubernetesVersion(v1alpha1.Kube127), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -2537,7 +2597,7 @@ func TestCloudStackKubernetes126RedhatTaintsUpgradeFlow(t *testing.T) { runTaintsUpgradeFlow( test, - v1alpha1.Kube126, + v1alpha1.Kube127, framework.WithClusterUpgrade( api.WithWorkerNodeGroup(worker0, api.WithTaint(framework.NoExecuteTaint())), api.WithWorkerNodeGroup(worker1, api.WithTaint(framework.NoExecuteTaint())), @@ -2547,14 +2607,14 @@ func TestCloudStackKubernetes126RedhatTaintsUpgradeFlow(t *testing.T) { ) } -func TestCloudStackKubernetes127RedhatTaintsUpgradeFlow(t *testing.T) { - provider := redhat127ProviderWithTaints(t) +func TestCloudStackKubernetes128RedhatTaintsUpgradeFlow(t *testing.T) { + provider := redhat128ProviderWithTaints(t) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube128), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -2563,7 +2623,7 @@ func TestCloudStackKubernetes127RedhatTaintsUpgradeFlow(t *testing.T) { runTaintsUpgradeFlow( test, - v1alpha1.Kube127, + v1alpha1.Kube128, framework.WithClusterUpgrade( api.WithWorkerNodeGroup(worker0, api.WithTaint(framework.NoExecuteTaint())), api.WithWorkerNodeGroup(worker1, api.WithTaint(framework.NoExecuteTaint())), @@ -2573,14 +2633,14 @@ func TestCloudStackKubernetes127RedhatTaintsUpgradeFlow(t *testing.T) { ) } -func TestCloudStackKubernetes128RedhatTaintsUpgradeFlow(t *testing.T) { - provider := redhat128ProviderWithTaints(t) +func TestCloudStackKubernetes130RedhatTaintsUpgradeFlow(t *testing.T) { + provider := redhat130ProviderWithTaints(t) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -2589,7 +2649,7 @@ func TestCloudStackKubernetes128RedhatTaintsUpgradeFlow(t *testing.T) { runTaintsUpgradeFlow( test, - v1alpha1.Kube128, + v1alpha1.Kube130, framework.WithClusterUpgrade( api.WithWorkerNodeGroup(worker0, api.WithTaint(framework.NoExecuteTaint())), api.WithWorkerNodeGroup(worker1, api.WithTaint(framework.NoExecuteTaint())), @@ -2599,7 +2659,7 @@ func TestCloudStackKubernetes128RedhatTaintsUpgradeFlow(t *testing.T) { ) } -func redhat125ProviderWithTaints(t *testing.T) *framework.CloudStack { +func redhat126ProviderWithTaints(t *testing.T) *framework.CloudStack { return framework.NewCloudStack(t, framework.WithCloudStackWorkerNodeGroup( worker0, @@ -2613,11 +2673,11 @@ func redhat125ProviderWithTaints(t *testing.T) *framework.CloudStack { 
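The taints upgrade tests being shifted to newer Kubernetes versions here all share a two-step shape: a redhatNNNProviderWithTaints helper pre-builds three CloudStack worker node groups (worker2 as PreferNoSchedule), the cluster itself is created with api.RemoveAllWorkerNodeGroups() to start from a blank slate, and runTaintsUpgradeFlow then re-adds the groups with taints during the upgrade. A rough sketch of that shape under stated assumptions — the per-group options for worker0 and worker1 and the provider template upgrade are elided in the hunks above, so the counts and the single upgrade option below are placeholders rather than the exact values used:

func sketchCloudStackRedhat130TaintsUpgrade(t *testing.T) {
	// Provider with three worker node groups; worker2 starts as PreferNoSchedule.
	provider := framework.NewCloudStack(t,
		framework.WithCloudStackWorkerNodeGroup(worker0,
			framework.WithWorkerNodeGroup(worker0, api.WithCount(2))),
		framework.WithCloudStackWorkerNodeGroup(worker1,
			framework.WithWorkerNodeGroup(worker1, api.WithCount(1))),
		framework.WithCloudStackWorkerNodeGroup(worker2,
			framework.PreferNoScheduleWorkerNodeGroup(worker2, 1)),
		framework.WithCloudStackRedhat9Kubernetes130(),
	)

	// The cluster is created with no worker node groups at all ("blank slate").
	test := framework.NewClusterE2ETest(t, provider,
		framework.WithClusterFiller(
			api.WithKubernetesVersion(v1alpha1.Kube130),
			api.WithExternalEtcdTopology(1),
			api.WithControlPlaneCount(1),
			api.RemoveAllWorkerNodeGroups(),
		),
	)

	// The upgrade re-introduces the worker node groups, now carrying NoExecute taints.
	runTaintsUpgradeFlow(
		test,
		v1alpha1.Kube130,
		framework.WithClusterUpgrade(
			api.WithWorkerNodeGroup(worker0, api.WithTaint(framework.NoExecuteTaint())),
			api.WithWorkerNodeGroup(worker1, api.WithTaint(framework.NoExecuteTaint())),
		),
	)
}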
worker2, framework.PreferNoScheduleWorkerNodeGroup(worker2, 1), ), - framework.WithCloudStackRedhat9Kubernetes125(), + framework.WithCloudStackRedhat9Kubernetes126(), ) } -func redhat126ProviderWithTaints(t *testing.T) *framework.CloudStack { +func redhat127ProviderWithTaints(t *testing.T) *framework.CloudStack { return framework.NewCloudStack(t, framework.WithCloudStackWorkerNodeGroup( worker0, @@ -2631,11 +2691,11 @@ func redhat126ProviderWithTaints(t *testing.T) *framework.CloudStack { worker2, framework.PreferNoScheduleWorkerNodeGroup(worker2, 1), ), - framework.WithCloudStackRedhat9Kubernetes126(), + framework.WithCloudStackRedhat9Kubernetes127(), ) } -func redhat127ProviderWithTaints(t *testing.T) *framework.CloudStack { +func redhat128ProviderWithTaints(t *testing.T) *framework.CloudStack { return framework.NewCloudStack(t, framework.WithCloudStackWorkerNodeGroup( worker0, @@ -2649,11 +2709,11 @@ func redhat127ProviderWithTaints(t *testing.T) *framework.CloudStack { worker2, framework.PreferNoScheduleWorkerNodeGroup(worker2, 1), ), - framework.WithCloudStackRedhat9Kubernetes127(), + framework.WithCloudStackRedhat9Kubernetes128(), ) } -func redhat128ProviderWithTaints(t *testing.T) *framework.CloudStack { +func redhat130ProviderWithTaints(t *testing.T) *framework.CloudStack { return framework.NewCloudStack(t, framework.WithCloudStackWorkerNodeGroup( worker0, @@ -2667,12 +2727,12 @@ func redhat128ProviderWithTaints(t *testing.T) *framework.CloudStack { worker2, framework.PreferNoScheduleWorkerNodeGroup(worker2, 1), ), - framework.WithCloudStackRedhat9Kubernetes128(), + framework.WithCloudStackRedhat9Kubernetes130(), ) } // Upgrade -func TestCloudStackKubernetes125RedhatAndRemoveWorkerNodeGroups(t *testing.T) { +func TestCloudStackKubernetes126RedhatAndRemoveWorkerNodeGroups(t *testing.T) { provider := framework.NewCloudStack(t, framework.WithCloudStackWorkerNodeGroup( "worker-1", @@ -2682,13 +2742,13 @@ func TestCloudStackKubernetes125RedhatAndRemoveWorkerNodeGroups(t *testing.T) { "worker-2", framework.WithWorkerNodeGroup("workers-2", api.WithCount(1)), ), - framework.WithCloudStackRedhat9Kubernetes125(), + framework.WithCloudStackRedhat9Kubernetes126(), ) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube125), + api.WithKubernetesVersion(v1alpha1.Kube126), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -2697,7 +2757,7 @@ func TestCloudStackKubernetes125RedhatAndRemoveWorkerNodeGroups(t *testing.T) { runSimpleUpgradeFlow( test, - v1alpha1.Kube125, + v1alpha1.Kube126, framework.WithClusterUpgrade( api.RemoveWorkerNodeGroup("workers-2"), api.WithWorkerNodeGroup("workers-1", api.WithCount(1)), @@ -2712,7 +2772,7 @@ func TestCloudStackKubernetes125RedhatAndRemoveWorkerNodeGroups(t *testing.T) { ) } -func TestCloudStackKubernetes126RedhatAndRemoveWorkerNodeGroups(t *testing.T) { +func TestCloudStackKubernetes127RedhatAndRemoveWorkerNodeGroups(t *testing.T) { provider := framework.NewCloudStack(t, framework.WithCloudStackWorkerNodeGroup( "worker-1", @@ -2722,13 +2782,13 @@ func TestCloudStackKubernetes126RedhatAndRemoveWorkerNodeGroups(t *testing.T) { "worker-2", framework.WithWorkerNodeGroup("workers-2", api.WithCount(1)), ), - framework.WithCloudStackRedhat9Kubernetes126(), + framework.WithCloudStackRedhat9Kubernetes127(), ) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - 
api.WithKubernetesVersion(v1alpha1.Kube126), + api.WithKubernetesVersion(v1alpha1.Kube127), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -2737,7 +2797,7 @@ func TestCloudStackKubernetes126RedhatAndRemoveWorkerNodeGroups(t *testing.T) { runSimpleUpgradeFlow( test, - v1alpha1.Kube126, + v1alpha1.Kube127, framework.WithClusterUpgrade( api.RemoveWorkerNodeGroup("workers-2"), api.WithWorkerNodeGroup("workers-1", api.WithCount(1)), @@ -2752,7 +2812,7 @@ func TestCloudStackKubernetes126RedhatAndRemoveWorkerNodeGroups(t *testing.T) { ) } -func TestCloudStackKubernetes127RedhatAndRemoveWorkerNodeGroups(t *testing.T) { +func TestCloudStackKubernetes128RedhatAndRemoveWorkerNodeGroups(t *testing.T) { provider := framework.NewCloudStack(t, framework.WithCloudStackWorkerNodeGroup( "worker-1", @@ -2762,13 +2822,13 @@ func TestCloudStackKubernetes127RedhatAndRemoveWorkerNodeGroups(t *testing.T) { "worker-2", framework.WithWorkerNodeGroup("workers-2", api.WithCount(1)), ), - framework.WithCloudStackRedhat9Kubernetes127(), + framework.WithCloudStackRedhat9Kubernetes128(), ) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube128), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -2777,7 +2837,7 @@ func TestCloudStackKubernetes127RedhatAndRemoveWorkerNodeGroups(t *testing.T) { runSimpleUpgradeFlow( test, - v1alpha1.Kube127, + v1alpha1.Kube128, framework.WithClusterUpgrade( api.RemoveWorkerNodeGroup("workers-2"), api.WithWorkerNodeGroup("workers-1", api.WithCount(1)), @@ -2792,7 +2852,7 @@ func TestCloudStackKubernetes127RedhatAndRemoveWorkerNodeGroups(t *testing.T) { ) } -func TestCloudStackKubernetes128RedhatAndRemoveWorkerNodeGroups(t *testing.T) { +func TestCloudStackKubernetes129RedhatAndRemoveWorkerNodeGroups(t *testing.T) { provider := framework.NewCloudStack(t, framework.WithCloudStackWorkerNodeGroup( "worker-1", @@ -2802,13 +2862,13 @@ func TestCloudStackKubernetes128RedhatAndRemoveWorkerNodeGroups(t *testing.T) { "worker-2", framework.WithWorkerNodeGroup("workers-2", api.WithCount(1)), ), - framework.WithCloudStackRedhat9Kubernetes128(), + framework.WithCloudStackRedhat9Kubernetes129(), ) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -2817,7 +2877,7 @@ func TestCloudStackKubernetes128RedhatAndRemoveWorkerNodeGroups(t *testing.T) { runSimpleUpgradeFlow( test, - v1alpha1.Kube128, + v1alpha1.Kube129, framework.WithClusterUpgrade( api.RemoveWorkerNodeGroup("workers-2"), api.WithWorkerNodeGroup("workers-1", api.WithCount(1)), @@ -2832,7 +2892,7 @@ func TestCloudStackKubernetes128RedhatAndRemoveWorkerNodeGroups(t *testing.T) { ) } -func TestCloudStackKubernetes129RedhatAndRemoveWorkerNodeGroups(t *testing.T) { +func TestCloudStackKubernetes130RedhatAndRemoveWorkerNodeGroups(t *testing.T) { provider := framework.NewCloudStack(t, framework.WithCloudStackWorkerNodeGroup( "worker-1", @@ -2842,13 +2902,13 @@ func TestCloudStackKubernetes129RedhatAndRemoveWorkerNodeGroups(t *testing.T) { "worker-2", framework.WithWorkerNodeGroup("workers-2", api.WithCount(1)), ), - 
framework.WithCloudStackRedhat9Kubernetes129(), + framework.WithCloudStackRedhat9Kubernetes130(), ) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -2857,7 +2917,7 @@ func TestCloudStackKubernetes129RedhatAndRemoveWorkerNodeGroups(t *testing.T) { runSimpleUpgradeFlow( test, - v1alpha1.Kube129, + v1alpha1.Kube130, framework.WithClusterUpgrade( api.RemoveWorkerNodeGroup("workers-2"), api.WithWorkerNodeGroup("workers-1", api.WithCount(1)), @@ -2872,25 +2932,6 @@ func TestCloudStackKubernetes129RedhatAndRemoveWorkerNodeGroups(t *testing.T) { ) } -func TestCloudStackKubernetes125To126Redhat8UnstackedEtcdUpgrade(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat125()) - test := framework.NewClusterE2ETest( - t, - provider, - framework.WithClusterFiller(api.WithStackedEtcdTopology()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithClusterFiller(api.WithControlPlaneCount(1)), - framework.WithClusterFiller(api.WithWorkerNodeCount(1)), - ) - runSimpleUpgradeFlow( - test, - v1alpha1.Kube126, - framework.WithClusterFiller(api.WithStackedEtcdTopology()), - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), - provider.WithProviderUpgrade(provider.Redhat126Template()), - ) -} - func TestCloudStackKubernetes126To127Redhat8UnstackedEtcdUpgrade(t *testing.T) { provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat126()) test := framework.NewClusterE2ETest( @@ -2948,22 +2989,22 @@ func TestCloudStackKubernetes128To129Redhat8UnstackedEtcdUpgrade(t *testing.T) { ) } -func TestCloudStackKubernetes125To126Redhat8StackedEtcdUpgrade(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat125()) +func TestCloudStackKubernetes129To130Redhat8UnstackedEtcdUpgrade(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat129()) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller(api.WithStackedEtcdTopology()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), ) runSimpleUpgradeFlow( test, - v1alpha1.Kube126, + v1alpha1.Kube130, framework.WithClusterFiller(api.WithStackedEtcdTopology()), - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), - provider.WithProviderUpgrade(provider.Redhat126Template()), + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Redhat130Template()), ) } @@ -3005,20 +3046,22 @@ func TestCloudStackKubernetes127To128Redhat8StackedEtcdUpgrade(t *testing.T) { ) } -func TestCloudStackKubernetes125To126Redhat9UnstackedEtcdUpgrade(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()) +func TestCloudStackKubernetes129To130Redhat8StackedEtcdUpgrade(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat129()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), - 
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithStackedEtcdTopology()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), ) runSimpleUpgradeFlow( test, - v1alpha1.Kube126, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), - provider.WithProviderUpgrade(provider.Redhat9Kubernetes126Template()), + v1alpha1.Kube130, + framework.WithClusterFiller(api.WithStackedEtcdTopology()), + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Redhat130Template()), ) } @@ -3073,21 +3116,20 @@ func TestCloudStackKubernetes128To129Redhat9UnstackedEtcdUpgrade(t *testing.T) { ) } -func TestCloudStackKubernetes125To126Redhat9StackedEtcdUpgrade(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()) +func TestCloudStackKubernetes129To130Redhat9UnstackedEtcdUpgrade(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes129()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), - framework.WithClusterFiller(api.WithWorkerNodeCount(1)), - framework.WithClusterFiller(api.WithStackedEtcdTopology()), ) runSimpleUpgradeFlow( test, - v1alpha1.Kube126, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), - provider.WithProviderUpgrade(provider.Redhat9Kubernetes126Template()), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Redhat9Kubernetes130Template()), ) } @@ -3127,21 +3169,21 @@ func TestCloudStackKubernetes127To128Redhat9StackedEtcdUpgrade(t *testing.T) { ) } -func TestCloudStackKubernetes125Redhat8ToRedhat9Upgrade(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat125()) +func TestCloudStackKubernetes129To130Redhat9StackedEtcdUpgrade(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes129()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), - framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithStackedEtcdTopology()), ) runSimpleUpgradeFlow( test, - v1alpha1.Kube125, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube125)), - provider.WithProviderUpgrade(provider.Redhat9Kubernetes125Template()), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Redhat9Kubernetes130Template()), ) } @@ -3217,37 +3259,25 @@ func TestCloudStackKubernetes129Redhat8ToRedhat9Upgrade(t *testing.T) { ) } -// TODO: investigate these tests further as they pass even without the expected behavior(upgrade should fail the first time and continue from the checkpoint on second upgrade) -func 
TestCloudStackKubernetes125RedhatTo126UpgradeWithCheckpoint(t *testing.T) { - var clusterOpts []framework.ClusterE2ETestOpt - var clusterOpts2 []framework.ClusterE2ETestOpt - - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()) +func TestCloudStackKubernetes130Redhat8ToRedhat9Upgrade(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat130()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), ) - - clusterOpts = append(clusterOpts, framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), framework.ExpectFailure(true), - provider.WithProviderUpgrade(provider.Redhat9Kubernetes125Template()), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupVmsVar, "false")) - - commandOpts := []framework.CommandOpt{framework.WithExternalEtcdWaitTimeout("10m")} - - clusterOpts2 = append(clusterOpts, framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), framework.ExpectFailure(false), - provider.WithProviderUpgrade(provider.Redhat9Kubernetes126Template()), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupVmsVar, "true")) - - runUpgradeFlowWithCheckpoint( + runSimpleUpgradeFlow( test, - v1alpha1.Kube126, - clusterOpts, - clusterOpts2, - commandOpts, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Redhat9Kubernetes130Template()), ) } +// TODO: investigate these tests further as they pass even without the expected behavior(upgrade should fail the first time and continue from the checkpoint on second upgrade) func TestCloudStackKubernetes126RedhatTo127UpgradeWithCheckpoint(t *testing.T) { var clusterOpts []framework.ClusterE2ETestOpt var clusterOpts2 []framework.ClusterE2ETestOpt @@ -3262,12 +3292,12 @@ func TestCloudStackKubernetes126RedhatTo127UpgradeWithCheckpoint(t *testing.T) { ) clusterOpts = append(clusterOpts, framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube127)), framework.ExpectFailure(true), - provider.WithProviderUpgrade(provider.Redhat9Kubernetes126Template()), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupVmsVar, "false")) + provider.WithProviderUpgrade(provider.Redhat9Kubernetes126Template()), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupResourcesVar, "false")) commandOpts := []framework.CommandOpt{framework.WithExternalEtcdWaitTimeout("10m")} clusterOpts2 = append(clusterOpts, framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube127)), framework.ExpectFailure(false), - provider.WithProviderUpgrade(provider.Redhat9Kubernetes127Template()), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupVmsVar, "true")) + provider.WithProviderUpgrade(provider.Redhat9Kubernetes127Template()), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupResourcesVar, "true")) runUpgradeFlowWithCheckpoint( test, @@ -3292,12 +3322,12 @@ func 
TestCloudStackKubernetes127RedhatTo128UpgradeWithCheckpoint(t *testing.T) { ) clusterOpts = append(clusterOpts, framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube128)), framework.ExpectFailure(true), - provider.WithProviderUpgrade(provider.Redhat9Kubernetes127Template()), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupVmsVar, "false")) + provider.WithProviderUpgrade(provider.Redhat9Kubernetes127Template()), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupResourcesVar, "false")) commandOpts := []framework.CommandOpt{framework.WithExternalEtcdWaitTimeout("10m")} clusterOpts2 = append(clusterOpts, framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube128)), framework.ExpectFailure(false), - provider.WithProviderUpgrade(provider.Redhat9Kubernetes128Template()), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupVmsVar, "true")) + provider.WithProviderUpgrade(provider.Redhat9Kubernetes128Template()), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupResourcesVar, "true")) runUpgradeFlowWithCheckpoint( test, @@ -3308,19 +3338,33 @@ func TestCloudStackKubernetes127RedhatTo128UpgradeWithCheckpoint(t *testing.T) { ) } -func TestCloudStackKubernetes125RedhatControlPlaneNodeUpgrade(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()) +func TestCloudStackKubernetes129RedhatTo130UpgradeWithCheckpoint(t *testing.T) { + var clusterOpts []framework.ClusterE2ETestOpt + var clusterOpts2 []framework.ClusterE2ETestOpt + + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes129()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), - framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), ) - runSimpleUpgradeFlow( + + clusterOpts = append(clusterOpts, framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.ExpectFailure(true), + provider.WithProviderUpgrade(provider.Redhat9Kubernetes129Template()), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupResourcesVar, "false")) + + commandOpts := []framework.CommandOpt{framework.WithExternalEtcdWaitTimeout("10m")} + + clusterOpts2 = append(clusterOpts, framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.ExpectFailure(false), + provider.WithProviderUpgrade(provider.Redhat9Kubernetes130Template()), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupResourcesVar, "true")) + + runUpgradeFlowWithCheckpoint( test, - v1alpha1.Kube125, - framework.WithClusterUpgrade(api.WithControlPlaneCount(3)), + v1alpha1.Kube130, + clusterOpts, + clusterOpts2, + commandOpts, ) } @@ -3388,19 +3432,19 @@ func TestCloudStackKubernetes129RedhatControlPlaneNodeUpgrade(t *testing.T) { ) } -func TestCloudStackKubernetes125RedhatWorkerNodeUpgrade(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()) +func TestCloudStackKubernetes130RedhatControlPlaneNodeUpgrade(t *testing.T) { + provider := 
framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(3)), ) runSimpleUpgradeFlow( test, - v1alpha1.Kube125, - framework.WithClusterUpgrade(api.WithWorkerNodeCount(5)), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithControlPlaneCount(3)), ) } @@ -3468,22 +3512,19 @@ func TestCloudStackKubernetes129RedhatWorkerNodeUpgrade(t *testing.T) { ) } -func TestCloudStackKubernetes125To126RedhatMultipleFieldsUpgrade(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()) +func TestCloudStackKubernetes130RedhatWorkerNodeUpgrade(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), ) runSimpleUpgradeFlow( test, - v1alpha1.Kube126, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), - framework.WithClusterFiller(api.WithControlPlaneCount(1)), - provider.WithProviderUpgrade( - provider.Redhat9Kubernetes126Template(), - framework.UpdateLargerCloudStackComputeOffering(), - ), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithWorkerNodeCount(5)), ) } @@ -3544,32 +3585,51 @@ func TestCloudStackKubernetes128To129RedhatMultipleFieldsUpgrade(t *testing.T) { ) } -func TestCloudStackKubernetes128To129StackedEtcdRedhatMultipleFieldsUpgrade(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes128()) +func TestCloudStackKubernetes129To130RedhatMultipleFieldsUpgrade(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes129()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + ) + runSimpleUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + provider.WithProviderUpgrade( + provider.Redhat9Kubernetes130Template(), + framework.UpdateLargerCloudStackComputeOffering(), + ), + ) +} + +func TestCloudStackKubernetes129To130StackedEtcdRedhatMultipleFieldsUpgrade(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes129()) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), framework.WithClusterFiller(api.WithStackedEtcdTopology()), ) runSimpleUpgradeFlow( test, - v1alpha1.Kube129, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube129)), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), provider.WithProviderUpgrade( - provider.Redhat9Kubernetes129Template(), + provider.Redhat9Kubernetes130Template(), 
framework.UpdateLargerCloudStackComputeOffering(), ), ) } // This test is skipped as registry mirror was not configured for CloudStack -func TestCloudStackKubernetes125RedhatAirgappedRegistryMirror(t *testing.T) { +func TestCloudStackKubernetes126RedhatAirgappedRegistryMirror(t *testing.T) { test := framework.NewClusterE2ETest( t, framework.NewCloudStack(t, - framework.WithCloudStackRedhat9Kubernetes125(), + framework.WithCloudStackRedhat9Kubernetes126(), framework.WithCloudStackFillers( framework.RemoveAllCloudStackAzs(), framework.UpdateAddCloudStackAz3(), @@ -3581,17 +3641,17 @@ func TestCloudStackKubernetes125RedhatAirgappedRegistryMirror(t *testing.T) { api.WithWorkerNodeCount(1), ), // framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), there is a bug that the etcd node download etcd from internet - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)), framework.WithRegistryMirrorEndpointAndCert(constants.CloudStackProviderName), ) runAirgapConfigFlow(test, "10.0.0.1/8") } -func TestCloudStackKubernetes126RedhatAirgappedRegistryMirror(t *testing.T) { +func TestCloudStackKubernetes128RedhatAirgappedProxy(t *testing.T) { test := framework.NewClusterE2ETest( t, framework.NewCloudStack(t, - framework.WithCloudStackRedhat9Kubernetes126(), + framework.WithCloudStackRedhat9Kubernetes128(), framework.WithCloudStackFillers( framework.RemoveAllCloudStackAzs(), framework.UpdateAddCloudStackAz3(), @@ -3602,11 +3662,11 @@ func TestCloudStackKubernetes126RedhatAirgappedRegistryMirror(t *testing.T) { api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), ), - // framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), there is a bug that the etcd node download etcd from internet - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)), - framework.WithRegistryMirrorEndpointAndCert(constants.CloudStackProviderName), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), + framework.WithProxy(framework.CloudstackProxyRequiredEnvVars), ) - runAirgapConfigFlow(test, "10.0.0.1/8") + + runAirgapConfigProxyFlow(test, "10.0.0.1/8") } // Workload API @@ -4084,6 +4144,28 @@ func TestCloudStackWorkloadClusterOIDCAuthGithubFluxAPI(t *testing.T) { test.DeleteManagementCluster() } +func TestCloudStackKubernetes129EtcdEncryption(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat129()) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller( + api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithExternalEtcdTopology(1), + api.WithControlPlaneCount(1), + ), + framework.WithPodIamConfig(), + ) + test.OSFamily = v1alpha1.RedHat + test.GenerateClusterConfig() + test.CreateCluster() + test.PostClusterCreateEtcdEncryptionSetup() + test.UpgradeClusterWithNewConfig([]framework.ClusterE2ETestOpt{framework.WithEtcdEncrytion()}) + test.StopIfFailed() + test.ValidateEtcdEncryption() + test.DeleteCluster() +} + func TestCloudStackKubernetes127To128RedHatManagementCPUpgradeAPI(t *testing.T) { provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes127()) test := framework.NewClusterE2ETest( diff --git a/test/e2e/conformance_test.go b/test/e2e/conformance_test.go index c72283f432c5..bd7d55fc4fe0 100644 --- a/test/e2e/conformance_test.go +++ b/test/e2e/conformance_test.go @@ -49,6 +49,36 @@ func TestDockerKubernetes126ThreeWorkersConformanceFlow(t *testing.T) { runConformanceFlow(test) } 
+func TestDockerKubernetes127ThreeWorkersConformanceFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewDocker(t), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + ) + runConformanceFlow(test) +} + +func TestDockerKubernetes128ThreeWorkersConformanceFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewDocker(t), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + ) + runConformanceFlow(test) +} + +func TestDockerKubernetes129ThreeWorkersConformanceFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewDocker(t), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + ) + runConformanceFlow(test) +} + func TestVSphereKubernetes125ThreeWorkersConformanceFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -69,6 +99,36 @@ func TestVSphereKubernetes126ThreeWorkersConformanceFlow(t *testing.T) { runConformanceFlow(test) } +func TestVSphereKubernetes127ThreeWorkersConformanceFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu127()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + ) + runConformanceFlow(test) +} + +func TestVSphereKubernetes128ThreeWorkersConformanceFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu128()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + ) + runConformanceFlow(test) +} + +func TestVSphereKubernetes129ThreeWorkersConformanceFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu129()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + ) + runConformanceFlow(test) +} + func TestVSphereKubernetes125BottleRocketThreeWorkersConformanceFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -89,6 +149,36 @@ func TestVSphereKubernetes126BottleRocketThreeWorkersConformanceFlow(t *testing. 
runConformanceFlow(test) } +func TestVSphereKubernetes127BottleRocketThreeWorkersConformanceFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithBottleRocket127()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + ) + runConformanceFlow(test) +} + +func TestVSphereKubernetes128BottleRocketThreeWorkersConformanceFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithBottleRocket128()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + ) + runConformanceFlow(test) +} + +func TestVSphereKubernetes129BottleRocketThreeWorkersConformanceFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithBottleRocket129()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + ) + runConformanceFlow(test) +} + func TestTinkerbellKubernetes125ThreeReplicasTwoWorkersConformanceFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -115,6 +205,45 @@ func TestTinkerbellKubernetes126ThreeReplicasTwoWorkersConformanceFlow(t *testin runTinkerbellConformanceFlow(test) } +func TestTinkerbellKubernetes127ThreeReplicasTwoWorkersConformanceFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), + framework.WithClusterFiller(api.WithWorkerNodeCount(2)), + framework.WithClusterFiller(api.WithControlPlaneCount(3)), + framework.WithControlPlaneHardware(3), + framework.WithWorkerHardware(2), + ) + runTinkerbellConformanceFlow(test) +} + +func TestTinkerbellKubernetes128ThreeReplicasTwoWorkersConformanceFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewTinkerbell(t, framework.WithUbuntu128Tinkerbell()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), + framework.WithClusterFiller(api.WithWorkerNodeCount(2)), + framework.WithClusterFiller(api.WithControlPlaneCount(3)), + framework.WithControlPlaneHardware(3), + framework.WithWorkerHardware(2), + ) + runTinkerbellConformanceFlow(test) +} + +func TestTinkerbellKubernetes129ThreeReplicasTwoWorkersConformanceFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithWorkerNodeCount(2)), + framework.WithClusterFiller(api.WithControlPlaneCount(3)), + framework.WithControlPlaneHardware(3), + framework.WithWorkerHardware(2), + ) + runTinkerbellConformanceFlow(test) +} + func TestTinkerbellKubernetes125BottleRocketThreeReplicasTwoWorkersConformanceFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -170,3 +299,23 @@ func TestNutanixKubernetes127ThreeWorkersConformanceFlow(t *testing.T) { ) runConformanceFlow(test) } + +func TestNutanixKubernetes128ThreeWorkersConformanceFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewNutanix(t, framework.WithUbuntu128Nutanix()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + ) + runConformanceFlow(test) +} + +func 
TestNutanixKubernetes129ThreeWorkersConformanceFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewNutanix(t, framework.WithUbuntu129Nutanix()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + ) + runConformanceFlow(test) +} diff --git a/test/e2e/constants.go b/test/e2e/constants.go index 600138a56c4d..47e0c9892e89 100644 --- a/test/e2e/constants.go +++ b/test/e2e/constants.go @@ -48,5 +48,5 @@ const ( var ( EksaPackageControllerHelmValues = []string{"sourceRegistry=public.ecr.aws/l0g8r8j6"} - KubeVersions = []v1alpha1.KubernetesVersion{v1alpha1.Kube125, v1alpha1.Kube126, v1alpha1.Kube127, v1alpha1.Kube128, v1alpha1.Kube129} + KubeVersions = []v1alpha1.KubernetesVersion{v1alpha1.Kube125, v1alpha1.Kube126, v1alpha1.Kube127, v1alpha1.Kube128, v1alpha1.Kube129, v1alpha1.Kube130} ) diff --git a/test/e2e/curatedpackages.go b/test/e2e/curatedpackages.go index cd7f703f2f54..547b30e82e82 100644 --- a/test/e2e/curatedpackages.go +++ b/test/e2e/curatedpackages.go @@ -21,7 +21,6 @@ import ( func runCuratedPackageInstall(test *framework.ClusterE2ETest) { test.SetPackageBundleActive() - test.GenerateSupportBundleOnCleanupIfTestFailed() err := WaitForPackageToBeInstalled(test, context.Background(), "eks-anywhere-packages", 3*time.Minute) if err != nil { test.T.Fatalf("packages controller not in installed state: %s", err) @@ -34,7 +33,7 @@ func runCuratedPackageInstall(test *framework.ClusterE2ETest) { packagePrefix := "test" packageFile := test.BuildPackageConfigFile(packageName, packagePrefix, EksaPackagesNamespace) test.InstallCuratedPackageFile(packageFile, kubeconfig.FromClusterName(test.ClusterName)) - test.VerifyHelloPackageInstalled(packagePrefix+"-"+packageName, withMgmtCluster(test)) + test.VerifyHelloPackageInstalled(packagePrefix+"-"+packageName, withCluster(test)) } func runCuratedPackageInstallSimpleFlow(test *framework.ClusterE2ETest) { @@ -49,6 +48,10 @@ func runDisabledCuratedPackageInstallSimpleFlow(test *framework.ClusterE2ETest) test.WithCluster(runDisabledCuratedPackage) } +func runCuratedPackageInstallSimpleFlowRegistryMirror(test *framework.ClusterE2ETest) { + test.WithClusterRegistryMirror(runCuratedPackageInstall) +} + func runCuratedPackageRemoteClusterInstallSimpleFlow(test *framework.MulticlusterE2ETest) { test.CreateManagementClusterWithConfig() test.RunInWorkloadClusters(func(e *framework.WorkloadCluster) { @@ -62,7 +65,7 @@ func runCuratedPackageRemoteClusterInstallSimpleFlow(test *framework.Multicluste packagePrefix := "test" packageFile := e.BuildPackageConfigFile(packageName, packagePrefix, EksaPackagesNamespace) test.ManagementCluster.InstallCuratedPackageFile(packageFile, kubeconfig.FromClusterName(test.ManagementCluster.ClusterName)) - e.VerifyHelloPackageInstalled(packagePrefix+"-"+packageName, withMgmtCluster(test.ManagementCluster)) + e.VerifyHelloPackageInstalled(packagePrefix+"-"+packageName, withCluster(test.ManagementCluster)) e.DeleteClusterWithKubectl() e.ValidateClusterDelete() }) @@ -216,7 +219,7 @@ func packageBundleURI(version v1alpha1.KubernetesVersion) string { return fmt.Sprintf("%s:%s", EksaPackageBundleURI, tag) } -func withMgmtCluster(cluster *framework.ClusterE2ETest) *types.Cluster { +func withCluster(cluster *framework.ClusterE2ETest) *types.Cluster { return &types.Cluster{ Name: cluster.ClusterName, KubeconfigFile: filepath.Join(cluster.ClusterName, fmt.Sprintf("%s-eks-a-cluster.kubeconfig", cluster.ClusterName)), diff --git 
a/test/e2e/docker_test.go b/test/e2e/docker_test.go index 8d7dc2311084..d70911ac2d68 100644 --- a/test/e2e/docker_test.go +++ b/test/e2e/docker_test.go @@ -94,18 +94,6 @@ func TestDockerInstallGithubFluxDuringUpgrade(t *testing.T) { ) } -func TestDockerKubernetes125CuratedPackagesSimpleFlow(t *testing.T) { - framework.CheckCuratedPackagesCredentials(t) - test := framework.NewClusterE2ETest(t, - framework.NewDocker(t), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), - EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, - EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), - ) - runCuratedPackageInstallSimpleFlow(test) -} - func TestDockerKubernetes126CuratedPackagesSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, @@ -154,16 +142,16 @@ func TestDockerKubernetes129CuratedPackagesSimpleFlow(t *testing.T) { runCuratedPackageInstallSimpleFlow(test) } -func TestDockerKubernetes125CuratedPackagesEmissarySimpleFlow(t *testing.T) { +func TestDockerKubernetes130CuratedPackagesSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, framework.NewDocker(t), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runCuratedPackageEmissaryInstallSimpleFlow(test) + runCuratedPackageInstallSimpleFlow(test) } func TestDockerKubernetes126CuratedPackagesEmissarySimpleFlow(t *testing.T) { @@ -214,16 +202,16 @@ func TestDockerKubernetes129CuratedPackagesEmissarySimpleFlow(t *testing.T) { runCuratedPackageEmissaryInstallSimpleFlow(test) } -func TestDockerKubernetes125CuratedPackagesHarborSimpleFlow(t *testing.T) { +func TestDockerKubernetes130CuratedPackagesEmissarySimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, framework.NewDocker(t), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test) + runCuratedPackageEmissaryInstallSimpleFlow(test) } func TestDockerKubernetes126CuratedPackagesHarborSimpleFlow(t *testing.T) { @@ -274,15 +262,16 @@ func TestDockerKubernetes129CuratedPackagesHarborSimpleFlow(t *testing.T) { runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test) } -func TestDockerKubernetes125CuratedPackagesAdotSimpleFlow(t *testing.T) { +func TestDockerKubernetes130CuratedPackagesHarborSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) - test := framework.NewClusterE2ETest(t, framework.NewDocker(t), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + test := 
framework.NewClusterE2ETest(t, + framework.NewDocker(t), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runCuratedPackagesAdotInstallSimpleFlow(test) // other args as necessary + runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test) } func TestDockerKubernetes126CuratedPackagesAdotSimpleFlow(t *testing.T) { @@ -329,15 +318,15 @@ func TestDockerKubernetes129CuratedPackagesAdotSimpleFlow(t *testing.T) { runCuratedPackagesAdotInstallSimpleFlow(test) // other args as necessary } -func TestDockerKubernetes125CuratedPackagesPrometheusSimpleFlow(t *testing.T) { +func TestDockerKubernetes130CuratedPackagesAdotSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, framework.NewDocker(t), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runCuratedPackagesPrometheusInstallSimpleFlow(test) + runCuratedPackagesAdotInstallSimpleFlow(test) // other args as necessary } func TestDockerKubernetes126CuratedPackagesPrometheusSimpleFlow(t *testing.T) { @@ -384,16 +373,15 @@ func TestDockerKubernetes129CuratedPackagesPrometheusSimpleFlow(t *testing.T) { runCuratedPackagesPrometheusInstallSimpleFlow(test) } -func TestDockerKubernetes125CuratedPackagesDisabled(t *testing.T) { +func TestDockerKubernetes130CuratedPackagesPrometheusSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, framework.NewDocker(t), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, - EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, - &v1alpha1.PackageConfiguration{Disable: true}), + EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runDisabledCuratedPackageInstallSimpleFlow(test) // other args as necessary + runCuratedPackagesPrometheusInstallSimpleFlow(test) } func TestDockerKubernetes126CuratedPackagesDisabled(t *testing.T) { @@ -420,8 +408,16 @@ func TestDockerKubernetes128CuratedPackagesDisabled(t *testing.T) { runDisabledCuratedPackageInstallSimpleFlow(test) // other args as necessary } -func TestDockerKubernetes125CuratedPackagesMetalLB(t *testing.T) { - RunMetalLBDockerTestsForKubeVersion(t, v1alpha1.Kube125) +func TestDockerKubernetes129CuratedPackagesDisabled(t *testing.T) { + framework.CheckCuratedPackagesCredentials(t) + test := framework.NewClusterE2ETest(t, framework.NewDocker(t), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube129), + EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, + EksaPackageControllerHelmVersion, 
EksaPackageControllerHelmValues, + &v1alpha1.PackageConfiguration{Disable: true}), + ) + runDisabledCuratedPackageInstallSimpleFlow(test) // other args as necessary } func TestDockerKubernetes126CuratedPackagesMetalLB(t *testing.T) { @@ -440,16 +436,11 @@ func TestDockerKubernetes129CuratedPackagesMetalLB(t *testing.T) { RunMetalLBDockerTestsForKubeVersion(t, v1alpha1.Kube129) } -// AWS IAM Auth -func TestDockerKubernetes125AWSIamAuth(t *testing.T) { - test := framework.NewClusterE2ETest(t, - framework.NewDocker(t), - framework.WithAWSIam(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - ) - runAWSIamAuthFlow(test) +func TestDockerKubernetes130CuratedPackagesMetalLB(t *testing.T) { + RunMetalLBDockerTestsForKubeVersion(t, v1alpha1.Kube130) } +// AWS IAM Auth func TestDockerKubernetes126AWSIamAuth(t *testing.T) { test := framework.NewClusterE2ETest(t, framework.NewDocker(t), @@ -486,6 +477,15 @@ func TestDockerKubernetes129AWSIamAuth(t *testing.T) { runAWSIamAuthFlow(test) } +func TestDockerKubernetes130AWSIamAuth(t *testing.T) { + test := framework.NewClusterE2ETest(t, + framework.NewDocker(t), + framework.WithAWSIam(), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + ) + runAWSIamAuthFlow(test) +} + // Flux func TestDockerKubernetes125UpgradeWorkloadClusterWithGithubFlux(t *testing.T) { provider := framework.NewDocker(t) @@ -526,15 +526,6 @@ func TestDockerKubernetes125UpgradeWorkloadClusterWithGithubFlux(t *testing.T) { } // OIDC -func TestDockerKubernetes125OIDC(t *testing.T) { - test := framework.NewClusterE2ETest(t, - framework.NewDocker(t), - framework.WithOIDC(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - ) - runOIDCFlow(test) -} - func TestDockerKubernetes126OIDC(t *testing.T) { test := framework.NewClusterE2ETest(t, framework.NewDocker(t), @@ -571,66 +562,66 @@ func TestDockerKubernetes129OIDC(t *testing.T) { runOIDCFlow(test) } +func TestDockerKubernetes130OIDC(t *testing.T) { + test := framework.NewClusterE2ETest(t, + framework.NewDocker(t), + framework.WithOIDC(), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + ) + runOIDCFlow(test) +} + // RegistryMirror -func TestDockerKubernetes127RegistryMirrorAndCert(t *testing.T) { +func TestDockerKubernetes130RegistryMirrorAndCert(t *testing.T) { test := framework.NewClusterE2ETest( t, framework.NewDocker(t), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithRegistryMirrorEndpointAndCert(constants.DockerProviderName), ) runRegistryMirrorConfigFlow(test) } -func TestDockerKubernetes127AirgappedRegistryMirrorAndCert(t *testing.T) { +func TestDockerKubernetes130AirgappedRegistryMirrorAndCert(t *testing.T) { test := framework.NewClusterE2ETest( t, framework.NewDocker(t), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithRegistryMirrorEndpointAndCert(constants.DockerProviderName), ) runDockerAirgapConfigFlow(test) } -func TestDockerKubernetes127AirgappedUpgradeFromLatestRegistryMirrorAndCert(t *testing.T) { +func TestDockerKubernetes130AirgappedUpgradeFromLatestRegistryMirrorAndCert(t *testing.T) { release := latestMinorRelease(t) test := framework.NewClusterE2ETest( t, 
framework.NewDocker(t), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithRegistryMirrorEndpointAndCert(constants.DockerProviderName), ) runDockerAirgapUpgradeFromReleaseFlow( test, release, - v1alpha1.Kube127, + v1alpha1.Kube130, ) } -func TestDockerKubernetes127RegistryMirrorInsecureSkipVerify(t *testing.T) { +func TestDockerKubernetes130RegistryMirrorInsecureSkipVerify(t *testing.T) { test := framework.NewClusterE2ETest( t, framework.NewDocker(t), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithRegistryMirrorInsecureSkipVerify(constants.DockerProviderName), ) runRegistryMirrorConfigFlow(test) } // Simple flow -func TestDockerKubernetes125SimpleFlow(t *testing.T) { - test := framework.NewClusterE2ETest( - t, - framework.NewDocker(t), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - ) - runSimpleFlow(test) -} - func TestDockerKubernetes126SimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -667,6 +658,15 @@ func TestDockerKubernetes129SimpleFlow(t *testing.T) { runSimpleFlow(test) } +func TestDockerKubernetes130SimpleFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewDocker(t), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + ) + runSimpleFlow(test) +} + // Stacked etcd func TestDockerKubernetesStackedEtcd(t *testing.T) { test := framework.NewClusterE2ETest(t, @@ -676,14 +676,14 @@ func TestDockerKubernetesStackedEtcd(t *testing.T) { } // Taints -func TestDockerKubernetes128Taints(t *testing.T) { +func TestDockerKubernetes130Taints(t *testing.T) { provider := framework.NewDocker(t) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -695,7 +695,7 @@ func TestDockerKubernetes128Taints(t *testing.T) { runTaintsUpgradeFlow( test, - v1alpha1.Kube128, + v1alpha1.Kube130, framework.WithClusterUpgrade( api.WithWorkerNodeGroup(worker0, api.WithTaint(framework.NoExecuteTaint())), api.WithWorkerNodeGroup(worker1, api.WithTaint(framework.NoExecuteTaint())), @@ -705,14 +705,14 @@ func TestDockerKubernetes128Taints(t *testing.T) { ) } -func TestDockerKubernetes127WorkloadClusterTaints(t *testing.T) { +func TestDockerKubernetes130WorkloadClusterTaints(t *testing.T) { provider := framework.NewDocker(t) managementCluster := framework.NewClusterE2ETest( t, provider, ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithExternalEtcdTopology(1), @@ -726,7 +726,7 @@ func TestDockerKubernetes127WorkloadClusterTaints(t *testing.T) { t, provider, framework.WithClusterName(test.NewWorkloadClusterName()), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithManagementCluster(managementCluster.ClusterName), api.WithControlPlaneCount(1), 
api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -741,43 +741,43 @@ func TestDockerKubernetes127WorkloadClusterTaints(t *testing.T) { } // Upgrade -func TestDockerKubernetes127To128StackedEtcdUpgrade(t *testing.T) { +func TestDockerKubernetes129To130StackedEtcdUpgrade(t *testing.T) { provider := framework.NewDocker(t) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller(api.WithStackedEtcdTopology()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), ) runSimpleUpgradeFlow( test, - v1alpha1.Kube128, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube128)), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), ) } -func TestDockerKubernetes127To128ExternalEtcdUpgrade(t *testing.T) { +func TestDockerKubernetes129To130ExternalEtcdUpgrade(t *testing.T) { provider := framework.NewDocker(t) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), ) runSimpleUpgradeFlow( test, - v1alpha1.Kube128, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube128)), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), ) } -func TestDockerKubernetes125to126UpgradeFromLatestMinorRelease(t *testing.T) { +func TestDockerKubernetes126to127UpgradeFromLatestMinorRelease(t *testing.T) { release := latestMinorRelease(t) provider := framework.NewDocker(t) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), @@ -785,18 +785,18 @@ func TestDockerKubernetes125to126UpgradeFromLatestMinorRelease(t *testing.T) { runUpgradeFromReleaseFlow( test, release, - v1alpha1.Kube126, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), + v1alpha1.Kube127, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube127)), ) } -func TestDockerKubernetes126to127UpgradeFromLatestMinorRelease(t *testing.T) { +func TestDockerKubernetes127to128UpgradeFromLatestMinorRelease(t *testing.T) { release := latestMinorRelease(t) provider := framework.NewDocker(t) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), @@ -804,18 +804,18 @@ func TestDockerKubernetes126to127UpgradeFromLatestMinorRelease(t *testing.T) { runUpgradeFromReleaseFlow( test, release, - v1alpha1.Kube127, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube127)), + v1alpha1.Kube128, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube128)), ) } -func TestDockerKubernetes127to128UpgradeFromLatestMinorRelease(t *testing.T) { +func TestDockerKubernetes128to129UpgradeFromLatestMinorRelease(t *testing.T) { 
release := latestMinorRelease(t) provider := framework.NewDocker(t) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), @@ -823,18 +823,18 @@ func TestDockerKubernetes127to128UpgradeFromLatestMinorRelease(t *testing.T) { runUpgradeFromReleaseFlow( test, release, - v1alpha1.Kube128, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube128)), + v1alpha1.Kube129, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube129)), ) } -func TestDockerKubernetes128to129UpgradeFromLatestMinorRelease(t *testing.T) { +func TestDockerKubernetes129to130UpgradeFromLatestMinorRelease(t *testing.T) { release := latestMinorRelease(t) provider := framework.NewDocker(t) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), @@ -842,8 +842,8 @@ func TestDockerKubernetes128to129UpgradeFromLatestMinorRelease(t *testing.T) { runUpgradeFromReleaseFlow( test, release, - v1alpha1.Kube129, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube129)), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), ) } @@ -1022,7 +1022,7 @@ func TestDockerUpgradeWorkloadClusterScaleAddRemoveWorkerNodeGroupsAPI(t *testin ) } -func TestDockerKubernetes127to128UpgradeFromLatestMinorReleaseAPI(t *testing.T) { +func TestDockerKubernetes129to130UpgradeFromLatestMinorReleaseAPI(t *testing.T) { release := latestMinorRelease(t) provider := framework.NewDocker(t) managementCluster := framework.NewClusterE2ETest( @@ -1030,7 +1030,7 @@ func TestDockerKubernetes127to128UpgradeFromLatestMinorReleaseAPI(t *testing.T) ) managementCluster.GenerateClusterConfigForVersion(release.Version, framework.ExecuteWithEksaRelease(release)) managementCluster.UpdateClusterConfig(api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube129), )) test := framework.NewMulticlusterE2ETest(t, managementCluster) @@ -1039,7 +1039,7 @@ func TestDockerKubernetes127to128UpgradeFromLatestMinorReleaseAPI(t *testing.T) ) wc.GenerateClusterConfigForVersion(release.Version, framework.ExecuteWithEksaRelease(release)) wc.UpdateClusterConfig(api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithManagementCluster(managementCluster.ClusterName), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), @@ -1050,18 +1050,18 @@ func TestDockerKubernetes127to128UpgradeFromLatestMinorReleaseAPI(t *testing.T) runMulticlusterUpgradeFromReleaseFlowAPI( test, release, - v1alpha1.Kube128, + v1alpha1.Kube130, "", ) } -func TestDockerUpgradeKubernetes127to128WorkloadClusterScaleupGitHubFluxAPI(t *testing.T) { +func TestDockerUpgradeKubernetes129to130WorkloadClusterScaleupGitHubFluxAPI(t *testing.T) { provider := framework.NewDocker(t) managementCluster := framework.NewClusterE2ETest( t, provider, 
framework.WithFluxGithubEnvVarCheck(), framework.WithFluxGithubCleanup(), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithExternalEtcdTopology(1), @@ -1074,7 +1074,7 @@ func TestDockerUpgradeKubernetes127to128WorkloadClusterScaleupGitHubFluxAPI(t *t t, provider, framework.WithClusterName(test.NewWorkloadClusterName()), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithManagementCluster(managementCluster.ClusterName), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -1086,20 +1086,20 @@ func TestDockerUpgradeKubernetes127to128WorkloadClusterScaleupGitHubFluxAPI(t *t runWorkloadClusterUpgradeFlowAPIWithFlux( test, api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneCount(3), api.WithWorkerNodeGroup("worker-0", api.WithCount(2)), ), ) } -func TestDockerKubernetes128UpgradeWorkloadClusterLabelsAndTaintsGitHubFluxAPI(t *testing.T) { +func TestDockerKubernetes130UpgradeWorkloadClusterLabelsAndTaintsGitHubFluxAPI(t *testing.T) { provider := framework.NewDocker(t) managementCluster := framework.NewClusterE2ETest( t, provider, framework.WithFluxGithubEnvVarCheck(), framework.WithFluxGithubCleanup(), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithExternalEtcdTopology(1), @@ -1112,7 +1112,7 @@ func TestDockerKubernetes128UpgradeWorkloadClusterLabelsAndTaintsGitHubFluxAPI(t t, provider, framework.WithClusterName(test.NewWorkloadClusterName()), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithManagementCluster(managementCluster.ClusterName), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -1136,13 +1136,13 @@ func TestDockerKubernetes128UpgradeWorkloadClusterLabelsAndTaintsGitHubFluxAPI(t ) } -func TestDockerKubernetes128UpgradeWorkloadClusterScaleAddRemoveWorkerNodeGroupsGitHubFluxAPI(t *testing.T) { +func TestDockerKubernetes130UpgradeWorkloadClusterScaleAddRemoveWorkerNodeGroupsGitHubFluxAPI(t *testing.T) { provider := framework.NewDocker(t) managementCluster := framework.NewClusterE2ETest( t, provider, framework.WithFluxGithubEnvVarCheck(), framework.WithFluxGithubCleanup(), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithExternalEtcdTopology(1), @@ -1158,7 +1158,7 @@ func TestDockerKubernetes128UpgradeWorkloadClusterScaleAddRemoveWorkerNodeGroups t, provider, framework.WithClusterName(test.NewWorkloadClusterName()), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithManagementCluster(managementCluster.ClusterName), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -1371,12 +1371,12 @@ func TestDockerKubernetesUpgradeManagementComponents(t *testing.T) { } // etcd scale tests -func 
TestDockerKubernetes128EtcdScaleUp(t *testing.T) { +func TestDockerKubernetes130EtcdScaleUp(t *testing.T) { test := framework.NewClusterE2ETest( t, framework.NewDocker(t), framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), @@ -1385,19 +1385,19 @@ func TestDockerKubernetes128EtcdScaleUp(t *testing.T) { runSimpleUpgradeFlow( test, - v1alpha1.Kube128, + v1alpha1.Kube130, framework.WithClusterUpgrade( api.WithExternalEtcdTopology(3), ), ) } -func TestDockerKubernetes128EtcdScaleDown(t *testing.T) { +func TestDockerKubernetes130EtcdScaleDown(t *testing.T) { test := framework.NewClusterE2ETest( t, framework.NewDocker(t), framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithExternalEtcdTopology(3), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), @@ -1406,20 +1406,20 @@ func TestDockerKubernetes128EtcdScaleDown(t *testing.T) { runSimpleUpgradeFlow( test, - v1alpha1.Kube128, + v1alpha1.Kube130, framework.WithClusterUpgrade( api.WithExternalEtcdTopology(1), ), ) } -func TestDockerKubernetes127to128EtcdScaleUp(t *testing.T) { +func TestDockerKubernetes129to130EtcdScaleUp(t *testing.T) { provider := framework.NewDocker(t) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), @@ -1428,21 +1428,21 @@ func TestDockerKubernetes127to128EtcdScaleUp(t *testing.T) { runSimpleUpgradeFlow( test, - v1alpha1.Kube128, + v1alpha1.Kube130, framework.WithClusterUpgrade( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithExternalEtcdTopology(3), ), ) } -func TestDockerKubernetes127to128EtcdScaleDown(t *testing.T) { +func TestDockerKubernetes129to130EtcdScaleDown(t *testing.T) { provider := framework.NewDocker(t) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithExternalEtcdTopology(3), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), @@ -1451,9 +1451,9 @@ func TestDockerKubernetes127to128EtcdScaleDown(t *testing.T) { runSimpleUpgradeFlow( test, - v1alpha1.Kube128, + v1alpha1.Kube130, framework.WithClusterUpgrade( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithExternalEtcdTopology(1), ), ) diff --git a/test/e2e/emissary.go b/test/e2e/emissary.go index ef6c140f9fbf..73a6288da0a4 100644 --- a/test/e2e/emissary.go +++ b/test/e2e/emissary.go @@ -20,9 +20,9 @@ func runCuratedPackageEmissaryInstall(test *framework.ClusterE2ETest) { test.SetPackageBundleActive() packageFile := test.BuildPackageConfigFile(emissaryPackageName, emissaryPackagePrefix, EksaPackagesNamespace) test.InstallCuratedPackageFile(packageFile, kubeconfig.FromClusterName(test.ClusterName)) - test.VerifyEmissaryPackageInstalled(emissaryPackagePrefix+"-"+emissaryPackageName, withMgmtCluster(test)) + test.VerifyEmissaryPackageInstalled(emissaryPackagePrefix+"-"+emissaryPackageName, withCluster(test)) if test.Provider.Name() == constants.DockerProviderName { - test.TestEmissaryPackageRouting(emissaryPackagePrefix+"-"+emissaryPackageName, "hello", 
withMgmtCluster(test)) + test.TestEmissaryPackageRouting(emissaryPackagePrefix+"-"+emissaryPackageName, "hello", withCluster(test)) } } @@ -41,7 +41,7 @@ func runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test *framework.Mul test.ManagementCluster.SetPackageBundleActive() packageFile := e.BuildPackageConfigFile(emissaryPackageName, emissaryPackagePrefix, EksaPackagesNamespace) test.ManagementCluster.InstallCuratedPackageFile(packageFile, kubeconfig.FromClusterName(test.ManagementCluster.ClusterName)) - e.VerifyEmissaryPackageInstalled(emissaryPackagePrefix+"-"+emissaryPackageName, withMgmtCluster(test.ManagementCluster)) + e.VerifyEmissaryPackageInstalled(emissaryPackagePrefix+"-"+emissaryPackageName, withCluster(test.ManagementCluster)) e.DeleteClusterWithKubectl() e.ValidateClusterDelete() }) diff --git a/test/e2e/nutanix_test.go b/test/e2e/nutanix_test.go index ea47986ed777..6dcc5c200e94 100644 --- a/test/e2e/nutanix_test.go +++ b/test/e2e/nutanix_test.go @@ -157,78 +157,6 @@ func TestNutanixKubernetes126UbuntuCuratedPackagesClusterAutoscalerSimpleFlow(t runAutoscalerWithMetricsServerSimpleFlow(test) } -func TestNutanixKubernetes125UbuntuCuratedPackagesSimpleFlow(t *testing.T) { - test := framework.NewClusterE2ETest(t, - framework.NewNutanix(t, framework.WithUbuntu125Nutanix()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), - EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, - EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), - ) - runCuratedPackageInstallSimpleFlow(test) -} - -func TestNutanixKubernetes125UbuntuCuratedPackagesEmissarySimpleFlow(t *testing.T) { - framework.CheckCuratedPackagesCredentials(t) - test := framework.NewClusterE2ETest(t, - framework.NewNutanix(t, framework.WithUbuntu125Nutanix()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), - EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, - EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), - ) - runCuratedPackageEmissaryInstallSimpleFlow(test) -} - -func TestNutanixKubernetes125UbuntuCuratedPackagesHarborSimpleFlow(t *testing.T) { - test := framework.NewClusterE2ETest(t, - framework.NewNutanix(t, framework.WithUbuntu125Nutanix()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), - EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, - EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), - ) - runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test) -} - -func TestNutanixKubernetes125UbuntuCuratedPackagesAdotSimpleFlow(t *testing.T) { - framework.CheckCuratedPackagesCredentials(t) - test := framework.NewClusterE2ETest(t, - framework.NewNutanix(t, framework.WithUbuntu125Nutanix()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), - EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, - EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), - ) - runCuratedPackagesAdotInstallSimpleFlow(test) -} - -func TestNutanixKubernetes125UbuntuCuratedPackagesPrometheusSimpleFlow(t *testing.T) { - framework.CheckCuratedPackagesCredentials(t) - test := framework.NewClusterE2ETest(t, - 
framework.NewNutanix(t, framework.WithUbuntu125Nutanix()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), - EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, - EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), - ) - runCuratedPackagesPrometheusInstallSimpleFlow(test) -} - -func TestNutanixKubernetes125UbuntuCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) { - minNodes := 1 - maxNodes := 2 - framework.CheckCuratedPackagesCredentials(t) - test := framework.NewClusterE2ETest(t, - framework.NewNutanix(t, framework.WithUbuntu125Nutanix()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), - EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, - EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), - ) - runAutoscalerWithMetricsServerSimpleFlow(test) -} - func TestNutanixKubernetes128UbuntuCuratedPackagesSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest(t, framework.NewNutanix(t, framework.WithUbuntu128Nutanix()), @@ -373,16 +301,79 @@ func TestNutanixKubernetes129UbuntuCuratedPackagesClusterAutoscalerSimpleFlow(t runAutoscalerWithMetricsServerSimpleFlow(test) } -// Simpleflow -func TestNutanixKubernetes125UbuntuSimpleFlowWithName(t *testing.T) { - test := framework.NewClusterE2ETest( - t, - framework.NewNutanix(t, framework.WithUbuntu125Nutanix()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), +func TestNutanixKubernetes130UbuntuCuratedPackagesSimpleFlow(t *testing.T) { + test := framework.NewClusterE2ETest(t, + framework.NewNutanix(t, framework.WithUbuntu130Nutanix()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), + EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, + EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runSimpleFlow(test) + runCuratedPackageInstallSimpleFlow(test) +} + +func TestNutanixKubernetes130UbuntuCuratedPackagesEmissarySimpleFlow(t *testing.T) { + framework.CheckCuratedPackagesCredentials(t) + test := framework.NewClusterE2ETest(t, + framework.NewNutanix(t, framework.WithUbuntu130Nutanix()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), + EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, + EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), + ) + runCuratedPackageEmissaryInstallSimpleFlow(test) } +func TestNutanixKubernetes130UbuntuCuratedPackagesHarborSimpleFlow(t *testing.T) { + test := framework.NewClusterE2ETest(t, + framework.NewNutanix(t, framework.WithUbuntu130Nutanix()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), + EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, + EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), + ) + runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test) +} + +func TestNutanixKubernetes130UbuntuCuratedPackagesAdotSimpleFlow(t *testing.T) { + framework.CheckCuratedPackagesCredentials(t) + test := framework.NewClusterE2ETest(t, + framework.NewNutanix(t, 
framework.WithUbuntu130Nutanix()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), + EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, + EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), + ) + runCuratedPackagesAdotInstallSimpleFlow(test) +} + +func TestNutanixKubernetes130UbuntuCuratedPackagesPrometheusSimpleFlow(t *testing.T) { + framework.CheckCuratedPackagesCredentials(t) + test := framework.NewClusterE2ETest(t, + framework.NewNutanix(t, framework.WithUbuntu130Nutanix()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), + EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, + EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), + ) + runCuratedPackagesPrometheusInstallSimpleFlow(test) +} + +func TestNutanixKubernetes130UbuntuCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) { + minNodes := 1 + maxNodes := 2 + framework.CheckCuratedPackagesCredentials(t) + test := framework.NewClusterE2ETest(t, + framework.NewNutanix(t, framework.WithUbuntu130Nutanix()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), + EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, + EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), + ) + runAutoscalerWithMetricsServerSimpleFlow(test) +} + +// Simpleflow func TestNutanixKubernetes126UbuntuSimpleFlowWithName(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -419,11 +410,11 @@ func TestNutanixKubernetes129UbuntuSimpleFlowWithName(t *testing.T) { runSimpleFlow(test) } -func TestNutanixKubernetes125RedHat8SimpleFlowWithName(t *testing.T) { +func TestNutanixKubernetes130UbuntuSimpleFlowWithName(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewNutanix(t, framework.WithRedHat125Nutanix()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.NewNutanix(t, framework.WithUbuntu130Nutanix()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), ) runSimpleFlow(test) } @@ -455,11 +446,20 @@ func TestNutanixKubernetes128RedHat8SimpleFlowWithName(t *testing.T) { runSimpleFlow(test) } -func TestNutanixKubernetes125RedHat9SimpleFlowWithName(t *testing.T) { +func TestNutanixKubernetes129RedHat8SimpleFlowWithName(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewNutanix(t, framework.WithRedHat129Nutanix()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + ) + runSimpleFlow(test) +} + +func TestNutanixKubernetes130RedHat8SimpleFlowWithName(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewNutanix(t, framework.WithRedHat9Kubernetes125Nutanix()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.NewNutanix(t, framework.WithRedHat130Nutanix()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), ) runSimpleFlow(test) } @@ -500,13 +500,11 @@ func TestNutanixKubernetes129RedHat9SimpleFlowWithName(t *testing.T) { runSimpleFlow(test) } -func TestNutanixKubernetes125UbuntuSimpleFlowWithUUID(t *testing.T) { +func TestNutanixKubernetes130RedHat9SimpleFlowWithName(t *testing.T) { test := framework.NewClusterE2ETest( 
t, - framework.NewNutanix(t, framework.WithUbuntu125NutanixUUID(), - framework.WithPrismElementClusterUUID(), - framework.WithNutanixSubnetUUID()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.NewNutanix(t, framework.WithRedHat9Kubernetes130Nutanix()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), ) runSimpleFlow(test) } @@ -555,6 +553,17 @@ func TestNutanixKubernetes129UbuntuSimpleFlowWithUUID(t *testing.T) { runSimpleFlow(test) } +func TestNutanixKubernetes130UbuntuSimpleFlowWithUUID(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewNutanix(t, framework.WithUbuntu130NutanixUUID(), + framework.WithPrismElementClusterUUID(), + framework.WithNutanixSubnetUUID()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + ) + runSimpleFlow(test) +} + func TestNutanixKubernetes128RedHatSimpleFlowWithUUID(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -577,6 +586,17 @@ func TestNutanixKubernetes129RedHatSimpleFlowWithUUID(t *testing.T) { runSimpleFlow(test) } +func TestNutanixKubernetes130RedHatSimpleFlowWithUUID(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewNutanix(t, framework.WithRedHat130NutanixUUID(), + framework.WithPrismElementClusterUUID(), + framework.WithNutanixSubnetUUID()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + ) + runSimpleFlow(test) +} + func TestNutanixKubernetes128RedHat9SimpleFlowWithUUID(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -599,24 +619,18 @@ func TestNutanixKubernetes129RedHat9SimpleFlowWithUUID(t *testing.T) { runSimpleFlow(test) } -// Upgrade -func TestNutanixKubernetes125To126UbuntuUpgrade(t *testing.T) { - provider := framework.NewNutanix(t, framework.WithUbuntu125Nutanix()) +func TestNutanixKubernetes130RedHat9SimpleFlowWithUUID(t *testing.T) { test := framework.NewClusterE2ETest( t, - provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithClusterFiller(api.WithControlPlaneCount(1)), - framework.WithClusterFiller(api.WithWorkerNodeCount(1)), - ) - runSimpleUpgradeFlow( - test, - v1alpha1.Kube126, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), - provider.WithProviderUpgrade(provider.Ubuntu126Template()), + framework.NewNutanix(t, framework.WithRedHat9Kubernetes130NutanixUUID(), + framework.WithPrismElementClusterUUID(), + framework.WithNutanixSubnetUUID()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), ) + runSimpleFlow(test) } +// Upgrade func TestNutanixKubernetes126To127UbuntuUpgrade(t *testing.T) { provider := framework.NewNutanix(t, framework.WithUbuntu126Nutanix()) test := framework.NewClusterE2ETest( @@ -665,6 +679,22 @@ func TestNutanixKubernetes128To129StackedEtcdUbuntuUpgrade(t *testing.T) { ) } +func TestNutanixKubernetes129To130StackedEtcdUbuntuUpgrade(t *testing.T) { + provider := framework.NewNutanix(t, framework.WithUbuntu129Nutanix()) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithStackedEtcdTopology()), + ) + runSimpleUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Ubuntu130Template()), + ) +} + func TestNutanixKubernetes128To129UbuntuUpgrade(t *testing.T) { provider := framework.NewNutanix(t, 
framework.WithUbuntu128Nutanix()) test := framework.NewClusterE2ETest( @@ -681,20 +711,19 @@ func TestNutanixKubernetes128To129UbuntuUpgrade(t *testing.T) { ) } -func TestNutanixKubernetes125to126RedHatUpgrade(t *testing.T) { - provider := framework.NewNutanix(t, framework.WithRedHat125Nutanix()) +func TestNutanixKubernetes129To130UbuntuUpgrade(t *testing.T) { + provider := framework.NewNutanix(t, framework.WithUbuntu129Nutanix()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithClusterFiller(api.WithControlPlaneCount(1)), - framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), ) runSimpleUpgradeFlow( test, - v1alpha1.Kube126, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), - provider.WithProviderUpgrade(provider.RedHat126Template()), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Ubuntu130Template()), ) } @@ -768,20 +797,20 @@ func TestNutanixKubernetes128to129StackedEtcdRedHat8Upgrade(t *testing.T) { ) } -func TestNutanixKubernetes125to126RedHat9Upgrade(t *testing.T) { - provider := framework.NewNutanix(t, framework.WithRedHat9Kubernetes125Nutanix()) +func TestNutanixKubernetes129to130RedHatUpgrade(t *testing.T) { + provider := framework.NewNutanix(t, framework.WithRedHat129Nutanix()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), ) runSimpleUpgradeFlow( test, - v1alpha1.Kube126, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), - provider.WithProviderUpgrade(provider.RedHat9Kubernetes126Template()), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.RedHat130Template()), ) } @@ -838,6 +867,23 @@ func TestNutanixKubernetes128to129StackedEtcdRedHat9Upgrade(t *testing.T) { ) } +func TestNutanixKubernetes129to130RedHat9Upgrade(t *testing.T) { + provider := framework.NewNutanix(t, framework.WithRedHat9Kubernetes129Nutanix()) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + ) + runSimpleUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.RedHat9Kubernetes130Template()), + ) +} + func TestNutanixKubernetes128UbuntuWorkerNodeScaleUp1To3(t *testing.T) { provider := framework.NewNutanix(t, framework.WithUbuntu128Nutanix()) test := framework.NewClusterE2ETest( @@ -872,20 +918,19 @@ func TestNutanixKubernetes129UbuntuWorkerNodeScaleUp1To3(t *testing.T) { ) } -// 1 worker node cluster scaled up to 3 -func TestNutanixKubernetes125UbuntuWorkerNodeScaleUp1To3(t *testing.T) { - provider := framework.NewNutanix(t, framework.WithUbuntu125Nutanix()) +func TestNutanixKubernetes130UbuntuWorkerNodeScaleUp1To3(t *testing.T) { + provider := framework.NewNutanix(t, 
framework.WithUbuntu130Nutanix()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithEtcdCountIfExternal(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), ) runSimpleUpgradeFlow( test, - v1alpha1.Kube125, + v1alpha1.Kube130, framework.WithClusterUpgrade(api.WithWorkerNodeCount(3)), ) } @@ -944,24 +989,6 @@ func TestNutanixKubernetes128UbuntuControlPlaneNodeScaleUp1To3(t *testing.T) { ) } -// 1 node control plane cluster scaled up to 3 -func TestNutanixKubernetes125UbuntuControlPlaneNodeScaleUp1To3(t *testing.T) { - provider := framework.NewNutanix(t, framework.WithUbuntu125Nutanix()) - test := framework.NewClusterE2ETest( - t, - provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithClusterFiller(api.WithControlPlaneCount(1)), - framework.WithClusterFiller(api.WithEtcdCountIfExternal(1)), - framework.WithClusterFiller(api.WithWorkerNodeCount(3)), - ) - runSimpleUpgradeFlow( - test, - v1alpha1.Kube125, - framework.WithClusterFiller(api.WithControlPlaneCount(3)), - ) -} - // 1 node control plane cluster scaled up to 3 func TestNutanixKubernetes126UbuntuControlPlaneNodeScaleUp1To3(t *testing.T) { provider := framework.NewNutanix(t, framework.WithUbuntu126Nutanix()) @@ -1015,21 +1042,37 @@ func TestNutanixKubernetes128UbuntuWorkerNodeScaleDown3To1(t *testing.T) { ) } -// 3 worker node cluster scaled down to 1 -func TestNutanixKubernetes125UbuntuWorkerNodeScaleDown3To1(t *testing.T) { - provider := framework.NewNutanix(t, framework.WithUbuntu125Nutanix()) +func TestNutanixKubernetes129UbuntuControlPlaneNodeScaleUp1To3(t *testing.T) { + provider := framework.NewNutanix(t, framework.WithUbuntu129Nutanix()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithEtcdCountIfExternal(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + ) + runSimpleUpgradeFlow( + test, + v1alpha1.Kube129, framework.WithClusterFiller(api.WithControlPlaneCount(3)), + ) +} + +func TestNutanixKubernetes130UbuntuControlPlaneNodeScaleUp1To3(t *testing.T) { + provider := framework.NewNutanix(t, framework.WithUbuntu130Nutanix()) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithEtcdCountIfExternal(1)), - framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), ) runSimpleUpgradeFlow( test, - v1alpha1.Kube125, - framework.WithClusterUpgrade(api.WithWorkerNodeCount(1)), + v1alpha1.Kube130, + framework.WithClusterFiller(api.WithControlPlaneCount(3)), ) } @@ -1086,21 +1129,37 @@ func TestNutanixKubernetes128UbuntuControlPlaneNodeScaleDown3To1(t *testing.T) { ) } -// 3 node control plane cluster scaled down to 1 -func TestNutanixKubernetes125UbuntuControlPlaneNodeScaleDown3To1(t *testing.T) { - provider := framework.NewNutanix(t, framework.WithUbuntu125Nutanix()) +func TestNutanixKubernetes129UbuntuWorkerNodeScaleDown3To1(t *testing.T) { + 
provider := framework.NewNutanix(t, framework.WithUbuntu129Nutanix()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), framework.WithClusterFiller(api.WithControlPlaneCount(3)), framework.WithClusterFiller(api.WithEtcdCountIfExternal(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), ) runSimpleUpgradeFlow( test, - v1alpha1.Kube125, - framework.WithClusterFiller(api.WithControlPlaneCount(1)), + v1alpha1.Kube129, + framework.WithClusterUpgrade(api.WithWorkerNodeCount(1)), + ) +} + +func TestNutanixKubernetes130UbuntuWorkerNodeScaleDown3To1(t *testing.T) { + provider := framework.NewNutanix(t, framework.WithUbuntu130Nutanix()) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(3)), + framework.WithClusterFiller(api.WithEtcdCountIfExternal(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + ) + runSimpleUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithWorkerNodeCount(1)), ) } @@ -1140,20 +1199,42 @@ func TestNutanixKubernetes127UbuntuControlPlaneNodeScaleDown3To1(t *testing.T) { ) } -// OIDC Tests -func TestNutanixKubernetes125OIDC(t *testing.T) { +// 3 node control plane cluster scaled down to 1 +func TestNutanixKubernetes129UbuntuControlPlaneNodeScaleDown3To1(t *testing.T) { + provider := framework.NewNutanix(t, framework.WithUbuntu129Nutanix()) test := framework.NewClusterE2ETest( t, - framework.NewNutanix(t, framework.WithUbuntu125Nutanix()), - framework.WithOIDC(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithControlPlaneCount(3)), + framework.WithClusterFiller(api.WithEtcdCountIfExternal(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + ) + runSimpleUpgradeFlow( + test, + v1alpha1.Kube129, framework.WithClusterFiller(api.WithControlPlaneCount(1)), + ) +} + +func TestNutanixKubernetes130UbuntuControlPlaneNodeScaleDown3To1(t *testing.T) { + provider := framework.NewNutanix(t, framework.WithUbuntu130Nutanix()) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(3)), framework.WithClusterFiller(api.WithEtcdCountIfExternal(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), ) - runOIDCFlow(test) + runSimpleUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + ) } +// OIDC Tests func TestNutanixKubernetes126OIDC(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -1193,20 +1274,20 @@ func TestNutanixKubernetes129OIDC(t *testing.T) { runOIDCFlow(test) } -// AWS IAM Authenticator Tests -func TestNutanixKubernetes125AWSIamAuth(t *testing.T) { +func TestNutanixKubernetes130OIDC(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewNutanix(t, framework.WithUbuntu125Nutanix()), - framework.WithAWSIam(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.NewNutanix(t, framework.WithUbuntu130Nutanix()), + framework.WithOIDC(), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), 
framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithEtcdCountIfExternal(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), ) - runAWSIamAuthFlow(test) + runOIDCFlow(test) } +// AWS IAM Authenticator Tests func TestNutanixKubernetes126AWSIamAuth(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -1246,13 +1327,26 @@ func TestNutanixKubernetes129AWSIamAuth(t *testing.T) { runAWSIamAuthFlow(test) } -func TestNutanixKubernetes128UbuntuManagementCPUpgradeAPI(t *testing.T) { - provider := framework.NewNutanix(t, framework.WithUbuntu128Nutanix()) +func TestNutanixKubernetes130AWSIamAuth(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewNutanix(t, framework.WithUbuntu130Nutanix()), + framework.WithAWSIam(), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithEtcdCountIfExternal(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + ) + runAWSIamAuthFlow(test) +} + +func TestNutanixKubernetes130UbuntuManagementCPUpgradeAPI(t *testing.T) { + provider := framework.NewNutanix(t, framework.WithUbuntu130Nutanix()) test := framework.NewClusterE2ETest( t, provider, ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneCount(1), api.WithEtcdCountIfExternal(1), api.WithWorkerNodeCount(1), diff --git a/test/e2e/tinkerbell_test.go b/test/e2e/tinkerbell_test.go index ba209136228e..ca7335761db9 100644 --- a/test/e2e/tinkerbell_test.go +++ b/test/e2e/tinkerbell_test.go @@ -19,12 +19,12 @@ import ( // AWS IAM Auth -func TestTinkerbellKubernetes129AWSIamAuth(t *testing.T) { +func TestTinkerbellKubernetes130AWSIamAuth(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), framework.WithAWSIam(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithControlPlaneHardware(1), framework.WithWorkerHardware(1), ) @@ -32,25 +32,6 @@ func TestTinkerbellKubernetes129AWSIamAuth(t *testing.T) { } // Upgrade -func TestTinkerbellKubernetes125UbuntuTo126Upgrade(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu125Tinkerbell()) - test := framework.NewClusterE2ETest( - t, - provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithClusterFiller(api.WithControlPlaneCount(1)), - framework.WithClusterFiller(api.WithWorkerNodeCount(1)), - framework.WithControlPlaneHardware(2), - framework.WithWorkerHardware(2), - ) - runSimpleUpgradeFlowForBareMetal( - test, - v1alpha1.Kube126, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), - provider.WithProviderUpgrade(framework.Ubuntu126Image()), - ) -} - func TestTinkerbellKubernetes126UbuntuTo127Upgrade(t *testing.T) { provider := framework.NewTinkerbell(t, framework.WithUbuntu126Tinkerbell()) test := framework.NewClusterE2ETest( @@ -108,71 +89,70 @@ func TestTinkerbellKubernetes128UbuntuTo129Upgrade(t *testing.T) { ) } -func TestTinkerbellKubernetes128UbuntuTo129UpgradeCPOnly(t *testing.T) { - provider := framework.NewTinkerbell(t) - kube128 := v1alpha1.Kube128 +func TestTinkerbellKubernetes129UbuntuTo130Upgrade(t *testing.T) { + 
provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(kube128)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), - framework.WithClusterFiller(api.WithWorkerKubernetesVersion(nodeGroupLabel1, &kube128)), framework.WithControlPlaneHardware(2), - framework.WithWorkerHardware(1), - ).WithClusterConfig( - provider.WithCPKubeVersionAndOS(kube128, framework.Ubuntu2004), - provider.WithWorkerKubeVersionAndOS(kube128, framework.Ubuntu2004), + framework.WithWorkerHardware(2), ) - runSimpleUpgradeFlowWorkerNodeVersionForBareMetal( + runSimpleUpgradeFlowForBareMetal( test, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube129)), - provider.WithProviderUpgrade(framework.Ubuntu129ImageForCP()), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(framework.Ubuntu130Image()), ) } -func TestTinkerbellKubernetes127UbuntuTo128UpgradeWorkerOnly(t *testing.T) { +func TestTinkerbellKubernetes129UbuntuTo130UpgradeCPOnly(t *testing.T) { provider := framework.NewTinkerbell(t) - kube127 := v1alpha1.Kube127 - kube128 := v1alpha1.Kube128 + kube129 := v1alpha1.Kube129 test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(), - framework.WithClusterFiller(api.WithKubernetesVersion(kube128)), + framework.WithClusterFiller(api.WithKubernetesVersion(kube129)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), - framework.WithClusterFiller(api.WithWorkerKubernetesVersion(nodeGroupLabel1, &kube127)), - framework.WithControlPlaneHardware(1), - framework.WithWorkerHardware(2), + framework.WithClusterFiller(api.WithWorkerKubernetesVersion(nodeGroupLabel1, &kube129)), + framework.WithControlPlaneHardware(2), + framework.WithWorkerHardware(1), ).WithClusterConfig( - provider.WithCPKubeVersionAndOS(v1alpha1.Kube128, framework.Ubuntu2004), - provider.WithWorkerKubeVersionAndOS(v1alpha1.Kube127, framework.Ubuntu2004), + provider.WithCPKubeVersionAndOS(kube129, framework.Ubuntu2004), + provider.WithWorkerKubeVersionAndOS(kube129, framework.Ubuntu2004), ) runSimpleUpgradeFlowWorkerNodeVersionForBareMetal( test, - framework.WithClusterUpgrade(api.WithWorkerKubernetesVersion(nodeGroupLabel1, &kube128)), - provider.WithProviderUpgrade(framework.Ubuntu128ImageForWorker()), + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(framework.Ubuntu130ImageForCP()), ) } -func TestTinkerbellKubernetes125To126Ubuntu2204Upgrade(t *testing.T) { +func TestTinkerbellKubernetes129UbuntuTo130UpgradeWorkerOnly(t *testing.T) { provider := framework.NewTinkerbell(t) + kube129 := v1alpha1.Kube129 + kube130 := v1alpha1.Kube130 test := framework.NewClusterE2ETest( t, provider, + framework.WithClusterFiller(), + framework.WithClusterFiller(api.WithKubernetesVersion(kube130)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), - framework.WithControlPlaneHardware(2), + framework.WithClusterFiller(api.WithWorkerKubernetesVersion(nodeGroupLabel1, &kube129)), + framework.WithControlPlaneHardware(1), framework.WithWorkerHardware(2), ).WithClusterConfig( - 
provider.WithKubeVersionAndOS(v1alpha1.Kube125, framework.Ubuntu2204, nil), + provider.WithCPKubeVersionAndOS(v1alpha1.Kube130, framework.Ubuntu2004), + provider.WithWorkerKubeVersionAndOS(v1alpha1.Kube129, framework.Ubuntu2004), ) - runSimpleUpgradeFlowForBaremetalWithoutClusterConfigGeneration( + runSimpleUpgradeFlowWorkerNodeVersionForBareMetal( test, - v1alpha1.Kube126, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), - provider.WithProviderUpgrade(framework.Ubuntu2204Kubernetes126Image()), + framework.WithClusterUpgrade(api.WithWorkerKubernetesVersion(nodeGroupLabel1, &kube130)), + provider.WithProviderUpgrade(framework.Ubuntu130ImageForWorker()), ) } @@ -236,6 +216,46 @@ func TestTinkerbellKubernetes128To129Ubuntu2204Upgrade(t *testing.T) { ) } +func TestTinkerbellKubernetes128To129Ubuntu2204RTOSUpgrade(t *testing.T) { + provider := framework.NewTinkerbell(t) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithControlPlaneHardware(2), + framework.WithWorkerHardware(2), + ).WithClusterConfig( + provider.WithKubeVersionAndOS(v1alpha1.Kube128, framework.Ubuntu2204, nil), + ) + runSimpleUpgradeFlowForBaremetalWithoutClusterConfigGeneration( + test, + v1alpha1.Kube129, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube129)), + provider.WithProviderUpgrade(framework.Ubuntu2204Kubernetes129RTOSImage()), + ) +} + +func TestTinkerbellKubernetes129To130Ubuntu2204Upgrade(t *testing.T) { + provider := framework.NewTinkerbell(t) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithControlPlaneHardware(2), + framework.WithWorkerHardware(2), + ).WithClusterConfig( + provider.WithKubeVersionAndOS(v1alpha1.Kube129, framework.Ubuntu2204, nil), + ) + runSimpleUpgradeFlowForBaremetalWithoutClusterConfigGeneration( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(framework.Ubuntu2204Kubernetes130Image()), + ) +} + func TestTinkerbellKubernetes126Ubuntu2004To2204Upgrade(t *testing.T) { provider := framework.NewTinkerbell(t) test := framework.NewClusterE2ETest( @@ -316,12 +336,52 @@ func TestTinkerbellKubernetes129Ubuntu2004To2204Upgrade(t *testing.T) { ) } -func TestTinkerbellKubernetes129UbuntuWorkerNodeUpgrade(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) +func TestTinkerbellKubernetes129Ubuntu2004To2204RTOSUpgrade(t *testing.T) { + provider := framework.NewTinkerbell(t) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithControlPlaneHardware(2), + framework.WithWorkerHardware(2), + ).WithClusterConfig( + provider.WithKubeVersionAndOS(v1alpha1.Kube129, framework.Ubuntu2004, nil), + ) + runSimpleUpgradeFlowForBaremetalWithoutClusterConfigGeneration( + test, + v1alpha1.Kube129, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube129)), + provider.WithProviderUpgrade(framework.Ubuntu2204Kubernetes129RTOSImage()), + ) +} + +func TestTinkerbellKubernetes130Ubuntu2004To2204Upgrade(t *testing.T) { + provider := 
framework.NewTinkerbell(t) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithControlPlaneHardware(2), + framework.WithWorkerHardware(2), + ).WithClusterConfig( + provider.WithKubeVersionAndOS(v1alpha1.Kube130, framework.Ubuntu2004, nil), + ) + runSimpleUpgradeFlowForBaremetalWithoutClusterConfigGeneration( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(framework.Ubuntu2204Kubernetes130Image()), + ) +} + +func TestTinkerbellKubernetes130UbuntuWorkerNodeUpgrade(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), framework.WithControlPlaneHardware(1), @@ -329,17 +389,17 @@ func TestTinkerbellKubernetes129UbuntuWorkerNodeUpgrade(t *testing.T) { ) runSimpleUpgradeFlowForBareMetal( test, - v1alpha1.Kube129, + v1alpha1.Kube130, framework.WithClusterUpgrade(api.WithWorkerNodeCount(2)), ) } -func TestTinkerbellKubernetes129UbuntuWorkerNodeScaleUpWithAPI(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) +func TestTinkerbellKubernetes130UbuntuWorkerNodeScaleUpWithAPI(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), framework.WithControlPlaneHardware(1), @@ -380,27 +440,6 @@ func TestTinkerbellKubernetes125UbuntuAddWorkerNodeGroupWithAPI(t *testing.T) { ) } -func TestTinkerbellKubernetes125UbuntuTo126InPlaceUpgrade_1CP_1Worker(t *testing.T) { - provider := framework.NewTinkerbell(t) - test := framework.NewClusterE2ETest( - t, - provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithClusterFiller(api.WithControlPlaneCount(1)), - framework.WithClusterFiller(api.WithWorkerNodeCount(1)), - framework.WithClusterFiller(api.WithInPlaceUpgradeStrategy()), - framework.WithControlPlaneHardware(1), - framework.WithWorkerHardware(1), - ).WithClusterConfig( - provider.WithKubeVersionAndOS(v1alpha1.Kube125, framework.Ubuntu2004, nil), - ) - runInPlaceUpgradeFlowForBareMetal( - test, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126), api.WithInPlaceUpgradeStrategy()), - provider.WithProviderUpgrade(framework.Ubuntu126Image()), - ) -} - func TestTinkerbellKubernetes126UbuntuTo127InPlaceUpgrade_1CP_2Worker(t *testing.T) { provider := framework.NewTinkerbell(t) test := framework.NewClusterE2ETest( @@ -464,32 +503,24 @@ func TestTinkerbellKubernetes128UbuntuTo129InPlaceUpgrade_1CP_1Worker(t *testing ) } -func TestTinkerbellKubernetes125UbuntuTo126SingleNodeInPlaceUpgrade(t *testing.T) { +func TestTinkerbellKubernetes129UbuntuTo130InPlaceUpgrade_1CP_1Worker(t *testing.T) { provider := framework.NewTinkerbell(t) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + 
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), - framework.WithClusterFiller(api.WithEtcdCountIfExternal(0)), - framework.WithClusterFiller(api.RemoveAllWorkerNodeGroups()), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), framework.WithClusterFiller(api.WithInPlaceUpgradeStrategy()), framework.WithControlPlaneHardware(1), + framework.WithWorkerHardware(1), ).WithClusterConfig( - provider.WithKubeVersionAndOS(v1alpha1.Kube125, framework.Ubuntu2004, nil), + provider.WithKubeVersionAndOS(v1alpha1.Kube129, framework.Ubuntu2004, nil), ) runInPlaceUpgradeFlowForBareMetal( test, - framework.WithUpgradeClusterConfig( - api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube126), - api.WithInPlaceUpgradeStrategy(), - ), - api.TinkerbellToConfigFiller( - api.RemoveTinkerbellWorkerMachineConfig(), - ), - ), - provider.WithProviderUpgrade(framework.Ubuntu126Image()), + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130), api.WithInPlaceUpgradeStrategy()), + provider.WithProviderUpgrade(framework.Ubuntu130Image()), ) } @@ -580,6 +611,35 @@ func TestTinkerbellKubernetes128UbuntuTo129SingleNodeInPlaceUpgrade(t *testing.T ) } +func TestTinkerbellKubernetes129UbuntuTo130SingleNodeInPlaceUpgrade(t *testing.T) { + provider := framework.NewTinkerbell(t) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterSingleNode(v1alpha1.Kube129), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithEtcdCountIfExternal(0)), + framework.WithClusterFiller(api.RemoveAllWorkerNodeGroups()), + framework.WithClusterFiller(api.WithInPlaceUpgradeStrategy()), + framework.WithControlPlaneHardware(1), + ).WithClusterConfig( + provider.WithKubeVersionAndOS(v1alpha1.Kube129, framework.Ubuntu2004, nil), + ) + runInPlaceUpgradeFlowForBareMetal( + test, + framework.WithUpgradeClusterConfig( + api.ClusterToConfigFiller( + api.WithKubernetesVersion(v1alpha1.Kube130), + api.WithInPlaceUpgradeStrategy(), + ), + api.TinkerbellToConfigFiller( + api.RemoveTinkerbellWorkerMachineConfig(), + ), + ), + provider.WithProviderUpgrade(framework.Ubuntu130Image()), + ) +} + // Curated packages func TestTinkerbellKubernetes127UbuntuSingleNodeCuratedPackagesFlow(t *testing.T) { test := framework.NewClusterE2ETest(t, @@ -783,90 +843,90 @@ func TestTinkerbellKubernetes125UbuntuCuratedPackagesPrometheusSimpleFlow(t *tes runCuratedPackagesPrometheusInstallTinkerbellSimpleFlow(test) } -func TestTinkerbellKubernetes129UbuntuSingleNodeCuratedPackagesFlow(t *testing.T) { +func TestTinkerbellKubernetes130UbuntuSingleNodeCuratedPackagesFlow(t *testing.T) { test := framework.NewClusterE2ETest(t, - framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), - framework.WithClusterSingleNode(v1alpha1.Kube129), + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), + framework.WithClusterSingleNode(v1alpha1.Kube130), framework.WithControlPlaneHardware(1), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube129), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) runCuratedPackageInstallTinkerbellSingleNodeFlow(test) } -func TestTinkerbellKubernetes129UbuntuSingleNodeCuratedPackagesEmissaryFlow(t *testing.T) { +func TestTinkerbellKubernetes130UbuntuSingleNodeCuratedPackagesEmissaryFlow(t 
*testing.T) { test := framework.NewClusterE2ETest(t, - framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), - framework.WithClusterSingleNode(v1alpha1.Kube129), + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), + framework.WithClusterSingleNode(v1alpha1.Kube130), framework.WithControlPlaneHardware(1), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube129), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) runCuratedPackageEmissaryInstallTinkerbellSingleNodeFlow(test) } -func TestTinkerbellKubernetes129UbuntuSingleNodeCuratedPackagesHarborFlow(t *testing.T) { +func TestTinkerbellKubernetes130UbuntuSingleNodeCuratedPackagesHarborFlow(t *testing.T) { test := framework.NewClusterE2ETest(t, - framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), - framework.WithClusterSingleNode(v1alpha1.Kube129), + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), + framework.WithClusterSingleNode(v1alpha1.Kube130), framework.WithControlPlaneHardware(1), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube129), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) runCuratedPackageHarborInstallTinkerbellSimpleFlow(test) } -func TestTinkerbellKubernetes129UbuntuCuratedPackagesAdotSimpleFlow(t *testing.T) { +func TestTinkerbellKubernetes130UbuntuCuratedPackagesAdotSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), - framework.WithClusterSingleNode(v1alpha1.Kube129), + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), + framework.WithClusterSingleNode(v1alpha1.Kube130), framework.WithControlPlaneHardware(1), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube129), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) runCuratedPackagesAdotInstallTinkerbellSimpleFlow(test) } -func TestTinkerbellKubernetes129UbuntuCuratedPackagesPrometheusSimpleFlow(t *testing.T) { +func TestTinkerbellKubernetes130UbuntuCuratedPackagesPrometheusSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), - framework.WithClusterSingleNode(v1alpha1.Kube129), + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), + framework.WithClusterSingleNode(v1alpha1.Kube130), framework.WithControlPlaneHardware(1), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube129), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) runCuratedPackagesPrometheusInstallTinkerbellSimpleFlow(test) } -func TestTinkerbellKubernetes129UbuntuCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) { +func TestTinkerbellKubernetes130UbuntuCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) minNodes := 1 maxNodes := 2 test := 
framework.NewClusterE2ETest(t, - framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)), + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)), framework.WithControlPlaneHardware(1), framework.WithWorkerHardware(2), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube129), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) runAutoscalerWithMetricsServerTinkerbellSimpleFlow(test) } -func TestTinkerbellKubernetes129UbuntuSingleNodeSimpleFlow(t *testing.T) { +func TestTinkerbellKubernetes130UbuntuSingleNodeSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneCount(1), api.WithEtcdCountIfExternal(0), api.RemoveAllWorkerNodeGroups(), @@ -878,28 +938,28 @@ func TestTinkerbellKubernetes129UbuntuSingleNodeSimpleFlow(t *testing.T) { } // Multicluster -func TestTinkerbellKubernetes129UbuntuWorkloadCluster(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) +func TestTinkerbellKubernetes130UbuntuWorkloadCluster(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()) test := framework.NewMulticlusterE2ETest( t, framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithControlPlaneHardware(2), framework.WithWorkerHardware(2), ), framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), ), ) runTinkerbellWorkloadClusterFlow(test) } -func TestTinkerbellKubernetes129UbuntuWorkloadClusterWithAPI(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) +func TestTinkerbellKubernetes130UbuntuWorkloadClusterWithAPI(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()) managementCluster := framework.NewClusterE2ETest( t, provider, @@ -907,7 +967,7 @@ func TestTinkerbellKubernetes129UbuntuWorkloadClusterWithAPI(t *testing.T) { framework.WithWorkerHardware(2), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), ), ) test := framework.NewMulticlusterE2ETest( @@ -921,7 +981,7 @@ func TestTinkerbellKubernetes129UbuntuWorkloadClusterWithAPI(t *testing.T) { framework.WithClusterName(test.NewWorkloadClusterName()), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithManagementCluster(managementCluster.ClusterName), ), ), @@ -929,8 +989,8 @@ func TestTinkerbellKubernetes129UbuntuWorkloadClusterWithAPI(t *testing.T) { runWorkloadClusterWithAPIFlowForBareMetal(test) } 
-func TestTinkerbellKubernetes129UbuntuWorkloadClusterGitFluxWithAPI(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) +func TestTinkerbellKubernetes130UbuntuWorkloadClusterGitFluxWithAPI(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()) managementCluster := framework.NewClusterE2ETest( t, provider, @@ -941,7 +1001,7 @@ func TestTinkerbellKubernetes129UbuntuWorkloadClusterGitFluxWithAPI(t *testing.T ).WithClusterConfig( framework.WithFluxGithubConfig(), api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), ), ) test := framework.NewMulticlusterE2ETest( @@ -955,7 +1015,7 @@ func TestTinkerbellKubernetes129UbuntuWorkloadClusterGitFluxWithAPI(t *testing.T framework.WithClusterName(test.NewWorkloadClusterName()), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithManagementCluster(managementCluster.ClusterName), ), ), @@ -1014,15 +1074,15 @@ func TestTinkerbellKubernetes128BottlerocketWorkloadClusterWithAPI(t *testing.T) runWorkloadClusterWithAPIFlowForBareMetal(test) } -func TestTinkerbellKubernetes129UbuntuSingleNodeWorkloadCluster(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) +func TestTinkerbellKubernetes130UbuntuSingleNodeWorkloadCluster(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()) test := framework.NewMulticlusterE2ETest( t, framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithEtcdCountIfExternal(0), api.RemoveAllWorkerNodeGroups(), ), @@ -1033,7 +1093,7 @@ func TestTinkerbellKubernetes129UbuntuSingleNodeWorkloadCluster(t *testing.T) { t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithEtcdCountIfExternal(0), api.RemoveAllWorkerNodeGroups(), ), @@ -1042,8 +1102,8 @@ func TestTinkerbellKubernetes129UbuntuSingleNodeWorkloadCluster(t *testing.T) { runTinkerbellWorkloadClusterFlow(test) } -func TestTinkerbellKubernetes129UbuntuSingleNodeWorkloadClusterWithAPI(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) +func TestTinkerbellKubernetes130UbuntuSingleNodeWorkloadClusterWithAPI(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()) managementCluster := framework.NewClusterE2ETest( t, provider, @@ -1051,7 +1111,7 @@ func TestTinkerbellKubernetes129UbuntuSingleNodeWorkloadClusterWithAPI(t *testin framework.WithWorkerHardware(0), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithEtcdCountIfExternal(0), api.RemoveAllWorkerNodeGroups(), ), @@ -1067,7 +1127,7 @@ func TestTinkerbellKubernetes129UbuntuSingleNodeWorkloadClusterWithAPI(t *testin framework.WithClusterName(test.NewWorkloadClusterName()), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithManagementCluster(managementCluster.ClusterName), api.WithEtcdCountIfExternal(0), api.RemoveAllWorkerNodeGroups(), @@ -1077,8 +1137,8 @@ func TestTinkerbellKubernetes129UbuntuSingleNodeWorkloadClusterWithAPI(t *testin 
runWorkloadClusterWithAPIFlowForBareMetal(test) } -func TestTinkerbellUpgrade129MulticlusterWorkloadClusterWorkerScaleupGitFluxWithAPI(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) +func TestTinkerbellUpgrade130MulticlusterWorkloadClusterWorkerScaleupGitFluxWithAPI(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()) managementCluster := framework.NewClusterE2ETest( t, provider, @@ -1089,7 +1149,7 @@ func TestTinkerbellUpgrade129MulticlusterWorkloadClusterWorkerScaleupGitFluxWith ).WithClusterConfig( framework.WithFluxGithubConfig(), api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.RemoveAllWorkerNodeGroups(), ), ) @@ -1104,7 +1164,7 @@ func TestTinkerbellUpgrade129MulticlusterWorkloadClusterWorkerScaleupGitFluxWith framework.WithClusterName(test.NewWorkloadClusterName()), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithManagementCluster(managementCluster.ClusterName), ), ), @@ -1116,65 +1176,65 @@ func TestTinkerbellUpgrade129MulticlusterWorkloadClusterWorkerScaleupGitFluxWith ) } -func TestTinkerbellUpgrade129MulticlusterWorkloadClusterCPScaleup(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) +func TestTinkerbellUpgrade130MulticlusterWorkloadClusterCPScaleup(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()) test := framework.NewMulticlusterE2ETest( t, framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithControlPlaneHardware(4), framework.WithWorkerHardware(2), ), framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), ), ) runSimpleWorkloadUpgradeFlowForBareMetal( test, - v1alpha1.Kube129, + v1alpha1.Kube130, framework.WithClusterUpgrade( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneCount(3), ), ) } -func TestTinkerbellUpgradeMulticlusterWorkloadClusterK8sUpgrade128To129(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu128Tinkerbell()) +func TestTinkerbellUpgradeMulticlusterWorkloadClusterK8sUpgrade129To130(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) test := framework.NewMulticlusterE2ETest( t, framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), framework.WithControlPlaneHardware(3), framework.WithWorkerHardware(3), ), framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), ), ) runSimpleWorkloadUpgradeFlowForBareMetal( test, - v1alpha1.Kube129, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube129)), - provider.WithProviderUpgrade(framework.Ubuntu129Image()), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + 
provider.WithProviderUpgrade(framework.Ubuntu130Image()), ) } // OIDC -func TestTinkerbellKubernetes129OIDC(t *testing.T) { +func TestTinkerbellKubernetes130OIDC(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), framework.WithOIDC(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithControlPlaneHardware(1), framework.WithWorkerHardware(1), ) @@ -1182,11 +1242,11 @@ func TestTinkerbellKubernetes129OIDC(t *testing.T) { } // Registry mirror -func TestTinkerbellKubernetes129UbuntuRegistryMirror(t *testing.T) { +func TestTinkerbellKubernetes130UbuntuRegistryMirror(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithControlPlaneHardware(1), framework.WithWorkerHardware(1), framework.WithRegistryMirrorEndpointAndCert(constants.TinkerbellProviderName), @@ -1194,11 +1254,11 @@ func TestTinkerbellKubernetes129UbuntuRegistryMirror(t *testing.T) { runTinkerbellRegistryMirrorFlow(test) } -func TestTinkerbellKubernetes129UbuntuInsecureSkipVerifyRegistryMirror(t *testing.T) { +func TestTinkerbellKubernetes130UbuntuInsecureSkipVerifyRegistryMirror(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithControlPlaneHardware(1), framework.WithWorkerHardware(1), framework.WithRegistryMirrorInsecureSkipVerify(constants.TinkerbellProviderName), @@ -1206,11 +1266,11 @@ func TestTinkerbellKubernetes129UbuntuInsecureSkipVerifyRegistryMirror(t *testin runTinkerbellRegistryMirrorFlow(test) } -func TestTinkerbellKubernetes129UbuntuAuthenticatedRegistryMirror(t *testing.T) { +func TestTinkerbellKubernetes130UbuntuAuthenticatedRegistryMirror(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithControlPlaneHardware(1), framework.WithWorkerHardware(1), framework.WithAuthenticatedRegistryMirror(constants.TinkerbellProviderName), @@ -1219,17 +1279,6 @@ func TestTinkerbellKubernetes129UbuntuAuthenticatedRegistryMirror(t *testing.T) } // Simpleflow -func TestTinkerbellKubernetes125UbuntuSimpleFlow(t *testing.T) { - test := framework.NewClusterE2ETest( - t, - framework.NewTinkerbell(t, framework.WithUbuntu125Tinkerbell()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithControlPlaneHardware(1), - framework.WithWorkerHardware(1), - ) - runTinkerbellSimpleFlow(test) -} - func TestTinkerbellKubernetes126UbuntuSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -1274,17 +1323,15 @@ func 
TestTinkerbellKubernetes129UbuntuSimpleFlow(t *testing.T) { runTinkerbellSimpleFlow(test) } -func TestTinkerbellKubernetes125Ubuntu2204SimpleFlow(t *testing.T) { - provider := framework.NewTinkerbell(t) +func TestTinkerbellKubernetes130UbuntuSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, - provider, + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithControlPlaneHardware(1), framework.WithWorkerHardware(1), - ).WithClusterConfig( - provider.WithKubeVersionAndOS(v1alpha1.Kube125, framework.Ubuntu2204, nil), ) - runTinkerbellSimpleFlowWithoutClusterConfigGeneration(test) + runTinkerbellSimpleFlow(test) } func TestTinkerbellKubernetes126Ubuntu2204SimpleFlow(t *testing.T) { @@ -1339,15 +1386,30 @@ func TestTinkerbellKubernetes129Ubuntu2204SimpleFlow(t *testing.T) { runTinkerbellSimpleFlowWithoutClusterConfigGeneration(test) } -func TestTinkerbellKubernetes125RedHatSimpleFlow(t *testing.T) { +func TestTinkerbellKubernetes129Ubuntu2204RTOSSimpleFlow(t *testing.T) { + provider := framework.NewTinkerbell(t) test := framework.NewClusterE2ETest( t, - framework.NewTinkerbell(t, framework.WithRedHat125Tinkerbell()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + provider, framework.WithControlPlaneHardware(1), framework.WithWorkerHardware(1), + ).WithClusterConfig( + provider.WithKubeVersionAndOS(v1alpha1.Kube129, framework.Ubuntu2204, nil, true), ) - runTinkerbellSimpleFlow(test) + runTinkerbellSimpleFlowWithoutClusterConfigGeneration(test) +} + +func TestTinkerbellKubernetes130Ubuntu2204SimpleFlow(t *testing.T) { + provider := framework.NewTinkerbell(t) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithControlPlaneHardware(1), + framework.WithWorkerHardware(1), + ).WithClusterConfig( + provider.WithKubeVersionAndOS(v1alpha1.Kube130, framework.Ubuntu2204, nil), + ) + runTinkerbellSimpleFlowWithoutClusterConfigGeneration(test) } func TestTinkerbellKubernetes126RedHatSimpleFlow(t *testing.T) { @@ -1394,6 +1456,17 @@ func TestTinkerbellKubernetes129RedHatSimpleFlow(t *testing.T) { runTinkerbellSimpleFlow(test) } +func TestTinkerbellKubernetes130RedHatSimpleFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewTinkerbell(t, framework.WithRedHat130Tinkerbell()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithControlPlaneHardware(1), + framework.WithWorkerHardware(1), + ) + runTinkerbellSimpleFlow(test) +} + func TestTinkerbellKubernetes128BottleRocketSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -1405,11 +1478,11 @@ func TestTinkerbellKubernetes128BottleRocketSimpleFlow(t *testing.T) { runTinkerbellSimpleFlow(test) } -func TestTinkerbellKubernetes129UbuntuThreeControlPlaneReplicasSimpleFlow(t *testing.T) { +func TestTinkerbellKubernetes130UbuntuThreeControlPlaneReplicasSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), framework.WithClusterFiller(api.WithControlPlaneCount(3)), framework.WithControlPlaneHardware(3), @@ -1418,11 +1491,11 @@ func 
TestTinkerbellKubernetes129UbuntuThreeControlPlaneReplicasSimpleFlow(t *tes runTinkerbellSimpleFlow(test) } -func TestTinkerbellKubernetes129UbuntuThreeWorkersSimpleFlow(t *testing.T) { +func TestTinkerbellKubernetes130UbuntuThreeWorkersSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithClusterFiller(api.WithWorkerNodeCount(3)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithControlPlaneHardware(1), @@ -1431,12 +1504,12 @@ func TestTinkerbellKubernetes129UbuntuThreeWorkersSimpleFlow(t *testing.T) { runTinkerbellSimpleFlow(test) } -func TestTinkerbellKubernetes129UbuntuControlPlaneScaleUp(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) +func TestTinkerbellKubernetes130UbuntuControlPlaneScaleUp(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), framework.WithControlPlaneHardware(3), @@ -1444,17 +1517,17 @@ func TestTinkerbellKubernetes129UbuntuControlPlaneScaleUp(t *testing.T) { ) runSimpleUpgradeFlowForBareMetal( test, - v1alpha1.Kube129, + v1alpha1.Kube130, framework.WithClusterUpgrade(api.WithControlPlaneCount(3)), ) } -func TestTinkerbellKubernetes129UbuntuWorkerNodeScaleUp(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) +func TestTinkerbellKubernetes130UbuntuWorkerNodeScaleUp(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), framework.WithControlPlaneHardware(1), @@ -1462,17 +1535,17 @@ func TestTinkerbellKubernetes129UbuntuWorkerNodeScaleUp(t *testing.T) { ) runSimpleUpgradeFlowForBareMetal( test, - v1alpha1.Kube129, + v1alpha1.Kube130, framework.WithClusterUpgrade(api.WithWorkerNodeCount(2)), ) } -func TestTinkerbellKubernetes129UbuntuWorkerNodeScaleDown(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) +func TestTinkerbellKubernetes130UbuntuWorkerNodeScaleDown(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(2)), framework.WithControlPlaneHardware(1), @@ -1480,17 +1553,17 @@ func TestTinkerbellKubernetes129UbuntuWorkerNodeScaleDown(t *testing.T) { ) runSimpleUpgradeFlowForBareMetal( test, - v1alpha1.Kube129, + v1alpha1.Kube130, 
framework.WithClusterUpgrade(api.WithWorkerNodeCount(1)), ) } -func TestTinkerbellKubernetes129UbuntuControlPlaneScaleDown(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) +func TestTinkerbellKubernetes130UbuntuControlPlaneScaleDown(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithClusterFiller(api.WithControlPlaneCount(3)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), framework.WithControlPlaneHardware(3), @@ -1498,23 +1571,23 @@ func TestTinkerbellKubernetes129UbuntuControlPlaneScaleDown(t *testing.T) { ) runSimpleUpgradeFlowForBareMetal( test, - v1alpha1.Kube129, + v1alpha1.Kube130, framework.WithClusterUpgrade(api.WithControlPlaneCount(1)), ) } // Worker nodegroup taints and labels -func TestTinkerbellKubernetes129UbuntuWorkerNodeGroupsTaintsAndLabels(t *testing.T) { +func TestTinkerbellKubernetes130UbuntuWorkerNodeGroupsTaintsAndLabels(t *testing.T) { test := framework.NewClusterE2ETest( t, framework.NewTinkerbell( t, - framework.WithUbuntu129Tinkerbell(), + framework.WithUbuntu130Tinkerbell(), framework.WithCustomTinkerbellMachineConfig(nodeGroupLabel1), framework.WithCustomTinkerbellMachineConfig(nodeGroupLabel2), ), framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneLabel(cpKey1, cpVal1), api.WithControlPlaneTaints([]corev1.Taint{framework.NoScheduleTaint()}), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -1537,23 +1610,23 @@ func TestTinkerbellKubernetes129UbuntuWorkerNodeGroupsTaintsAndLabels(t *testing // Proxy tests -func TestTinkerbellAirgappedKubernetes129UbuntuProxyConfigFlow(t *testing.T) { +func TestTinkerbellAirgappedKubernetes130UbuntuProxyConfigFlow(t *testing.T) { localIp, err := networkutils.GetLocalIP() if err != nil { t.Fatalf("Cannot get admin machine local IP: %v", err) } t.Logf("Admin machine's IP is: %s", localIp) - kubeVersion := strings.Replace(string(v1alpha1.Kube129), ".", "-", 1) + kubeVersion := strings.Replace(string(v1alpha1.Kube130), ".", "-", 1) test := framework.NewClusterE2ETest( t, framework.NewTinkerbell(t, - framework.WithUbuntu129Tinkerbell(), + framework.WithUbuntu130Tinkerbell(), framework.WithHookImagesURLPath("http://"+localIp.String()+":8080"), ), framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), ), framework.WithControlPlaneHardware(1), framework.WithWorkerHardware(1), @@ -1564,11 +1637,11 @@ func TestTinkerbellAirgappedKubernetes129UbuntuProxyConfigFlow(t *testing.T) { } // OOB test -func TestTinkerbellKubernetes129UbuntuOOB(t *testing.T) { +func TestTinkerbellKubernetes130UbuntuOOB(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithOOBConfiguration(), framework.WithControlPlaneHardware(1), framework.WithWorkerHardware(1), @@ -1576,26 +1649,26 @@ func TestTinkerbellKubernetes129UbuntuOOB(t *testing.T) { runTinkerbellSimpleFlow(test) } -func 
TestTinkerbellK8sUpgrade128to129WithUbuntuOOB(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu128Tinkerbell()) +func TestTinkerbellK8sUpgrade129to130WithUbuntuOOB(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), framework.WithOOBConfiguration(), framework.WithControlPlaneHardware(2), framework.WithWorkerHardware(2), ) runSimpleUpgradeFlowForBareMetal( test, - v1alpha1.Kube129, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube129)), - provider.WithProviderUpgrade(framework.Ubuntu129Image()), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(framework.Ubuntu130Image()), ) } -func TestTinkerbellSingleNode128To129UbuntuManagementCPUpgradeAPI(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu128Tinkerbell()) +func TestTinkerbellSingleNode129To130UbuntuManagementCPUpgradeAPI(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) managementCluster := framework.NewClusterE2ETest( t, provider, @@ -1603,7 +1676,7 @@ func TestTinkerbellSingleNode128To129UbuntuManagementCPUpgradeAPI(t *testing.T) framework.WithWorkerHardware(2), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithControlPlaneCount(1), api.WithEtcdCountIfExternal(0), api.RemoveAllWorkerNodeGroups(), @@ -1616,10 +1689,10 @@ func TestTinkerbellSingleNode128To129UbuntuManagementCPUpgradeAPI(t *testing.T) runWorkloadClusterUpgradeFlowWithAPIForBareMetal( test, api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneCount(3), ), - provider.WithKubeVersionAndOS(v1alpha1.Kube129, framework.Ubuntu2004, nil), + provider.WithKubeVersionAndOS(v1alpha1.Kube130, framework.Ubuntu2004, nil), ) } @@ -1649,3 +1722,61 @@ func TestTinkerbellKubernetes128UpgradeManagementComponents(t *testing.T) { test.RunEKSA([]string{"upgrade", "management-components", "-f", test.ClusterConfigLocation, "-v", "99"}) test.DeleteCluster() } + +// TestTinkerbellKubernetes125UbuntuTo129MultipleUpgrade creates a single 1.25 cluster and upgrades it +// all the way until 1.29. This tests each K8s version upgrade in a single test and saves up +// hardware which would otherwise be needed for each test as part of both create and upgrade. 
+func TestTinkerbellKubernetes125UbuntuTo129MultipleUpgrade(t *testing.T) {
+	var kube126clusterOpts []framework.ClusterE2ETestOpt
+	var kube127clusterOpts []framework.ClusterE2ETestOpt
+	var kube128clusterOpts []framework.ClusterE2ETestOpt
+	var kube129clusterOpts []framework.ClusterE2ETestOpt
+	provider := framework.NewTinkerbell(t, framework.WithUbuntu125Tinkerbell())
+	test := framework.NewClusterE2ETest(
+		t,
+		provider,
+		framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
+		framework.WithClusterFiller(api.WithControlPlaneCount(1)),
+		framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
+		framework.WithControlPlaneHardware(2),
+		framework.WithWorkerHardware(2),
+	).WithClusterConfig(
+		provider.WithKubeVersionAndOS(v1alpha1.Kube125, framework.Ubuntu2004, nil),
+	)
+
+	kube126clusterOpts = append(
+		kube126clusterOpts,
+		framework.WithClusterUpgrade(
+			api.WithKubernetesVersion(v1alpha1.Kube126),
+		),
+		provider.WithProviderUpgrade(framework.Ubuntu126Image()),
+	)
+	kube127clusterOpts = append(
+		kube127clusterOpts,
+		framework.WithClusterUpgrade(
+			api.WithKubernetesVersion(v1alpha1.Kube127),
+		),
+		provider.WithProviderUpgrade(framework.Ubuntu127Image()),
+	)
+	kube128clusterOpts = append(
+		kube128clusterOpts,
+		framework.WithClusterUpgrade(
+			api.WithKubernetesVersion(v1alpha1.Kube128),
+		),
+		provider.WithProviderUpgrade(framework.Ubuntu128Image()),
+	)
+	kube129clusterOpts = append(
+		kube129clusterOpts,
+		framework.WithClusterUpgrade(
+			api.WithKubernetesVersion(v1alpha1.Kube129),
+		),
+		provider.WithProviderUpgrade(framework.Ubuntu129Image()),
+	)
+	runMultipleUpgradesFlowForBareMetal(
+		test,
+		kube126clusterOpts,
+		kube127clusterOpts,
+		kube128clusterOpts,
+		kube129clusterOpts,
+	)
+}
diff --git a/test/e2e/tools/eks-anywhere-test-tool/go.mod b/test/e2e/tools/eks-anywhere-test-tool/go.mod
index 123376bdf975..585f8df612e6 100644
--- a/test/e2e/tools/eks-anywhere-test-tool/go.mod
+++ b/test/e2e/tools/eks-anywhere-test-tool/go.mod
@@ -35,9 +35,9 @@ require (
 	go.uber.org/atomic v1.10.0 // indirect
 	go.uber.org/multierr v1.8.0 // indirect
 	go.uber.org/zap v1.22.0 // indirect
-	golang.org/x/net v0.17.0 // indirect
-	golang.org/x/sys v0.13.0 // indirect
-	golang.org/x/text v0.13.0 // indirect
+	golang.org/x/net v0.23.0 // indirect
+	golang.org/x/sys v0.18.0 // indirect
+	golang.org/x/text v0.14.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.66.4 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
diff --git a/test/e2e/tools/eks-anywhere-test-tool/go.sum b/test/e2e/tools/eks-anywhere-test-tool/go.sum
index c8bdc3e009f5..15f256cfccb0 100644
--- a/test/e2e/tools/eks-anywhere-test-tool/go.sum
+++ b/test/e2e/tools/eks-anywhere-test-tool/go.sum
@@ -938,8 +938,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
 golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
-golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
-golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
+golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
+golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1046,8 +1046,8 @@ golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
-golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
+golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201113234701-d7a72108b828/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -1061,8 +1061,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
-golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
diff --git a/test/e2e/upgrade.go b/test/e2e/upgrade.go
index 4b02142a1ac4..30288d2e6b91 100644
--- a/test/e2e/upgrade.go
+++ b/test/e2e/upgrade.go
@@ -93,6 +93,19 @@ func runInPlaceUpgradeFlowForBareMetal(test *framework.ClusterE2ETest, clusterOp
 	test.ValidateHardwareDecommissioned()
 }
 
+func runMultipleUpgradesFlowForBareMetal(test *framework.ClusterE2ETest, clusterOpts ...[]framework.ClusterE2ETestOpt) {
+	test.GenerateHardwareConfig()
+	test.CreateCluster(framework.WithControlPlaneWaitTimeout("20m"))
+	for _, opts := range clusterOpts {
+		test.UpgradeClusterWithNewConfig(opts)
+		test.GenerateSupportBundleOnCleanupIfTestFailed()
+		test.ValidateClusterState()
+		test.StopIfFailed()
+	}
+	test.DeleteCluster()
+	test.ValidateHardwareDecommissioned()
+}
+
 // runSimpleUpgradeFlowForBaremetalWithoutClusterConfigGeneration runs the Create, Upgrade and Delete cluster flows
 // for Baremetal that use the cluster config generated by the WithClusterConfig method when the test object is created,
 // and avoids regenerating a cluster config with defaults.
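Editor's note on the new helper above: runMultipleUpgradesFlowForBareMetal takes one []framework.ClusterE2ETestOpt per upgrade hop and applies them in order. It creates the cluster once, then for each hop it upgrades, validates cluster state, and stops on failure, before finally deleting the cluster and checking that the hardware was decommissioned. Because EKS Anywhere, like upstream Kubernetes, upgrades one minor version at a time, the test in this patch walks 1.25 through 1.29 hop by hop while reusing the same hardware. A minimal sketch of how another test could drive the helper for a shorter chain (hypothetical test name; it assumes only the framework options already referenced in this patch, such as WithUbuntu128Tinkerbell, Ubuntu129Image, and Ubuntu130Image):

	func TestTinkerbellKubernetes128UbuntuTo130MultipleUpgrade(t *testing.T) {
		// Start from a 1.28 Ubuntu cluster; two hardware entries per role cover node rollovers.
		provider := framework.NewTinkerbell(t, framework.WithUbuntu128Tinkerbell())
		test := framework.NewClusterE2ETest(
			t,
			provider,
			framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)),
			framework.WithClusterFiller(api.WithControlPlaneCount(1)),
			framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
			framework.WithControlPlaneHardware(2),
			framework.WithWorkerHardware(2),
		).WithClusterConfig(
			provider.WithKubeVersionAndOS(v1alpha1.Kube128, framework.Ubuntu2004, nil),
		)

		// One slice of options per hop; each hop bumps the cluster spec and the node image together.
		kube129Opts := []framework.ClusterE2ETestOpt{
			framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube129)),
			provider.WithProviderUpgrade(framework.Ubuntu129Image()),
		}
		kube130Opts := []framework.ClusterE2ETestOpt{
			framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)),
			provider.WithProviderUpgrade(framework.Ubuntu130Image()),
		}

		// The helper applies each hop in order and cleans up at the end.
		runMultipleUpgradesFlowForBareMetal(test, kube129Opts, kube130Opts)
	}
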
diff --git a/test/e2e/vsphere_test.go b/test/e2e/vsphere_test.go index 53b31606bd71..3944b669ddf2 100644 --- a/test/e2e/vsphere_test.go +++ b/test/e2e/vsphere_test.go @@ -63,7 +63,7 @@ func TestVSphereKubernetes129BottlerocketAPIServerExtraArgsUpgradeFlow(t *testin } // Autoimport -func TestVSphereKubernetes125BottlerocketAutoimport(t *testing.T) { +func TestVSphereKubernetes126BottlerocketAutoimport(t *testing.T) { provider := framework.NewVSphere(t, framework.WithVSphereFillers( api.WithTemplateForAllMachines(""), @@ -73,12 +73,12 @@ func TestVSphereKubernetes125BottlerocketAutoimport(t *testing.T) { test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)), ) runAutoImportFlow(test, provider) } -func TestVSphereKubernetes126BottlerocketAutoimport(t *testing.T) { +func TestVSphereKubernetes127BottlerocketAutoimport(t *testing.T) { provider := framework.NewVSphere(t, framework.WithVSphereFillers( api.WithTemplateForAllMachines(""), @@ -88,12 +88,12 @@ func TestVSphereKubernetes126BottlerocketAutoimport(t *testing.T) { test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), ) runAutoImportFlow(test, provider) } -func TestVSphereKubernetes127BottlerocketAutoimport(t *testing.T) { +func TestVSphereKubernetes128BottlerocketAutoimport(t *testing.T) { provider := framework.NewVSphere(t, framework.WithVSphereFillers( api.WithTemplateForAllMachines(""), @@ -103,12 +103,12 @@ func TestVSphereKubernetes127BottlerocketAutoimport(t *testing.T) { test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), ) runAutoImportFlow(test, provider) } -func TestVSphereKubernetes128BottlerocketAutoimport(t *testing.T) { +func TestVSphereKubernetes129BottlerocketAutoimport(t *testing.T) { provider := framework.NewVSphere(t, framework.WithVSphereFillers( api.WithTemplateForAllMachines(""), @@ -118,12 +118,12 @@ func TestVSphereKubernetes128BottlerocketAutoimport(t *testing.T) { test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), ) runAutoImportFlow(test, provider) } -func TestVSphereKubernetes129BottlerocketAutoimport(t *testing.T) { +func TestVSphereKubernetes130BottlerocketAutoimport(t *testing.T) { provider := framework.NewVSphere(t, framework.WithVSphereFillers( api.WithTemplateForAllMachines(""), @@ -133,22 +133,12 @@ func TestVSphereKubernetes129BottlerocketAutoimport(t *testing.T) { test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), ) runAutoImportFlow(test, provider) } // AWS IAM Auth -func TestVSphereKubernetes125AWSIamAuth(t *testing.T) { - test := framework.NewClusterE2ETest( - t, - framework.NewVSphere(t, framework.WithUbuntu125()), - framework.WithAWSIam(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - ) - runAWSIamAuthFlow(test) -} - func TestVSphereKubernetes126AWSIamAuth(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -189,10 +179,10 @@ 
func TestVSphereKubernetes129AWSIamAuth(t *testing.T) { runAWSIamAuthFlow(test) } -func TestVSphereKubernetes125BottleRocketAWSIamAuth(t *testing.T) { +func TestVSphereKubernetes130AWSIamAuth(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewVSphere(t, framework.WithBottleRocket125()), + framework.NewVSphere(t, framework.WithUbuntu125()), framework.WithAWSIam(), framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), ) @@ -239,35 +229,33 @@ func TestVSphereKubernetes129BottleRocketAWSIamAuth(t *testing.T) { runAWSIamAuthFlow(test) } -func TestVSphereKubernetes127To128AWSIamAuthUpgrade(t *testing.T) { - provider := framework.NewVSphere(t, framework.WithUbuntu127()) +func TestVSphereKubernetes130BottleRocketAWSIamAuth(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithBottleRocket130()), + framework.WithAWSIam(), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + ) + runAWSIamAuthFlow(test) +} + +func TestVSphereKubernetes129To130AWSIamAuthUpgrade(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu129()) test := framework.NewClusterE2ETest( t, provider, framework.WithAWSIam(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), ) runUpgradeFlowWithAWSIamAuth( test, - v1alpha1.Kube128, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube128)), - provider.WithProviderUpgrade(provider.Ubuntu128Template()), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Ubuntu130Template()), ) } // Curated packages -func TestVSphereKubernetes125CuratedPackagesSimpleFlow(t *testing.T) { - framework.CheckCuratedPackagesCredentials(t) - test := framework.NewClusterE2ETest(t, - framework.NewVSphere(t, framework.WithUbuntu125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), - EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, - EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), - ) - runCuratedPackageInstallSimpleFlow(test) -} - func TestVSphereKubernetes126CuratedPackagesSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, @@ -316,12 +304,12 @@ func TestVSphereKubernetes129CuratedPackagesSimpleFlow(t *testing.T) { runCuratedPackageInstallSimpleFlow(test) } -func TestVSphereKubernetes125BottleRocketCuratedPackagesSimpleFlow(t *testing.T) { +func TestVSphereKubernetes130CuratedPackagesSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewVSphere(t, framework.WithBottleRocket125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.NewVSphere(t, framework.WithUbuntu125()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) @@ -376,16 +364,16 @@ func TestVSphereKubernetes129BottleRocketCuratedPackagesSimpleFlow(t *testing.T) runCuratedPackageInstallSimpleFlow(test) } -func 
TestVSphereKubernetes125CuratedPackagesEmissarySimpleFlow(t *testing.T) { +func TestVSphereKubernetes130BottleRocketCuratedPackagesSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewVSphere(t, framework.WithUbuntu125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.NewVSphere(t, framework.WithBottleRocket130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runCuratedPackageEmissaryInstallSimpleFlow(test) + runCuratedPackageInstallSimpleFlow(test) } func TestVSphereKubernetes126CuratedPackagesEmissarySimpleFlow(t *testing.T) { @@ -436,12 +424,12 @@ func TestVSphereKubernetes129CuratedPackagesEmissarySimpleFlow(t *testing.T) { runCuratedPackageEmissaryInstallSimpleFlow(test) } -func TestVSphereKubernetes125BottleRocketCuratedPackagesEmissarySimpleFlow(t *testing.T) { +func TestVSphereKubernetes130CuratedPackagesEmissarySimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewVSphere(t, framework.WithBottleRocket125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.NewVSphere(t, framework.WithUbuntu130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) @@ -496,16 +484,16 @@ func TestVSphereKubernetes129BottleRocketCuratedPackagesEmissarySimpleFlow(t *te runCuratedPackageEmissaryInstallSimpleFlow(test) } -func TestVSphereKubernetes125CuratedPackagesHarborSimpleFlow(t *testing.T) { +func TestVSphereKubernetes130BottleRocketCuratedPackagesEmissarySimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewVSphere(t, framework.WithUbuntu125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.NewVSphere(t, framework.WithBottleRocket130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test) + runCuratedPackageEmissaryInstallSimpleFlow(test) } func TestVSphereKubernetes126CuratedPackagesHarborSimpleFlow(t *testing.T) { @@ -556,12 +544,12 @@ func TestVSphereKubernetes129CuratedPackagesHarborSimpleFlow(t *testing.T) { runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test) } -func TestVSphereKubernetes125BottleRocketCuratedPackagesHarborSimpleFlow(t *testing.T) { +func TestVSphereKubernetes130CuratedPackagesHarborSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewVSphere(t, framework.WithBottleRocket125()), - 
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube128), + framework.NewVSphere(t, framework.WithUbuntu130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) @@ -616,16 +604,16 @@ func TestVSphereKubernetes129BottleRocketCuratedPackagesHarborSimpleFlow(t *test runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test) } -func TestVSphereKubernetes125CuratedPackagesAdotUpdateFlow(t *testing.T) { +func TestVSphereKubernetes130BottleRocketCuratedPackagesHarborSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewVSphere(t, framework.WithUbuntu125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.NewVSphere(t, framework.WithBottleRocket130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runCuratedPackagesAdotInstallUpdateFlow(test) + runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test) } func TestVSphereKubernetes126CuratedPackagesAdotUpdateFlow(t *testing.T) { @@ -676,12 +664,12 @@ func TestVSphereKubernetes129CuratedPackagesAdotUpdateFlow(t *testing.T) { runCuratedPackagesAdotInstallUpdateFlow(test) } -func TestVSphereKubernetes125BottleRocketCuratedPackagesAdotUpdateFlow(t *testing.T) { +func TestVSphereKubernetes130CuratedPackagesAdotUpdateFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewVSphere(t, framework.WithBottleRocket125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.NewVSphere(t, framework.WithUbuntu130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) @@ -736,18 +724,16 @@ func TestVSphereKubernetes129BottleRocketCuratedPackagesAdotUpdateFlow(t *testin runCuratedPackagesAdotInstallUpdateFlow(test) } -func TestVSphereKubernetes125UbuntuCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) { - minNodes := 1 - maxNodes := 2 +func TestVSphereKubernetes130BottleRocketCuratedPackagesAdotUpdateFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewVSphere(t, framework.WithUbuntu125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.NewVSphere(t, framework.WithBottleRocket130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, 
EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runAutoscalerWithMetricsServerSimpleFlow(test) + runCuratedPackagesAdotInstallUpdateFlow(test) } func TestVSphereKubernetes126UbuntuCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) { @@ -806,14 +792,14 @@ func TestVSphereKubernetes129UbuntuCuratedPackagesClusterAutoscalerSimpleFlow(t runAutoscalerWithMetricsServerSimpleFlow(test) } -func TestVSphereKubernetes125BottleRocketCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) { +func TestVSphereKubernetes130UbuntuCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) { minNodes := 1 maxNodes := 2 framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewVSphere(t, framework.WithBottleRocket125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.NewVSphere(t, framework.WithUbuntu130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) @@ -862,16 +848,18 @@ func TestVSphereKubernetes128BottleRocketCuratedPackagesClusterAutoscalerSimpleF runAutoscalerWithMetricsServerSimpleFlow(test) } -func TestVSphereKubernetes125UbuntuCuratedPackagesPrometheusSimpleFlow(t *testing.T) { +func TestVSphereKubernetes130BottleRocketCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) { + minNodes := 1 + maxNodes := 2 framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewVSphere(t, framework.WithUbuntu125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.NewVSphere(t, framework.WithBottleRocket130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runCuratedPackagesPrometheusInstallSimpleFlow(test) + runAutoscalerWithMetricsServerSimpleFlow(test) } func TestVSphereKubernetes126UbuntuCuratedPackagesPrometheusSimpleFlow(t *testing.T) { @@ -922,12 +910,12 @@ func TestVSphereKubernetes129UbuntuCuratedPackagesPrometheusSimpleFlow(t *testin runCuratedPackagesPrometheusInstallSimpleFlow(test) } -func TestVSphereKubernetes125BottleRocketCuratedPackagesPrometheusSimpleFlow(t *testing.T) { +func TestVSphereKubernetes130UbuntuCuratedPackagesPrometheusSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewVSphere(t, framework.WithBottleRocket125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.NewVSphere(t, framework.WithUbuntu130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, 
EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) @@ -970,11 +958,16 @@ func TestVSphereKubernetes128BottleRocketCuratedPackagesPrometheusSimpleFlow(t * runCuratedPackagesPrometheusInstallSimpleFlow(test) } -func TestVSphereKubernetes125UbuntuWorkloadClusterCuratedPackagesSimpleFlow(t *testing.T) { +func TestVSphereKubernetes130BottleRocketCuratedPackagesPrometheusSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) - provider := framework.NewVSphere(t, framework.WithUbuntu125()) - test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube125) - runCuratedPackageRemoteClusterInstallSimpleFlow(test) + test := framework.NewClusterE2ETest(t, + framework.NewVSphere(t, framework.WithBottleRocket130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), + EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, + EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), + ) + runCuratedPackagesPrometheusInstallSimpleFlow(test) } func TestVSphereKubernetes126UbuntuWorkloadClusterCuratedPackagesSimpleFlow(t *testing.T) { @@ -1005,6 +998,13 @@ func TestVSphereKubernetes129UbuntuWorkloadClusterCuratedPackagesSimpleFlow(t *t runCuratedPackageRemoteClusterInstallSimpleFlow(test) } +func TestVSphereKubernetes130UbuntuWorkloadClusterCuratedPackagesSimpleFlow(t *testing.T) { + framework.CheckCuratedPackagesCredentials(t) + provider := framework.NewVSphere(t, framework.WithUbuntu130()) + test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube130) + runCuratedPackageRemoteClusterInstallSimpleFlow(test) +} + func TestVSphereMultipleTemplatesUbuntu127(t *testing.T) { framework.CheckVsphereMultiTemplateUbuntu127EnvVars(t) provider := framework.NewVSphere( @@ -1029,13 +1029,6 @@ func TestVSphereMultipleTemplatesUbuntu127(t *testing.T) { runMultiTemplatesSimpleFlow(test) } -func TestVSphereKubernetes125BottleRocketWorkloadClusterCuratedPackagesSimpleFlow(t *testing.T) { - framework.CheckCuratedPackagesCredentials(t) - provider := framework.NewVSphere(t, framework.WithBottleRocket125()) - test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube125) - runCuratedPackageRemoteClusterInstallSimpleFlow(test) -} - func TestVSphereKubernetes126BottleRocketWorkloadClusterCuratedPackagesSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) provider := framework.NewVSphere(t, framework.WithBottleRocket126()) @@ -1057,11 +1050,11 @@ func TestVSphereKubernetes128BottleRocketWorkloadClusterCuratedPackagesSimpleFlo runCuratedPackageRemoteClusterInstallSimpleFlow(test) } -func TestVSphereKubernetes125UbuntuWorkloadClusterCuratedPackagesEmissarySimpleFlow(t *testing.T) { +func TestVSphereKubernetes130BottleRocketWorkloadClusterCuratedPackagesSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) - provider := framework.NewVSphere(t, framework.WithUbuntu125()) - test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube125) - runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test) + provider := framework.NewVSphere(t, framework.WithBottleRocket130()) + test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube130) + runCuratedPackageRemoteClusterInstallSimpleFlow(test) } func TestVSphereKubernetes126UbuntuWorkloadClusterCuratedPackagesEmissarySimpleFlow(t *testing.T) { @@ -1085,10 +1078,10 @@ func TestVSphereKubernetes128UbuntuWorkloadClusterCuratedPackagesEmissarySimpleF 
runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test) } -func TestVSphereKubernetes125BottleRocketWorkloadClusterCuratedPackagesEmissarySimpleFlow(t *testing.T) { +func TestVSphereKubernetes130UbuntuWorkloadClusterCuratedPackagesEmissarySimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) - provider := framework.NewVSphere(t, framework.WithBottleRocket125()) - test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube125) + provider := framework.NewVSphere(t, framework.WithUbuntu130()) + test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube130) runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test) } @@ -1113,12 +1106,11 @@ func TestVSphereKubernetes128BottleRocketWorkloadClusterCuratedPackagesEmissaryS runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test) } -func TestVSphereKubernetes125UbuntuWorkloadClusterCuratedPackagesCertManagerSimpleFlow(t *testing.T) { +func TestVSphereKubernetes130BottleRocketWorkloadClusterCuratedPackagesEmissarySimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) - framework.CheckCertManagerCredentials(t) - provider := framework.NewVSphere(t, framework.WithUbuntu125()) - test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube125) - runCertManagerRemoteClusterInstallSimpleFlow(test) + provider := framework.NewVSphere(t, framework.WithBottleRocket130()) + test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube130) + runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test) } func TestVSphereKubernetes126UbuntuWorkloadClusterCuratedPackagesCertManagerSimpleFlow(t *testing.T) { @@ -1145,11 +1137,11 @@ func TestVSphereKubernetes128UbuntuWorkloadClusterCuratedPackagesCertManagerSimp runCertManagerRemoteClusterInstallSimpleFlow(test) } -func TestVSphereKubernetes125BottleRocketWorkloadClusterCuratedPackagesCertManagerSimpleFlow(t *testing.T) { +func TestVSphereKubernetes130UbuntuWorkloadClusterCuratedPackagesCertManagerSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) framework.CheckCertManagerCredentials(t) - provider := framework.NewVSphere(t, framework.WithBottleRocket125()) - test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube125) + provider := framework.NewVSphere(t, framework.WithUbuntu130()) + test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube130) runCertManagerRemoteClusterInstallSimpleFlow(test) } @@ -1177,6 +1169,14 @@ func TestVSphereKubernetes128BottleRocketWorkloadClusterCuratedPackagesCertManag runCertManagerRemoteClusterInstallSimpleFlow(test) } +func TestVSphereKubernetes130BottleRocketWorkloadClusterCuratedPackagesCertManagerSimpleFlow(t *testing.T) { + framework.CheckCuratedPackagesCredentials(t) + framework.CheckCertManagerCredentials(t) + provider := framework.NewVSphere(t, framework.WithBottleRocket130()) + test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube130) + runCertManagerRemoteClusterInstallSimpleFlow(test) +} + // Download artifacts func TestVSphereDownloadArtifacts(t *testing.T) { test := framework.NewClusterE2ETest( @@ -1227,6 +1227,18 @@ func TestVSphereKubernetes129GitFlux(t *testing.T) { runFluxFlow(test) } +func TestVSphereKubernetes130GitFlux(t *testing.T) { + test := framework.NewClusterE2ETest(t, + framework.NewVSphere(t, framework.WithUbuntu130()), + framework.WithFluxGit(), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + 
framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + ) + runFluxFlow(test) +} + func TestVSphereKubernetes128BottleRocketGithubFlux(t *testing.T) { test := framework.NewClusterE2ETest(t, framework.NewVSphere(t, framework.WithBottleRocket128()), @@ -1251,6 +1263,18 @@ func TestVSphereKubernetes129BottleRocketGithubFlux(t *testing.T) { runFluxFlow(test) } +func TestVSphereKubernetes130BottleRocketGithubFlux(t *testing.T) { + test := framework.NewClusterE2ETest(t, + framework.NewVSphere(t, framework.WithBottleRocket130()), + framework.WithFluxGithub(), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + ) + runFluxFlow(test) +} + func TestVSphereKubernetes128BottleRocketGitFlux(t *testing.T) { test := framework.NewClusterE2ETest(t, framework.NewVSphere(t, framework.WithBottleRocket128()), @@ -1275,6 +1299,18 @@ func TestVSphereKubernetes129BottleRocketGitFlux(t *testing.T) { runFluxFlow(test) } +func TestVSphereKubernetes130BottleRocketGitFlux(t *testing.T) { + test := framework.NewClusterE2ETest(t, + framework.NewVSphere(t, framework.WithBottleRocket130()), + framework.WithFluxGit(), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + ) + runFluxFlow(test) +} + func TestVSphereKubernetes127To128GitFluxUpgrade(t *testing.T) { provider := framework.NewVSphere(t, framework.WithUbuntu127()) test := framework.NewClusterE2ETest(t, @@ -1311,18 +1347,36 @@ func TestVSphereKubernetes128To129GitFluxUpgrade(t *testing.T) { ) } +func TestVSphereKubernetes129To130GitFluxUpgrade(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu129()) + test := framework.NewClusterE2ETest(t, + provider, + framework.WithFluxGit(), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + ) + runUpgradeFlowWithFlux( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Ubuntu130Template()), + ) +} + func TestVSphereInstallGitFluxDuringUpgrade(t *testing.T) { - provider := framework.NewVSphere(t, framework.WithUbuntu128()) + provider := framework.NewVSphere(t, framework.WithUbuntu130()) test := framework.NewClusterE2ETest(t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), ) runUpgradeFlowWithFlux( test, - v1alpha1.Kube128, + v1alpha1.Kube130, framework.WithFluxGit(), framework.WithClusterUpgrade(api.WithGitOpsRef(framework.DefaultFluxConfigName, v1alpha1.FluxConfigKind)), ) @@ -1381,6 +1435,32 @@ func TestVSphereKubernetes129UbuntuLabelsUpgradeFlow(t *testing.T) { ) } +func TestVSphereKubernetes130UbuntuLabelsUpgradeFlow(t *testing.T) { + provider := ubuntu130ProviderWithLabels(t) + + test := 
framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller( + api.WithKubernetesVersion(v1alpha1.Kube130), + api.WithExternalEtcdTopology(1), + api.WithControlPlaneCount(1), + api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate + ), + ) + + runLabelsUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade( + api.WithWorkerNodeGroup(worker0, api.WithLabel(key1, val1)), + api.WithWorkerNodeGroup(worker1, api.WithLabel(key2, val2)), + api.WithWorkerNodeGroup(worker2), + api.WithControlPlaneLabel(cpKey1, cpVal1), + ), + ) +} + func TestVSphereKubernetes128BottlerocketLabelsUpgradeFlow(t *testing.T) { provider := bottlerocket128ProviderWithLabels(t) @@ -1433,6 +1513,32 @@ func TestVSphereKubernetes129BottlerocketLabelsUpgradeFlow(t *testing.T) { ) } +func TestVSphereKubernetes130BottlerocketLabelsUpgradeFlow(t *testing.T) { + provider := bottlerocket130ProviderWithLabels(t) + + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller( + api.WithKubernetesVersion(v1alpha1.Kube130), + api.WithExternalEtcdTopology(1), + api.WithControlPlaneCount(1), + api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate + ), + ) + + runLabelsUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade( + api.WithWorkerNodeGroup(worker0, api.WithLabel(key1, val1)), + api.WithWorkerNodeGroup(worker1, api.WithLabel(key2, val2)), + api.WithWorkerNodeGroup(worker2), + api.WithControlPlaneLabel(cpKey1, cpVal1), + ), + ) +} + // Multicluster func TestVSphereKubernetes128MulticlusterWorkloadCluster(t *testing.T) { provider := framework.NewVSphere(t, framework.WithUbuntu128()) @@ -1490,6 +1596,34 @@ func TestVSphereKubernetes129MulticlusterWorkloadCluster(t *testing.T) { runWorkloadClusterFlow(test) } +func TestVSphereKubernetes130MulticlusterWorkloadCluster(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu130()) + test := framework.NewMulticlusterE2ETest( + t, + framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller( + api.WithKubernetesVersion(v1alpha1.Kube130), + api.WithControlPlaneCount(1), + api.WithWorkerNodeCount(1), + api.WithStackedEtcdTopology(), + ), + ), + framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller( + api.WithKubernetesVersion(v1alpha1.Kube130), + api.WithControlPlaneCount(1), + api.WithWorkerNodeCount(1), + api.WithStackedEtcdTopology(), + ), + ), + ) + runWorkloadClusterFlow(test) +} + func TestVSphereUpgradeMulticlusterWorkloadClusterWithGithubFlux(t *testing.T) { provider := framework.NewVSphere(t, framework.WithUbuntu126()) test := framework.NewMulticlusterE2ETest( @@ -1531,19 +1665,6 @@ func TestVSphereUpgradeMulticlusterWorkloadClusterWithGithubFlux(t *testing.T) { } // OIDC -func TestVSphereKubernetes125OIDC(t *testing.T) { - test := framework.NewClusterE2ETest( - t, - framework.NewVSphere(t, framework.WithUbuntu125()), - framework.WithOIDC(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), - framework.WithClusterFiller(api.WithControlPlaneCount(1)), - framework.WithClusterFiller(api.WithWorkerNodeCount(1)), - ) - runOIDCFlow(test) -} - func TestVSphereKubernetes126OIDC(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -1596,6 +1717,19 @@ func TestVSphereKubernetes129OIDC(t *testing.T) { runOIDCFlow(test) } +func TestVSphereKubernetes130OIDC(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, 
framework.WithUbuntu130()), + framework.WithOIDC(), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + ) + runOIDCFlow(test) +} + func TestVSphereKubernetes127To128OIDCUpgrade(t *testing.T) { provider := framework.NewVSphere(t, framework.WithUbuntu127()) test := framework.NewClusterE2ETest( @@ -1644,6 +1778,20 @@ func TestVSphereKubernetes129UbuntuProxyConfigFlow(t *testing.T) { runProxyConfigFlow(test) } +func TestVSphereKubernetes130UbuntuProxyConfigFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu130(), + framework.WithPrivateNetwork()), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithProxy(framework.VsphereProxyRequiredEnvVars), + ) + runProxyConfigFlow(test) +} + func TestVSphereKubernetes128BottlerocketProxyConfigFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -1672,11 +1820,25 @@ func TestVSphereKubernetes129BottlerocketProxyConfigFlow(t *testing.T) { runProxyConfigFlow(test) } -// Registry mirror -func TestVSphereKubernetes128UbuntuRegistryMirrorInsecureSkipVerify(t *testing.T) { +func TestVSphereKubernetes130BottlerocketProxyConfigFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewVSphere(t, framework.WithUbuntu128(), framework.WithPrivateNetwork()), + framework.NewVSphere(t, framework.WithBottleRocket130(), + framework.WithPrivateNetwork()), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithProxy(framework.VsphereProxyRequiredEnvVars), + ) + runProxyConfigFlow(test) +} + +// Registry mirror +func TestVSphereKubernetes128UbuntuRegistryMirrorInsecureSkipVerify(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu128(), framework.WithPrivateNetwork()), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), @@ -1712,6 +1874,19 @@ func TestVSphereKubernetes129UbuntuRegistryMirrorAndCert(t *testing.T) { runRegistryMirrorConfigFlow(test) } +func TestVSphereKubernetes130UbuntuRegistryMirrorAndCert(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu130(), framework.WithPrivateNetwork()), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithRegistryMirrorEndpointAndCert(constants.VSphereProviderName), + ) + runRegistryMirrorConfigFlow(test) +} + func TestVSphereKubernetes128BottlerocketRegistryMirrorAndCert(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -1738,6 +1913,19 @@ func TestVSphereKubernetes129BottlerocketRegistryMirrorAndCert(t *testing.T) { 
runRegistryMirrorConfigFlow(test) } +func TestVSphereKubernetes130BottlerocketRegistryMirrorAndCert(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithBottleRocket130(), framework.WithPrivateNetwork()), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithRegistryMirrorEndpointAndCert(constants.VSphereProviderName), + ) + runRegistryMirrorConfigFlow(test) +} + func TestVSphereKubernetes128UbuntuAuthenticatedRegistryMirror(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -1764,6 +1952,19 @@ func TestVSphereKubernetes129UbuntuAuthenticatedRegistryMirror(t *testing.T) { runRegistryMirrorConfigFlow(test) } +func TestVSphereKubernetes130UbuntuAuthenticatedRegistryMirror(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu130(), framework.WithPrivateNetwork()), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithAuthenticatedRegistryMirror(constants.VSphereProviderName), + ) + runRegistryMirrorConfigFlow(test) +} + func TestVSphereKubernetes128BottlerocketAuthenticatedRegistryMirror(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -1790,6 +1991,62 @@ func TestVSphereKubernetes129BottlerocketAuthenticatedRegistryMirror(t *testing. runRegistryMirrorConfigFlow(test) } +func TestVSphereKubernetes130BottlerocketAuthenticatedRegistryMirror(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithBottleRocket130(), framework.WithPrivateNetwork()), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithAuthenticatedRegistryMirror(constants.VSphereProviderName), + ) + runRegistryMirrorConfigFlow(test) +} + +func TestVSphereKubernetes129BottlerocketRegistryMirrorOciNamespaces(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithBottleRocket129(), framework.WithPrivateNetwork()), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithRegistryMirrorOciNamespaces(constants.VSphereProviderName), + ) + runRegistryMirrorConfigFlow(test) +} + +func TestVSphereKubernetes130BottlerocketRegistryMirrorOciNamespaces(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithBottleRocket130(), framework.WithPrivateNetwork()), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithRegistryMirrorOciNamespaces(constants.VSphereProviderName), + ) + runRegistryMirrorConfigFlow(test) +} 
+ +func TestVSphereKubernetes129UbuntuAuthenticatedRegistryMirrorCuratedPackagesSimpleFlow(t *testing.T) { + framework.CheckCuratedPackagesCredentials(t) + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu129(), framework.WithPrivateNetwork()), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithAuthenticatedRegistryMirror(constants.VSphereProviderName), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube129), + EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, + EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), + ) + runCuratedPackageInstallSimpleFlowRegistryMirror(test) +} + // Clone mode func TestVSphereKubernetes128FullClone(t *testing.T) { diskSize := 30 @@ -1829,6 +2086,25 @@ func TestVSphereKubernetes129FullClone(t *testing.T) { runVSphereCloneModeFlow(test, vsphere, diskSize) } +func TestVSphereKubernetes130FullClone(t *testing.T) { + diskSize := 30 + vsphere := framework.NewVSphere(t, + framework.WithUbuntu130(), + framework.WithFullCloneMode(), + framework.WithDiskGiBForAllMachines(diskSize), + ) + + test := framework.NewClusterE2ETest( + t, + vsphere, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + ) + runVSphereCloneModeFlow(test, vsphere, diskSize) +} + func TestVSphereKubernetes128LinkedClone(t *testing.T) { diskSize := 20 vsphere := framework.NewVSphere(t, @@ -1867,6 +2143,25 @@ func TestVSphereKubernetes129LinkedClone(t *testing.T) { runVSphereCloneModeFlow(test, vsphere, diskSize) } +func TestVSphereKubernetes130LinkedClone(t *testing.T) { + diskSize := 20 + vsphere := framework.NewVSphere(t, + framework.WithUbuntu130(), + framework.WithLinkedCloneMode(), + framework.WithDiskGiBForAllMachines(diskSize), + ) + + test := framework.NewClusterE2ETest( + t, + vsphere, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + ) + runVSphereCloneModeFlow(test, vsphere, diskSize) +} + func TestVSphereKubernetes128BottlerocketFullClone(t *testing.T) { diskSize := 30 vsphere := framework.NewVSphere(t, @@ -1905,6 +2200,25 @@ func TestVSphereKubernetes129BottlerocketFullClone(t *testing.T) { runVSphereCloneModeFlow(test, vsphere, diskSize) } +func TestVSphereKubernetes130BottlerocketFullClone(t *testing.T) { + diskSize := 30 + vsphere := framework.NewVSphere(t, + framework.WithBottleRocket130(), + framework.WithFullCloneMode(), + framework.WithDiskGiBForAllMachines(diskSize), + ) + + test := framework.NewClusterE2ETest( + t, + vsphere, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + ) + runVSphereCloneModeFlow(test, vsphere, diskSize) +} + func TestVSphereKubernetes128BottlerocketLinkedClone(t *testing.T) { diskSize := 22 vsphere := framework.NewVSphere(t, 
@@ -1943,16 +2257,26 @@ func TestVSphereKubernetes129BottlerocketLinkedClone(t *testing.T) { runVSphereCloneModeFlow(test, vsphere, diskSize) } -// Simpleflow -func TestVSphereKubernetes125Ubuntu2004SimpleFlow(t *testing.T) { +func TestVSphereKubernetes130BottlerocketLinkedClone(t *testing.T) { + diskSize := 22 + vsphere := framework.NewVSphere(t, + framework.WithBottleRocket130(), + framework.WithLinkedCloneMode(), + framework.WithDiskGiBForAllMachines(diskSize), + ) + test := framework.NewClusterE2ETest( t, - framework.NewVSphere(t, framework.WithUbuntu125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + vsphere, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), ) - runSimpleFlow(test) + runVSphereCloneModeFlow(test, vsphere, diskSize) } +// Simpleflow func TestVSphereKubernetes126Ubuntu2004SimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -1989,15 +2313,13 @@ func TestVSphereKubernetes129Ubuntu2004SimpleFlow(t *testing.T) { runSimpleFlow(test) } -func TestVSphereKubernetes125Ubuntu2204SimpleFlow(t *testing.T) { - provider := framework.NewVSphere(t) +func TestVSphereKubernetes130Ubuntu2004SimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, - provider, - ).WithClusterConfig( - provider.WithKubeVersionAndOS(v1alpha1.Kube125, framework.Ubuntu2204, nil), + framework.NewVSphere(t, framework.WithUbuntu130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), ) - runSimpleFlowWithoutClusterConfigGeneration(test) + runSimpleFlow(test) } func TestVSphereKubernetes126Ubuntu2204SimpleFlow(t *testing.T) { @@ -2044,13 +2366,15 @@ func TestVSphereKubernetes129Ubuntu2204SimpleFlow(t *testing.T) { runSimpleFlowWithoutClusterConfigGeneration(test) } -func TestVSphereKubernetes125RedHatSimpleFlow(t *testing.T) { +func TestVSphereKubernetes130Ubuntu2204SimpleFlow(t *testing.T) { + provider := framework.NewVSphere(t) test := framework.NewClusterE2ETest( t, - framework.NewVSphere(t, framework.WithRedHat125VSphere()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + provider, + ).WithClusterConfig( + provider.WithKubeVersionAndOS(v1alpha1.Kube130, framework.Ubuntu2204, nil), ) - runSimpleFlow(test) + runSimpleFlowWithoutClusterConfigGeneration(test) } func TestVSphereKubernetes126RedHatSimpleFlow(t *testing.T) { @@ -2089,6 +2413,15 @@ func TestVSphereKubernetes129RedHatSimpleFlow(t *testing.T) { runSimpleFlow(test) } +func TestVSphereKubernetes130RedHatSimpleFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithRedHat130VSphere()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + ) + runSimpleFlow(test) +} + func TestVSphereKubernetes128ThreeReplicasFiveWorkersSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -2111,6 +2444,17 @@ func TestVSphereKubernetes129ThreeReplicasFiveWorkersSimpleFlow(t *testing.T) { runSimpleFlow(test) } +func TestVSphereKubernetes130ThreeReplicasFiveWorkersSimpleFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(3)), + 
framework.WithClusterFiller(api.WithWorkerNodeCount(5)), + ) + runSimpleFlow(test) +} + func TestVSphereKubernetes128DifferentNamespaceSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -2131,6 +2475,16 @@ func TestVSphereKubernetes129DifferentNamespaceSimpleFlow(t *testing.T) { runSimpleFlow(test) } +func TestVSphereKubernetes130DifferentNamespaceSimpleFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu130(), framework.WithVSphereFillers(api.WithVSphereConfigNamespaceForAllMachinesAndDatacenter(clusterNamespace))), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithClusterNamespace(clusterNamespace)), + ) + runSimpleFlow(test) +} + func TestVSphereKubernetes127BottleRocketSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -2158,6 +2512,15 @@ func TestVSphereKubernetes129BottleRocketSimpleFlow(t *testing.T) { runSimpleFlow(test) } +func TestVSphereKubernetes130BottleRocketSimpleFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithBottleRocket130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + ) + runSimpleFlow(test) +} + func TestVSphereKubernetes128BottleRocketThreeReplicasFiveWorkersSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -2180,6 +2543,17 @@ func TestVSphereKubernetes129BottleRocketThreeReplicasFiveWorkersSimpleFlow(t *t runSimpleFlow(test) } +func TestVSphereKubernetes130BottleRocketThreeReplicasFiveWorkersSimpleFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithBottleRocket130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(3)), + framework.WithClusterFiller(api.WithWorkerNodeCount(5)), + ) + runSimpleFlow(test) +} + func TestVSphereKubernetes128BottleRocketDifferentNamespaceSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -2202,6 +2576,17 @@ func TestVSphereKubernetes129BottleRocketDifferentNamespaceSimpleFlow(t *testing runSimpleFlow(test) } +func TestVSphereKubernetes130BottleRocketDifferentNamespaceSimpleFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithBottleRocket130(), + framework.WithVSphereFillers(api.WithVSphereConfigNamespaceForAllMachinesAndDatacenter(clusterNamespace))), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithClusterNamespace(clusterNamespace)), + ) + runSimpleFlow(test) +} + func TestVSphereKubernetes128CiliumAlwaysPolicyEnforcementModeSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -2239,6 +2624,19 @@ func TestVSphereKubernetes129BottleRocketWithNTP(t *testing.T) { runNTPFlow(test, v1alpha1.Bottlerocket) } +func TestVSphereKubernetes130BottleRocketWithNTP(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere( + t, framework.WithBottleRocket130(), + framework.WithNTPServersForAllMachines(), + framework.WithSSHAuthorizedKeyForAllMachines(""), // set SSH key to empty + ), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + ) + runNTPFlow(test, v1alpha1.Bottlerocket) +} + func TestVSphereKubernetes128UbuntuWithNTP(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -2265,6 +2663,19 @@ func TestVSphereKubernetes129UbuntuWithNTP(t 
*testing.T) { runNTPFlow(test, v1alpha1.Ubuntu) } +func TestVSphereKubernetes130UbuntuWithNTP(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere( + t, framework.WithUbuntu130(), + framework.WithNTPServersForAllMachines(), + framework.WithSSHAuthorizedKeyForAllMachines(""), // set SSH key to empty + ), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + ) + runNTPFlow(test, v1alpha1.Ubuntu) +} + // Bottlerocket Configuration test func TestVSphereKubernetes128BottlerocketWithBottlerocketKubernetesSettings(t *testing.T) { test := framework.NewClusterE2ETest( @@ -2292,16 +2703,20 @@ func TestVSphereKubernetes129BottlerocketWithBottlerocketKubernetesSettings(t *t runBottlerocketConfigurationFlow(test) } -// Stacked etcd -func TestVSphereKubernetes125StackedEtcdUbuntu(t *testing.T) { - test := framework.NewClusterE2ETest(t, - framework.NewVSphere(t, framework.WithUbuntu125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithClusterFiller(api.WithControlPlaneCount(3)), - framework.WithClusterFiller(api.WithStackedEtcdTopology())) - runStackedEtcdFlow(test) +func TestVSphereKubernetes130BottlerocketWithBottlerocketKubernetesSettings(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere( + t, framework.WithBottleRocket130(), + framework.WithBottlerocketKubernetesSettingsForAllMachines(), + framework.WithSSHAuthorizedKeyForAllMachines(""), // set SSH key to empty + ), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + ) + runBottlerocketConfigurationFlow(test) } +// Stacked etcd func TestVSphereKubernetes126StackedEtcdUbuntu(t *testing.T) { test := framework.NewClusterE2ETest(t, framework.NewVSphere(t, framework.WithUbuntu126()), @@ -2338,6 +2753,15 @@ func TestVSphereKubernetes129StackedEtcdUbuntu(t *testing.T) { runStackedEtcdFlow(test) } +func TestVSphereKubernetes130StackedEtcdUbuntu(t *testing.T) { + test := framework.NewClusterE2ETest(t, + framework.NewVSphere(t, framework.WithUbuntu130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(3)), + framework.WithClusterFiller(api.WithStackedEtcdTopology())) + runStackedEtcdFlow(test) +} + // Taints func TestVSphereKubernetes128UbuntuTaintsUpgradeFlow(t *testing.T) { provider := ubuntu128ProviderWithTaints(t) @@ -2391,6 +2815,32 @@ func TestVSphereKubernetes129UbuntuTaintsUpgradeFlow(t *testing.T) { ) } +func TestVSphereKubernetes130UbuntuTaintsUpgradeFlow(t *testing.T) { + provider := ubuntu130ProviderWithTaints(t) + + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller( + api.WithKubernetesVersion(v1alpha1.Kube130), + api.WithExternalEtcdTopology(1), + api.WithControlPlaneCount(1), + api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate + ), + ) + + runTaintsUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade( + api.WithWorkerNodeGroup(worker0, api.WithTaint(framework.NoExecuteTaint())), + api.WithWorkerNodeGroup(worker1, api.WithTaint(framework.NoExecuteTaint())), + api.WithWorkerNodeGroup(worker2, api.WithNoTaints()), + api.WithControlPlaneTaints([]corev1.Taint{framework.PreferNoScheduleTaint()}), + ), + ) +} + func TestVSphereKubernetes128BottlerocketTaintsUpgradeFlow(t *testing.T) { provider := bottlerocket128ProviderWithTaints(t) @@ -2443,6 +2893,32 @@ func TestVSphereKubernetes129BottlerocketTaintsUpgradeFlow(t *testing.T) { ) } +func 
TestVSphereKubernetes130BottlerocketTaintsUpgradeFlow(t *testing.T) { + provider := bottlerocket130ProviderWithTaints(t) + + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller( + api.WithKubernetesVersion(v1alpha1.Kube130), + api.WithExternalEtcdTopology(1), + api.WithControlPlaneCount(1), + api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate + ), + ) + + runTaintsUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade( + api.WithWorkerNodeGroup(worker0, api.WithTaint(framework.NoExecuteTaint())), + api.WithWorkerNodeGroup(worker1, api.WithTaint(framework.NoExecuteTaint())), + api.WithWorkerNodeGroup(worker2, api.WithNoTaints()), + api.WithControlPlaneTaints([]corev1.Taint{framework.PreferNoScheduleTaint()}), + ), + ) +} + func TestVSphereKubernetes127UbuntuWorkloadClusterTaintsFlow(t *testing.T) { provider := framework.NewVSphere(t, framework.WithUbuntu127()) @@ -2539,19 +3015,18 @@ func TestVSphereKubernetes128UbuntuTo129Upgrade(t *testing.T) { ) } -func TestVSphereKubernetes125To126Ubuntu2204Upgrade(t *testing.T) { - provider := framework.NewVSphere(t) +func TestVSphereKubernetes129UbuntuTo130Upgrade(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu129()) test := framework.NewClusterE2ETest( t, provider, - ).WithClusterConfig( - provider.WithKubeVersionAndOS(v1alpha1.Kube125, framework.Ubuntu2204, nil), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), ) - runSimpleUpgradeFlowWithoutClusterConfigGeneration( + runSimpleUpgradeFlow( test, - v1alpha1.Kube126, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), - provider.WithProviderUpgrade(provider.Ubuntu2204Kubernetes126Template()), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Ubuntu130Template()), ) } @@ -2603,6 +3078,22 @@ func TestVSphereKubernetes128To129Ubuntu2204Upgrade(t *testing.T) { ) } +func TestVSphereKubernetes129To130Ubuntu2204Upgrade(t *testing.T) { + provider := framework.NewVSphere(t) + test := framework.NewClusterE2ETest( + t, + provider, + ).WithClusterConfig( + provider.WithKubeVersionAndOS(v1alpha1.Kube129, framework.Ubuntu2204, nil), + ) + runSimpleUpgradeFlowWithoutClusterConfigGeneration( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Ubuntu2204Kubernetes130Template()), + ) +} + func TestVSphereKubernetes127To128Ubuntu2204StackedEtcdUpgrade(t *testing.T) { provider := framework.NewVSphere(t) test := framework.NewClusterE2ETest( @@ -2641,6 +3132,25 @@ func TestVSphereKubernetes128To129Ubuntu2204StackedEtcdUpgrade(t *testing.T) { ) } +func TestVSphereKubernetes129To130Ubuntu2204StackedEtcdUpgrade(t *testing.T) { + provider := framework.NewVSphere(t) + test := framework.NewClusterE2ETest( + t, + provider, + ).WithClusterConfig( + provider.WithKubeVersionAndOS(v1alpha1.Kube129, framework.Ubuntu2204, nil), + api.ClusterToConfigFiller( + api.WithStackedEtcdTopology(), + ), + ) + runSimpleUpgradeFlowWithoutClusterConfigGeneration( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Ubuntu2204Kubernetes130Template()), + ) +} + func TestVSphereKubernetes127To128RedHatUpgrade(t *testing.T) { provider := framework.NewVSphere(t, framework.WithRedHat127VSphere()) test := framework.NewClusterE2ETest( @@ -2671,6 
+3181,21 @@ func TestVSphereKubernetes128To129RedHatUpgrade(t *testing.T) { ) } +func TestVSphereKubernetes129To130RedHatUpgrade(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithRedHat129VSphere()) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + ) + runSimpleUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Redhat130Template()), + ) +} + func TestVSphereKubernetes127To128StackedEtcdRedHatUpgrade(t *testing.T) { provider := framework.NewVSphere(t, framework.WithRedHat127VSphere()) test := framework.NewClusterE2ETest( @@ -2703,6 +3228,22 @@ func TestVSphereKubernetes128To129StackedEtcdRedHatUpgrade(t *testing.T) { ) } +func TestVSphereKubernetes129To130StackedEtcdRedHatUpgrade(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithRedHat129VSphere()) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithStackedEtcdTopology()), + ) + runSimpleUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Redhat130Template()), + ) +} + func TestVSphereKubernetes126Ubuntu2004To2204Upgrade(t *testing.T) { provider := framework.NewVSphere(t) test := framework.NewClusterE2ETest( @@ -2767,6 +3308,22 @@ func TestVSphereKubernetes129Ubuntu2004To2204Upgrade(t *testing.T) { ) } +func TestVSphereKubernetes130Ubuntu2004To2204Upgrade(t *testing.T) { + provider := framework.NewVSphere(t) + test := framework.NewClusterE2ETest( + t, + provider, + ).WithClusterConfig( + provider.WithKubeVersionAndOS(v1alpha1.Kube130, framework.Ubuntu2204, nil), + ) + runSimpleUpgradeFlowWithoutClusterConfigGeneration( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Ubuntu2204Kubernetes130Template()), + ) +} + func TestVSphereKubernetes127UbuntuTo128InPlaceUpgradeCPOnly(t *testing.T) { provider := framework.NewVSphere(t, framework.WithUbuntu127()) kube127 := v1alpha1.Kube127 @@ -2846,19 +3403,46 @@ func TestVSphereKubernetes127UbuntuTo128UpgradeCiliumPolicyEnforcementMode(t *te ) } -func TestVSphereKubernetes127UbuntuTo128MultipleFieldsUpgrade(t *testing.T) { - provider := framework.NewVSphere(t, framework.WithUbuntu127()) +func TestVSphereKubernetes127UbuntuTo128MultipleFieldsUpgrade(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu127()) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), + ) + runSimpleUpgradeFlow( + test, + v1alpha1.Kube128, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube128)), + provider.WithProviderUpgrade( + provider.Ubuntu128Template(), + api.WithNumCPUsForAllMachines(vsphereCpVmNumCpuUpdateVar), + api.WithMemoryMiBForAllMachines(vsphereCpVmMemoryUpdate), + api.WithDiskGiBForAllMachines(vsphereCpDiskGiBUpdateVar), + api.WithFolderForAllMachines(vsphereFolderUpdateVar), + // Uncomment once we support tests with multiple machine configs + /*api.WithWorkloadVMsNumCPUs(vsphereWlVmNumCpuUpdateVar), + api.WithWorkloadVMsMemoryMiB(vsphereWlVmMemoryUpdate), + api.WithWorkloadDiskGiB(vsphereWlDiskGiBUpdate),*/ + // Uncomment the network field once upgrade starts 
working with it + // api.WithNetwork(vsphereNetwork2UpdateVar), + ), + ) +} + +func TestVSphereKubernetes128UbuntuTo129MultipleFieldsUpgrade(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu128()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), ) runSimpleUpgradeFlow( test, - v1alpha1.Kube128, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube128)), + v1alpha1.Kube129, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube129)), provider.WithProviderUpgrade( - provider.Ubuntu128Template(), + provider.Ubuntu129Template(), api.WithNumCPUsForAllMachines(vsphereCpVmNumCpuUpdateVar), api.WithMemoryMiBForAllMachines(vsphereCpVmMemoryUpdate), api.WithDiskGiBForAllMachines(vsphereCpDiskGiBUpdateVar), @@ -2873,19 +3457,19 @@ func TestVSphereKubernetes127UbuntuTo128MultipleFieldsUpgrade(t *testing.T) { ) } -func TestVSphereKubernetes128UbuntuTo129MultipleFieldsUpgrade(t *testing.T) { - provider := framework.NewVSphere(t, framework.WithUbuntu128()) +func TestVSphereKubernetes129UbuntuTo130MultipleFieldsUpgrade(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu129()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), ) runSimpleUpgradeFlow( test, - v1alpha1.Kube129, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube129)), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), provider.WithProviderUpgrade( - provider.Ubuntu129Template(), + provider.Ubuntu130Template(), api.WithNumCPUsForAllMachines(vsphereCpVmNumCpuUpdateVar), api.WithMemoryMiBForAllMachines(vsphereCpVmMemoryUpdate), api.WithDiskGiBForAllMachines(vsphereCpDiskGiBUpdateVar), @@ -2932,6 +3516,22 @@ func TestVSphereKubernetes129UbuntuControlPlaneNodeUpgrade(t *testing.T) { ) } +func TestVSphereKubernetes130UbuntuControlPlaneNodeUpgrade(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu130()) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + ) + runSimpleUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithControlPlaneCount(3)), + ) +} + func TestVSphereKubernetes128UbuntuWorkerNodeUpgrade(t *testing.T) { provider := framework.NewVSphere(t, framework.WithUbuntu128()) test := framework.NewClusterE2ETest( @@ -2964,6 +3564,22 @@ func TestVSphereKubernetes129UbuntuWorkerNodeUpgrade(t *testing.T) { ) } +func TestVSphereKubernetes130UbuntuWorkerNodeUpgrade(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu130()) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + ) + runSimpleUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithWorkerNodeCount(5)), + ) +} + func TestVSphereKubernetes127BottlerocketTo128Upgrade(t *testing.T) { provider := framework.NewVSphere(t, framework.WithBottleRocket127()) test := 
framework.NewClusterE2ETest( @@ -2994,6 +3610,21 @@ func TestVSphereKubernetes128BottlerocketTo129Upgrade(t *testing.T) { ) } +func TestVSphereKubernetes129BottlerocketTo130Upgrade(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithBottleRocket129()) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + ) + runSimpleUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Bottlerocket130Template()), + ) +} + func TestVSphereKubernetes127BottlerocketTo128MultipleFieldsUpgrade(t *testing.T) { provider := framework.NewVSphere(t, framework.WithBottleRocket127()) test := framework.NewClusterE2ETest( @@ -3048,6 +3679,33 @@ func TestVSphereKubernetes128BottlerocketTo129MultipleFieldsUpgrade(t *testing.T ) } +func TestVSphereKubernetes129BottlerocketTo130MultipleFieldsUpgrade(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithBottleRocket129()) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + ) + runSimpleUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade( + provider.Bottlerocket130Template(), + api.WithNumCPUsForAllMachines(vsphereCpVmNumCpuUpdateVar), + api.WithMemoryMiBForAllMachines(vsphereCpVmMemoryUpdate), + api.WithDiskGiBForAllMachines(vsphereCpDiskGiBUpdateVar), + api.WithFolderForAllMachines(vsphereFolderUpdateVar), + // Uncomment once we support tests with multiple machine configs + /*api.WithWorkloadVMsNumCPUs(vsphereWlVmNumCpuUpdateVar), + api.WithWorkloadVMsMemoryMiB(vsphereWlVmMemoryUpdate), + api.WithWorkloadDiskGiB(vsphereWlDiskGiBUpdate),*/ + // Uncomment the network field once upgrade starts working with it + // api.WithNetwork(vsphereNetwork2UpdateVar), + ), + ) +} + func TestVSphereKubernetes128BottlerocketControlPlaneNodeUpgrade(t *testing.T) { provider := framework.NewVSphere(t, framework.WithBottleRocket128()) test := framework.NewClusterE2ETest( @@ -3080,6 +3738,22 @@ func TestVSphereKubernetes129BottlerocketControlPlaneNodeUpgrade(t *testing.T) { ) } +func TestVSphereKubernetes130BottlerocketControlPlaneNodeUpgrade(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithBottleRocket130()) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + ) + runSimpleUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithControlPlaneCount(3)), + ) +} + func TestVSphereKubernetes128BottlerocketWorkerNodeUpgrade(t *testing.T) { provider := framework.NewVSphere(t, framework.WithBottleRocket128()) test := framework.NewClusterE2ETest( @@ -3112,6 +3786,22 @@ func TestVSphereKubernetes129BottlerocketWorkerNodeUpgrade(t *testing.T) { ) } +func TestVSphereKubernetes130BottlerocketWorkerNodeUpgrade(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithBottleRocket130()) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + ) + runSimpleUpgradeFlow( + test, + 
v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithWorkerNodeCount(5)), + ) +} + func TestVSphereKubernetes127UbuntuTo128StackedEtcdUpgrade(t *testing.T) { provider := framework.NewVSphere(t, framework.WithUbuntu127()) test := framework.NewClusterE2ETest( @@ -3148,6 +3838,24 @@ func TestVSphereKubernetes128UbuntuTo129StackedEtcdUpgrade(t *testing.T) { ) } +func TestVSphereKubernetes129UbuntuTo130StackedEtcdUpgrade(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu129()) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithStackedEtcdTopology()), + ) + runSimpleUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Ubuntu130Template()), + ) +} + func TestVSphereKubernetes127BottlerocketTo128StackedEtcdUpgrade(t *testing.T) { provider := framework.NewVSphere(t, framework.WithBottleRocket127()) test := framework.NewClusterE2ETest( @@ -3184,6 +3892,24 @@ func TestVSphereKubernetes128BottlerocketTo129StackedEtcdUpgrade(t *testing.T) { ) } +func TestVSphereKubernetes129BottlerocketTo130StackedEtcdUpgrade(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithBottleRocket129()) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithStackedEtcdTopology()), + ) + runSimpleUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Bottlerocket130Template()), + ) +} + func TestVSphereKubernetes127UbuntuTo128UpgradeWithCheckpoint(t *testing.T) { var clusterOpts []framework.ClusterE2ETestOpt var clusterOpts2 []framework.ClusterE2ETestOpt @@ -3198,12 +3924,12 @@ func TestVSphereKubernetes127UbuntuTo128UpgradeWithCheckpoint(t *testing.T) { ) clusterOpts = append(clusterOpts, framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube128)), framework.ExpectFailure(true), - provider.WithProviderUpgrade(provider.Ubuntu128Template(), api.WithResourcePoolforCPMachines(vsphereInvalidResourcePoolUpdateVar)), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupVmsVar, "false")) + provider.WithProviderUpgrade(provider.Ubuntu128Template(), api.WithResourcePoolforCPMachines(vsphereInvalidResourcePoolUpdateVar)), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupResourcesVar, "false")) commandOpts := []framework.CommandOpt{framework.WithControlPlaneWaitTimeout("10m")} clusterOpts2 = append(clusterOpts, framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube128)), framework.ExpectFailure(false), - provider.WithProviderUpgrade(provider.Ubuntu128Template(), api.WithResourcePoolForAllMachines(os.Getenv(vsphereResourcePoolVar))), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupVmsVar, "true")) + provider.WithProviderUpgrade(provider.Ubuntu128Template(), api.WithResourcePoolForAllMachines(os.Getenv(vsphereResourcePoolVar))), 
framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupResourcesVar, "true")) runUpgradeFlowWithCheckpoint( test, @@ -3380,14 +4106,41 @@ func TestVSphereKubernetes128To129UbuntuUpgradeFromLatestMinorRelease(t *testing ) } -func TestVSphereKubernetes128To129UbuntuInPlaceUpgradeFromLatestMinorRelease(t *testing.T) { +func TestVSphereKubernetes129To130UbuntuUpgradeFromLatestMinorRelease(t *testing.T) { + release := latestMinorRelease(t) + provider := framework.NewVSphere(t, + framework.WithVSphereFillers( + api.WithOsFamilyForAllMachines(v1alpha1.Ubuntu), + ), + framework.WithKubeVersionAndOSForRelease(v1alpha1.Kube129, framework.Ubuntu2004, release), + ) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + ) + runUpgradeFromReleaseFlow( + test, + release, + v1alpha1.Kube130, + provider.WithProviderUpgrade( + provider.Ubuntu130Template(), // Set the template so it doesn't get autoimported + ), + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + ) +} + +func TestVSphereKubernetes129To130UbuntuInPlaceUpgradeFromLatestMinorRelease(t *testing.T) { release := latestMinorRelease(t) provider := framework.NewVSphere( t, framework.WithVSphereFillers( api.WithOsFamilyForAllMachines(v1alpha1.Ubuntu), ), - framework.WithKubeVersionAndOSForRelease(v1alpha1.Kube128, framework.Ubuntu2004, release), + framework.WithKubeVersionAndOSForRelease(v1alpha1.Kube129, framework.Ubuntu2004, release), ) test := framework.NewClusterE2ETest( t, @@ -3397,7 +4150,7 @@ func TestVSphereKubernetes128To129UbuntuInPlaceUpgradeFromLatestMinorRelease(t * test.GenerateClusterConfigForVersion(release.Version, framework.ExecuteWithEksaRelease(release)) test.UpdateClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithStackedEtcdTopology(), ), api.VSphereToConfigFiller( @@ -3408,10 +4161,10 @@ func TestVSphereKubernetes128To129UbuntuInPlaceUpgradeFromLatestMinorRelease(t * test, release, framework.WithClusterUpgrade( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithInPlaceUpgradeStrategy(), ), - provider.WithProviderUpgrade(provider.Ubuntu129Template()), + provider.WithProviderUpgrade(provider.Ubuntu130Template()), ) } @@ -3509,6 +4262,33 @@ func TestVSphereKubernetes128To129RedhatUpgradeFromLatestMinorRelease(t *testing ) } +func TestVSphereKubernetes129To130RedhatUpgradeFromLatestMinorRelease(t *testing.T) { + release := latestMinorRelease(t) + provider := framework.NewVSphere(t, + framework.WithVSphereFillers( + api.WithOsFamilyForAllMachines(v1alpha1.RedHat), + ), + framework.WithKubeVersionAndOSForRelease(v1alpha1.Kube129, framework.RedHat8, release), + ) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + ) + runUpgradeFromReleaseFlow( + test, + release, + v1alpha1.Kube130, + provider.WithProviderUpgrade( + provider.Redhat130Template(), // Set the template so it doesn't get auto-imported + ), + 
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + ) +} + func TestVSphereKubernetes125UbuntuUpgradeAndRemoveWorkerNodeGroupsAPI(t *testing.T) { provider := framework.NewVSphere(t) test := framework.NewClusterE2ETest( @@ -3582,30 +4362,6 @@ func TestVSphereKubernetes127to128UpgradeFromLatestMinorReleaseBottleRocketAPI(t ) } -func TestVSphereKubernetes125UbuntuTo126InPlaceUpgrade_1CP_1Worker(t *testing.T) { - provider := framework.NewVSphere(t, framework.WithUbuntu125()) - test := framework.NewClusterE2ETest( - t, - provider, - framework.WithEnvVar(features.VSphereInPlaceEnvVar, "true"), - ).WithClusterConfig( - api.ClusterToConfigFiller( - api.WithControlPlaneCount(1), - api.WithWorkerNodeCount(1), - api.WithStackedEtcdTopology(), - api.WithInPlaceUpgradeStrategy(), - ), - api.VSphereToConfigFiller(api.RemoveEtcdVsphereMachineConfig()), - provider.WithKubeVersionAndOS(v1alpha1.Kube125, framework.Ubuntu2004, nil), - ) - - runInPlaceUpgradeFlow( - test, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), - provider.WithProviderUpgrade(provider.Ubuntu126Template()), - ) -} - func TestVSphereKubernetes126UbuntuTo127InPlaceUpgrade_3CP_1Worker(t *testing.T) { provider := framework.NewVSphere(t, framework.WithUbuntu126()) test := framework.NewClusterE2ETest( @@ -3678,33 +4434,50 @@ func TestVSphereKubernetes128UbuntuTo129InPlaceUpgrade_3CP_3Worker(t *testing.T) ) } -func TestVSphereKubernetes125UbuntuTo128InPlaceUpgrade(t *testing.T) { - var kube126clusterOpts []framework.ClusterE2ETestOpt +func TestVSphereKubernetes129UbuntuTo130InPlaceUpgrade_1CP_1Worker(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu129()) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithEnvVar(features.VSphereInPlaceEnvVar, "true"), + ).WithClusterConfig( + api.ClusterToConfigFiller( + api.WithControlPlaneCount(1), + api.WithWorkerNodeCount(1), + api.WithStackedEtcdTopology(), + api.WithInPlaceUpgradeStrategy(), + ), + api.VSphereToConfigFiller(api.RemoveEtcdVsphereMachineConfig()), + provider.WithKubeVersionAndOS(v1alpha1.Kube129, framework.Ubuntu2004, nil), + ) + + runInPlaceUpgradeFlow( + test, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Ubuntu130Template()), + ) +} + +func TestVSphereKubernetes126UbuntuTo130InPlaceUpgrade(t *testing.T) { var kube127clusterOpts []framework.ClusterE2ETestOpt var kube128clusterOpts []framework.ClusterE2ETestOpt - provider := framework.NewVSphere(t, framework.WithUbuntu125()) + var kube129clusterOpts []framework.ClusterE2ETestOpt + var kube130clusterOpts []framework.ClusterE2ETestOpt + provider := framework.NewVSphere(t, framework.WithUbuntu126()) test := framework.NewClusterE2ETest( t, provider, framework.WithEnvVar(features.VSphereInPlaceEnvVar, "true"), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube125), + api.WithKubernetesVersion(v1alpha1.Kube126), api.WithStackedEtcdTopology(), api.WithInPlaceUpgradeStrategy(), ), api.VSphereToConfigFiller( api.RemoveEtcdVsphereMachineConfig(), ), - provider.WithKubeVersionAndOS(v1alpha1.Kube125, framework.Ubuntu2004, nil), - ) - kube126clusterOpts = append( - kube126clusterOpts, - framework.WithClusterUpgrade( - api.WithKubernetesVersion(v1alpha1.Kube126), - api.WithInPlaceUpgradeStrategy(), - ), - provider.WithProviderUpgrade(provider.Ubuntu126Template()), + provider.WithKubeVersionAndOS(v1alpha1.Kube126, framework.Ubuntu2004, nil), ) 
kube127clusterOpts = append( kube127clusterOpts, @@ -3722,23 +4495,40 @@ func TestVSphereKubernetes125UbuntuTo128InPlaceUpgrade(t *testing.T) { ), provider.WithProviderUpgrade(provider.Ubuntu128Template()), ) + kube129clusterOpts = append( + kube129clusterOpts, + framework.WithClusterUpgrade( + api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithInPlaceUpgradeStrategy(), + ), + provider.WithProviderUpgrade(provider.Ubuntu129Template()), + ) + kube130clusterOpts = append( + kube130clusterOpts, + framework.WithClusterUpgrade( + api.WithKubernetesVersion(v1alpha1.Kube130), + api.WithInPlaceUpgradeStrategy(), + ), + provider.WithProviderUpgrade(provider.Ubuntu130Template()), + ) runInPlaceMultipleUpgradesFlow( test, - kube126clusterOpts, kube127clusterOpts, kube128clusterOpts, + kube129clusterOpts, + kube130clusterOpts, ) } -func TestVSphereKubernetes128UbuntuInPlaceCPScaleUp1To3(t *testing.T) { - provider := framework.NewVSphere(t, framework.WithUbuntu128()) +func TestVSphereKubernetes130UbuntuInPlaceCPScaleUp1To3(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu130()) test := framework.NewClusterE2ETest( t, provider, framework.WithEnvVar(features.VSphereInPlaceEnvVar, "true"), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithStackedEtcdTopology(), @@ -3747,7 +4537,7 @@ func TestVSphereKubernetes128UbuntuInPlaceCPScaleUp1To3(t *testing.T) { api.VSphereToConfigFiller( api.RemoveEtcdVsphereMachineConfig(), ), - provider.WithKubeVersionAndOS(v1alpha1.Kube128, framework.Ubuntu2004, nil), + provider.WithKubeVersionAndOS(v1alpha1.Kube130, framework.Ubuntu2004, nil), ) runInPlaceUpgradeFlow( test, @@ -3758,15 +4548,15 @@ func TestVSphereKubernetes128UbuntuInPlaceCPScaleUp1To3(t *testing.T) { ) } -func TestVSphereKubernetes128UbuntuInPlaceCPScaleDown3To1(t *testing.T) { - provider := framework.NewVSphere(t, framework.WithUbuntu128()) +func TestVSphereKubernetes130UbuntuInPlaceCPScaleDown3To1(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu130()) test := framework.NewClusterE2ETest( t, provider, framework.WithEnvVar(features.VSphereInPlaceEnvVar, "true"), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneCount(3), api.WithWorkerNodeCount(1), api.WithStackedEtcdTopology(), @@ -3775,7 +4565,7 @@ func TestVSphereKubernetes128UbuntuInPlaceCPScaleDown3To1(t *testing.T) { api.VSphereToConfigFiller( api.RemoveEtcdVsphereMachineConfig(), ), - provider.WithKubeVersionAndOS(v1alpha1.Kube128, framework.Ubuntu2004, nil), + provider.WithKubeVersionAndOS(v1alpha1.Kube130, framework.Ubuntu2004, nil), ) runInPlaceUpgradeFlow( test, @@ -3786,15 +4576,15 @@ func TestVSphereKubernetes128UbuntuInPlaceCPScaleDown3To1(t *testing.T) { ) } -func TestVSphereKubernetes128UbuntuInPlaceWorkerScaleUp1To2(t *testing.T) { - provider := framework.NewVSphere(t, framework.WithUbuntu128()) +func TestVSphereKubernetes130UbuntuInPlaceWorkerScaleUp1To2(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu130()) test := framework.NewClusterE2ETest( t, provider, framework.WithEnvVar(features.VSphereInPlaceEnvVar, "true"), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneCount(1), 
api.WithWorkerNodeCount(1), api.WithStackedEtcdTopology(), @@ -3803,7 +4593,7 @@ func TestVSphereKubernetes128UbuntuInPlaceWorkerScaleUp1To2(t *testing.T) { api.VSphereToConfigFiller( api.RemoveEtcdVsphereMachineConfig(), ), - provider.WithKubeVersionAndOS(v1alpha1.Kube128, framework.Ubuntu2004, nil), + provider.WithKubeVersionAndOS(v1alpha1.Kube130, framework.Ubuntu2004, nil), ) runInPlaceUpgradeFlow( test, @@ -3814,15 +4604,15 @@ func TestVSphereKubernetes128UbuntuInPlaceWorkerScaleUp1To2(t *testing.T) { ) } -func TestVSphereKubernetes128UbuntuInPlaceWorkerScaleDown2To1(t *testing.T) { - provider := framework.NewVSphere(t, framework.WithUbuntu128()) +func TestVSphereKubernetes130UbuntuInPlaceWorkerScaleDown2To1(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu130()) test := framework.NewClusterE2ETest( t, provider, framework.WithEnvVar(features.VSphereInPlaceEnvVar, "true"), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(2), api.WithStackedEtcdTopology(), @@ -3831,7 +4621,7 @@ func TestVSphereKubernetes128UbuntuInPlaceWorkerScaleDown2To1(t *testing.T) { api.VSphereToConfigFiller( api.RemoveEtcdVsphereMachineConfig(), ), - provider.WithKubeVersionAndOS(v1alpha1.Kube128, framework.Ubuntu2004, nil), + provider.WithKubeVersionAndOS(v1alpha1.Kube130, framework.Ubuntu2004, nil), ) runInPlaceUpgradeFlow( test, @@ -3848,22 +4638,22 @@ func TestVSphereKubernetes128UpgradeManagementComponents(t *testing.T) { runUpgradeManagementComponentsFlow(t, release, provider, v1alpha1.Kube128, framework.Ubuntu2004) } -func TestVSphereInPlaceUpgradeMulticlusterWorkloadClusterK8sUpgrade125To126(t *testing.T) { - provider := framework.NewVSphere(t, framework.WithUbuntu125()) +func TestVSphereInPlaceUpgradeMulticlusterWorkloadClusterK8sUpgrade126To127(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu126()) managementCluster := framework.NewClusterE2ETest( t, provider, framework.WithEnvVar(features.VSphereInPlaceEnvVar, "true"), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube125), + api.WithKubernetesVersion(v1alpha1.Kube126), api.WithStackedEtcdTopology(), api.WithInPlaceUpgradeStrategy(), ), api.VSphereToConfigFiller( api.RemoveEtcdVsphereMachineConfig(), ), - provider.WithKubeVersionAndOS(v1alpha1.Kube125, framework.Ubuntu2004, nil), + provider.WithKubeVersionAndOS(v1alpha1.Kube126, framework.Ubuntu2004, nil), ) test := framework.NewMulticlusterE2ETest(t, managementCluster) test.WithWorkloadClusters( @@ -3875,42 +4665,42 @@ func TestVSphereInPlaceUpgradeMulticlusterWorkloadClusterK8sUpgrade125To126(t *t ).WithClusterConfig( api.ClusterToConfigFiller( api.WithManagementCluster(managementCluster.ClusterName), - api.WithKubernetesVersion(v1alpha1.Kube125), + api.WithKubernetesVersion(v1alpha1.Kube126), api.WithStackedEtcdTopology(), api.WithInPlaceUpgradeStrategy(), ), api.VSphereToConfigFiller( api.RemoveEtcdVsphereMachineConfig(), ), - provider.WithKubeVersionAndOS(v1alpha1.Kube125, framework.Ubuntu2004, nil), + provider.WithKubeVersionAndOS(v1alpha1.Kube126, framework.Ubuntu2004, nil), ), ) runInPlaceWorkloadUpgradeFlow( test, framework.WithClusterUpgrade( - api.WithKubernetesVersion(v1alpha1.Kube126), + api.WithKubernetesVersion(v1alpha1.Kube127), api.WithInPlaceUpgradeStrategy(), ), - provider.WithProviderUpgrade(provider.Ubuntu126Template()), + 
provider.WithProviderUpgrade(provider.Ubuntu127Template()), ) } -func TestVSphereInPlaceUpgradeMulticlusterWorkloadClusterK8sUpgrade126To127(t *testing.T) { - provider := framework.NewVSphere(t, framework.WithUbuntu126()) +func TestVSphereInPlaceUpgradeMulticlusterWorkloadClusterK8sUpgrade127To128(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu127()) managementCluster := framework.NewClusterE2ETest( t, provider, framework.WithEnvVar(features.VSphereInPlaceEnvVar, "true"), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube126), + api.WithKubernetesVersion(v1alpha1.Kube127), api.WithStackedEtcdTopology(), api.WithInPlaceUpgradeStrategy(), ), api.VSphereToConfigFiller( api.RemoveEtcdVsphereMachineConfig(), ), - provider.WithKubeVersionAndOS(v1alpha1.Kube126, framework.Ubuntu2004, nil), + provider.WithKubeVersionAndOS(v1alpha1.Kube127, framework.Ubuntu2004, nil), ) test := framework.NewMulticlusterE2ETest(t, managementCluster) test.WithWorkloadClusters( @@ -3922,42 +4712,42 @@ func TestVSphereInPlaceUpgradeMulticlusterWorkloadClusterK8sUpgrade126To127(t *t ).WithClusterConfig( api.ClusterToConfigFiller( api.WithManagementCluster(managementCluster.ClusterName), - api.WithKubernetesVersion(v1alpha1.Kube126), + api.WithKubernetesVersion(v1alpha1.Kube127), api.WithStackedEtcdTopology(), api.WithInPlaceUpgradeStrategy(), ), api.VSphereToConfigFiller( api.RemoveEtcdVsphereMachineConfig(), ), - provider.WithKubeVersionAndOS(v1alpha1.Kube126, framework.Ubuntu2004, nil), + provider.WithKubeVersionAndOS(v1alpha1.Kube127, framework.Ubuntu2004, nil), ), ) runInPlaceWorkloadUpgradeFlow( test, framework.WithClusterUpgrade( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube128), api.WithInPlaceUpgradeStrategy(), ), - provider.WithProviderUpgrade(provider.Ubuntu127Template()), + provider.WithProviderUpgrade(provider.Ubuntu128Template()), ) } -func TestVSphereInPlaceUpgradeMulticlusterWorkloadClusterK8sUpgrade127To128(t *testing.T) { - provider := framework.NewVSphere(t, framework.WithUbuntu127()) +func TestVSphereInPlaceUpgradeMulticlusterWorkloadClusterK8sUpgrade128To129(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu128()) managementCluster := framework.NewClusterE2ETest( t, provider, framework.WithEnvVar(features.VSphereInPlaceEnvVar, "true"), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube128), api.WithStackedEtcdTopology(), api.WithInPlaceUpgradeStrategy(), ), api.VSphereToConfigFiller( api.RemoveEtcdVsphereMachineConfig(), ), - provider.WithKubeVersionAndOS(v1alpha1.Kube127, framework.Ubuntu2004, nil), + provider.WithKubeVersionAndOS(v1alpha1.Kube128, framework.Ubuntu2004, nil), ) test := framework.NewMulticlusterE2ETest(t, managementCluster) test.WithWorkloadClusters( @@ -3969,42 +4759,42 @@ func TestVSphereInPlaceUpgradeMulticlusterWorkloadClusterK8sUpgrade127To128(t *t ).WithClusterConfig( api.ClusterToConfigFiller( api.WithManagementCluster(managementCluster.ClusterName), - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube128), api.WithStackedEtcdTopology(), api.WithInPlaceUpgradeStrategy(), ), api.VSphereToConfigFiller( api.RemoveEtcdVsphereMachineConfig(), ), - provider.WithKubeVersionAndOS(v1alpha1.Kube127, framework.Ubuntu2004, nil), + provider.WithKubeVersionAndOS(v1alpha1.Kube128, framework.Ubuntu2004, nil), ), ) 
runInPlaceWorkloadUpgradeFlow( test, framework.WithClusterUpgrade( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithInPlaceUpgradeStrategy(), ), - provider.WithProviderUpgrade(provider.Ubuntu128Template()), + provider.WithProviderUpgrade(provider.Ubuntu129Template()), ) } -func TestVSphereInPlaceUpgradeMulticlusterWorkloadClusterK8sUpgrade128To129(t *testing.T) { - provider := framework.NewVSphere(t, framework.WithUbuntu128()) +func TestVSphereInPlaceUpgradeMulticlusterWorkloadClusterK8sUpgrade129To130(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu129()) managementCluster := framework.NewClusterE2ETest( t, provider, framework.WithEnvVar(features.VSphereInPlaceEnvVar, "true"), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithStackedEtcdTopology(), api.WithInPlaceUpgradeStrategy(), ), api.VSphereToConfigFiller( api.RemoveEtcdVsphereMachineConfig(), ), - provider.WithKubeVersionAndOS(v1alpha1.Kube128, framework.Ubuntu2004, nil), + provider.WithKubeVersionAndOS(v1alpha1.Kube129, framework.Ubuntu2004, nil), ) test := framework.NewMulticlusterE2ETest(t, managementCluster) test.WithWorkloadClusters( @@ -4016,23 +4806,23 @@ func TestVSphereInPlaceUpgradeMulticlusterWorkloadClusterK8sUpgrade128To129(t *t ).WithClusterConfig( api.ClusterToConfigFiller( api.WithManagementCluster(managementCluster.ClusterName), - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithStackedEtcdTopology(), api.WithInPlaceUpgradeStrategy(), ), api.VSphereToConfigFiller( api.RemoveEtcdVsphereMachineConfig(), ), - provider.WithKubeVersionAndOS(v1alpha1.Kube128, framework.Ubuntu2004, nil), + provider.WithKubeVersionAndOS(v1alpha1.Kube129, framework.Ubuntu2004, nil), ), ) runInPlaceWorkloadUpgradeFlow( test, framework.WithClusterUpgrade( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithInPlaceUpgradeStrategy(), ), - provider.WithProviderUpgrade(provider.Ubuntu129Template()), + provider.WithProviderUpgrade(provider.Ubuntu130Template()), ) } @@ -4461,6 +5251,7 @@ func TestVSphereUpgradeKubernetesCiliumUbuntuGitHubFluxAPI(t *testing.T) { test.DeleteManagementCluster() } +// Airgapped tests func TestVSphereKubernetes128UbuntuAirgappedRegistryMirror(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -4489,6 +5280,48 @@ func TestVSphereKubernetes129UbuntuAirgappedRegistryMirror(t *testing.T) { runAirgapConfigFlow(test, "195.18.0.1/16,196.18.0.1/16") } +func TestVSphereKubernetes130UbuntuAirgappedRegistryMirror(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu130(), framework.WithPrivateNetwork()), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithRegistryMirrorEndpointAndCert(constants.VSphereProviderName), + ) + + runAirgapConfigFlow(test, "195.18.0.1/16,196.18.0.1/16") +} + +func TestVSphereKubernetes129UbuntuAirgappedProxy(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu129(), framework.WithPrivateNetwork()), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + 
framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithProxy(framework.VsphereProxyRequiredEnvVars), + ) + + runAirgapConfigProxyFlow(test, "195.18.0.1/16,196.18.0.1/16") +} + +func TestVSphereKubernetes130UbuntuAirgappedProxy(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu130(), framework.WithPrivateNetwork()), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithProxy(framework.VsphereProxyRequiredEnvVars), + ) + + runAirgapConfigProxyFlow(test, "195.18.0.1/16,196.18.0.1/16") +} + func TestVSphereKubernetesUbuntu128EtcdEncryption(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -4531,6 +5364,27 @@ func TestVSphereKubernetesUbuntu129EtcdEncryption(t *testing.T) { test.DeleteCluster() } +func TestVSphereKubernetesUbuntu130EtcdEncryption(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu130()), + framework.WithClusterFiller( + api.WithKubernetesVersion(v1alpha1.Kube130), + api.WithExternalEtcdTopology(1), + api.WithControlPlaneCount(1), + ), + framework.WithPodIamConfig(), + ) + test.OSFamily = v1alpha1.Ubuntu + test.GenerateClusterConfig() + test.CreateCluster() + test.PostClusterCreateEtcdEncryptionSetup() + test.UpgradeClusterWithNewConfig([]framework.ClusterE2ETestOpt{framework.WithEtcdEncrytion()}) + test.StopIfFailed() + test.ValidateEtcdEncryption() + test.DeleteCluster() +} + func TestVSphereKubernetesBottlerocket128EtcdEncryption(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -4571,6 +5425,26 @@ func TestVSphereKubernetesBottlerocket129EtcdEncryption(t *testing.T) { test.DeleteCluster() } +func TestVSphereKubernetesBottlerocket130EtcdEncryption(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithBottleRocket130()), + framework.WithClusterFiller( + api.WithKubernetesVersion(v1alpha1.Kube130), + api.WithExternalEtcdTopology(1), + api.WithControlPlaneCount(1), + ), + framework.WithPodIamConfig(), + ) + test.OSFamily = v1alpha1.Bottlerocket + test.GenerateClusterConfig() + test.CreateCluster() + test.PostClusterCreateEtcdEncryptionSetup() + test.UpgradeClusterWithNewConfig([]framework.ClusterE2ETestOpt{framework.WithEtcdEncrytion()}) + test.StopIfFailed() + test.DeleteCluster() +} + func ubuntu128ProviderWithLabels(t *testing.T) *framework.VSphere { return framework.NewVSphere(t, framework.WithVSphereWorkerNodeGroup( @@ -4611,6 +5485,26 @@ func ubuntu129ProviderWithLabels(t *testing.T) *framework.VSphere { ) } +func ubuntu130ProviderWithLabels(t *testing.T) *framework.VSphere { + return framework.NewVSphere(t, + framework.WithVSphereWorkerNodeGroup( + worker0, + framework.WithWorkerNodeGroup(worker0, api.WithCount(2), + api.WithLabel(key1, val2)), + ), + framework.WithVSphereWorkerNodeGroup( + worker1, + framework.WithWorkerNodeGroup(worker1, api.WithCount(1)), + ), + framework.WithVSphereWorkerNodeGroup( + worker2, + framework.WithWorkerNodeGroup(worker2, api.WithCount(1), + api.WithLabel(key2, val2)), + ), + framework.WithUbuntu130(), + ) +} + func bottlerocket128ProviderWithLabels(t *testing.T) *framework.VSphere { return 
framework.NewVSphere(t, framework.WithVSphereWorkerNodeGroup( @@ -4651,6 +5545,26 @@ func bottlerocket129ProviderWithLabels(t *testing.T) *framework.VSphere { ) } +func bottlerocket130ProviderWithLabels(t *testing.T) *framework.VSphere { + return framework.NewVSphere(t, + framework.WithVSphereWorkerNodeGroup( + worker0, + framework.WithWorkerNodeGroup(worker0, api.WithCount(2), + api.WithLabel(key1, val2)), + ), + framework.WithVSphereWorkerNodeGroup( + worker1, + framework.WithWorkerNodeGroup(worker1, api.WithCount(1)), + ), + framework.WithVSphereWorkerNodeGroup( + worker2, + framework.WithWorkerNodeGroup(worker2, api.WithCount(1), + api.WithLabel(key2, val2)), + ), + framework.WithBottleRocket130(), + ) +} + func ubuntu128ProviderWithTaints(t *testing.T) *framework.VSphere { return framework.NewVSphere(t, framework.WithVSphereWorkerNodeGroup( @@ -4687,6 +5601,24 @@ func ubuntu129ProviderWithTaints(t *testing.T) *framework.VSphere { ) } +func ubuntu130ProviderWithTaints(t *testing.T) *framework.VSphere { + return framework.NewVSphere(t, + framework.WithVSphereWorkerNodeGroup( + worker0, + framework.NoScheduleWorkerNodeGroup(worker0, 2), + ), + framework.WithVSphereWorkerNodeGroup( + worker1, + framework.WithWorkerNodeGroup(worker1, api.WithCount(1)), + ), + framework.WithVSphereWorkerNodeGroup( + worker2, + framework.PreferNoScheduleWorkerNodeGroup(worker2, 1), + ), + framework.WithUbuntu130(), + ) +} + func bottlerocket128ProviderWithTaints(t *testing.T) *framework.VSphere { return framework.NewVSphere(t, framework.WithVSphereWorkerNodeGroup( @@ -4723,6 +5655,24 @@ func bottlerocket129ProviderWithTaints(t *testing.T) *framework.VSphere { ) } +func bottlerocket130ProviderWithTaints(t *testing.T) *framework.VSphere { + return framework.NewVSphere(t, + framework.WithVSphereWorkerNodeGroup( + worker0, + framework.NoScheduleWorkerNodeGroup(worker0, 2), + ), + framework.WithVSphereWorkerNodeGroup( + worker1, + framework.WithWorkerNodeGroup(worker1, api.WithCount(1)), + ), + framework.WithVSphereWorkerNodeGroup( + worker2, + framework.PreferNoScheduleWorkerNodeGroup(worker2, 1), + ), + framework.WithBottleRocket130(), + ) +} + func runVSphereCloneModeFlow(test *framework.ClusterE2ETest, vsphere *framework.VSphere, diskSize int) { test.GenerateClusterConfig() test.CreateCluster() @@ -4839,6 +5789,27 @@ func TestVSphereKubernetes129BottlerocketEtcdScaleDown(t *testing.T) { ) } +func TestVSphereKubernetes130BottlerocketEtcdScaleDown(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithBottleRocket130()), + framework.WithClusterFiller( + api.WithKubernetesVersion(v1alpha1.Kube130), + api.WithExternalEtcdTopology(3), + api.WithControlPlaneCount(1), + api.WithWorkerNodeCount(1), + ), + ) + + runSimpleUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade( + api.WithExternalEtcdTopology(1), + ), + ) +} + func TestVSphereKubernetes127to128BottlerocketEtcdScaleUp(t *testing.T) { provider := framework.NewVSphere(t, framework.WithBottleRocket127()) test := framework.NewClusterE2ETest( @@ -4929,6 +5900,27 @@ func TestVSphereKubernetes129UbuntuEtcdScaleUp(t *testing.T) { ) } +func TestVSphereKubernetes130UbuntuEtcdScaleUp(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu130()), + framework.WithClusterFiller( + api.WithKubernetesVersion(v1alpha1.Kube130), + api.WithExternalEtcdTopology(1), + api.WithControlPlaneCount(1), + api.WithWorkerNodeCount(1), + ), + ) + + runSimpleUpgradeFlow( + 
test, + v1alpha1.Kube130, + framework.WithClusterUpgrade( + api.WithExternalEtcdTopology(3), + ), + ) +} + func TestVSphereKubernetes128UbuntuEtcdScaleDown(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -4971,13 +5963,34 @@ func TestVSphereKubernetes129UbuntuEtcdScaleDown(t *testing.T) { ) } -func TestVSphereKubernetes127to128UbuntuEtcdScaleUp(t *testing.T) { - provider := framework.NewVSphere(t, framework.WithUbuntu127()) +func TestVSphereKubernetes130UbuntuEtcdScaleDown(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu130()), + framework.WithClusterFiller( + api.WithKubernetesVersion(v1alpha1.Kube130), + api.WithExternalEtcdTopology(3), + api.WithControlPlaneCount(1), + api.WithWorkerNodeCount(1), + ), + ) + + runSimpleUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade( + api.WithExternalEtcdTopology(1), + ), + ) +} + +func TestVSphereKubernetes129to130UbuntuEtcdScaleUp(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu129()) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), @@ -4986,22 +5999,22 @@ func TestVSphereKubernetes127to128UbuntuEtcdScaleUp(t *testing.T) { runSimpleUpgradeFlow( test, - v1alpha1.Kube128, + v1alpha1.Kube130, framework.WithClusterUpgrade( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithExternalEtcdTopology(3), ), - provider.WithProviderUpgrade(provider.Ubuntu128Template()), + provider.WithProviderUpgrade(provider.Ubuntu130Template()), ) } -func TestVSphereKubernetes127to128UbuntuEtcdScaleDown(t *testing.T) { - provider := framework.NewVSphere(t, framework.WithUbuntu127()) +func TestVSphereKubernetes129to130UbuntuEtcdScaleDown(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu129()) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithExternalEtcdTopology(3), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), @@ -5010,11 +6023,11 @@ func TestVSphereKubernetes127to128UbuntuEtcdScaleDown(t *testing.T) { runSimpleUpgradeFlow( test, - v1alpha1.Kube128, + v1alpha1.Kube130, framework.WithClusterUpgrade( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithExternalEtcdTopology(1), ), - provider.WithProviderUpgrade(provider.Ubuntu128Template()), + provider.WithProviderUpgrade(provider.Ubuntu130Template()), ) } diff --git a/test/framework/cloudstack.go b/test/framework/cloudstack.go index 5f644a7df5a9..439a2243efff 100644 --- a/test/framework/cloudstack.go +++ b/test/framework/cloudstack.go @@ -195,6 +195,11 @@ func WithCloudStackRedhat129() CloudStackOpt { return withCloudStackKubeVersionAndOS(anywherev1.Kube129, RedHat8, nil) } +// WithCloudStackRedhat130 returns a function which can be invoked to configure the Cloudstack object to be compatible with K8s 1.30. +func WithCloudStackRedhat130() CloudStackOpt { + return withCloudStackKubeVersionAndOS(anywherev1.Kube130, RedHat8, nil) +} + // WithCloudStackRedhat9Kubernetes125 returns a function which can be invoked to configure the Cloudstack object to be compatible with K8s 1.25. 
func WithCloudStackRedhat9Kubernetes125() CloudStackOpt { return withCloudStackKubeVersionAndOS(anywherev1.Kube125, RedHat9, nil) @@ -220,6 +225,11 @@ func WithCloudStackRedhat9Kubernetes129() CloudStackOpt { return withCloudStackKubeVersionAndOS(anywherev1.Kube129, RedHat9, nil) } +// WithCloudStackRedhat9Kubernetes130 returns a function which can be invoked to configure the Cloudstack object to be compatible with K8s 1.30. +func WithCloudStackRedhat9Kubernetes130() CloudStackOpt { + return withCloudStackKubeVersionAndOS(anywherev1.Kube130, RedHat9, nil) +} + func WithCloudStackFillers(fillers ...api.CloudStackFiller) CloudStackOpt { return func(c *CloudStack) { c.fillers = append(c.fillers, fillers...) @@ -254,8 +264,9 @@ func (c *CloudStack) ClusterConfigUpdates() []api.ClusterConfigFiller { return []api.ClusterConfigFiller{api.ClusterToConfigFiller(f...), api.CloudStackToConfigFiller(c.fillers...)} } -func (c *CloudStack) CleanupVMs(clusterName string) error { - return cleanup.CleanUpCloudstackTestResources(context.Background(), clusterName, false) +// CleanupResources satisfies the test framework Provider. +func (c *CloudStack) CleanupResources(clusterName string) error { + return cleanup.CloudstackTestResources(context.Background(), clusterName, false, false) } func (c *CloudStack) WithProviderUpgrade(fillers ...api.CloudStackFiller) ClusterE2ETestOpt { @@ -362,7 +373,12 @@ func (c *CloudStack) Redhat128Template() api.CloudStackFiller { // Redhat129Template returns cloudstack filler for 1.29 RedHat. func (c *CloudStack) Redhat129Template() api.CloudStackFiller { - return c.templateForKubeVersionAndOS(anywherev1.Kube129, RedHat9, nil) + return c.templateForKubeVersionAndOS(anywherev1.Kube129, RedHat8, nil) +} + +// Redhat130Template returns cloudstack filler for 1.30 RedHat. +func (c *CloudStack) Redhat130Template() api.CloudStackFiller { + return c.templateForKubeVersionAndOS(anywherev1.Kube130, RedHat8, nil) } // Redhat9Kubernetes125Template returns cloudstack filler for 1.25 RedHat. @@ -390,6 +406,11 @@ func (c *CloudStack) Redhat9Kubernetes129Template() api.CloudStackFiller { return c.templateForKubeVersionAndOS(anywherev1.Kube129, RedHat9, nil) } +// Redhat9Kubernetes130Template returns cloudstack filler for 1.30 RedHat. +func (c *CloudStack) Redhat9Kubernetes130Template() api.CloudStackFiller { + return c.templateForKubeVersionAndOS(anywherev1.Kube130, RedHat9, nil) +} + func buildCloudStackWorkerNodeGroupClusterFiller(machineConfigName string, workerNodeGroup *WorkerNodeGroup) api.ClusterFiller { // Set worker node group ref to cloudstack machine config workerNodeGroup.MachineConfigKind = anywherev1.CloudStackMachineConfigKind @@ -409,7 +430,7 @@ func (c *CloudStack) ClusterStateValidations() []clusterf.StateValidation { // WithKubeVersionAndOS returns a cluster config filler that sets the cluster kube version and the right template for all // cloudstack machine configs. 
-func (c *CloudStack) WithKubeVersionAndOS(kubeVersion anywherev1.KubernetesVersion, os OS, release *releasev1.EksARelease) api.ClusterConfigFiller { +func (c *CloudStack) WithKubeVersionAndOS(kubeVersion anywherev1.KubernetesVersion, os OS, release *releasev1.EksARelease, _ ...bool) api.ClusterConfigFiller { return api.JoinClusterConfigFillers( api.ClusterToConfigFiller(api.WithKubernetesVersion(kubeVersion)), api.CloudStackToConfigFiller( @@ -448,6 +469,12 @@ func (c *CloudStack) WithRedhat129() api.ClusterConfigFiller { return c.WithKubeVersionAndOS(anywherev1.Kube129, RedHat8, nil) } +// WithRedhat130 returns a cluster config filler that sets the kubernetes version of the cluster to 1.30 +// as well as the right redhat template for all CloudStackMachineConfigs. +func (c *CloudStack) WithRedhat130() api.ClusterConfigFiller { + return c.WithKubeVersionAndOS(anywherev1.Kube130, RedHat8, nil) +} + // WithRedhat9Kubernetes125 returns a cluster config filler that sets the kubernetes version of the cluster to 1.25 // as well as the right redhat template for all CloudStackMachineConfigs. func (c *CloudStack) WithRedhat9Kubernetes125() api.ClusterConfigFiller { @@ -478,6 +505,12 @@ func (c *CloudStack) WithRedhat9Kubernetes129() api.ClusterConfigFiller { return c.WithKubeVersionAndOS(anywherev1.Kube129, RedHat9, nil) } +// WithRedhat9Kubernetes130 returns a cluster config filler that sets the kubernetes version of the cluster to 1.30 +// as well as the right redhat template for all CloudStackMachineConfigs. +func (c *CloudStack) WithRedhat9Kubernetes130() api.ClusterConfigFiller { + return c.WithKubeVersionAndOS(anywherev1.Kube130, RedHat9, nil) +} + // WithRedhatVersion returns a cluster config filler that sets the kubernetes version of the cluster to the k8s // version provider, as well as the right redhat template for all CloudStackMachineConfigs. 
func (c *CloudStack) WithRedhatVersion(version anywherev1.KubernetesVersion) api.ClusterConfigFiller { @@ -492,6 +525,8 @@ func (c *CloudStack) WithRedhatVersion(version anywherev1.KubernetesVersion) api return c.WithRedhat128() case anywherev1.Kube129: return c.WithRedhat129() + case anywherev1.Kube130: + return c.WithRedhat130() default: return nil } diff --git a/test/framework/cluster.go b/test/framework/cluster.go index d5ce8fe297e6..4f4ebbfcf66e 100644 --- a/test/framework/cluster.go +++ b/test/framework/cluster.go @@ -59,17 +59,22 @@ const ( JobIdVar = "T_JOB_ID" BundlesOverrideVar = "T_BUNDLES_OVERRIDE" ClusterIPPoolEnvVar = "T_CLUSTER_IP_POOL" - CleanupVmsVar = "T_CLEANUP_VMS" + ClusterIPEnvVar = "T_CLUSTER_IP" + CleanupResourcesVar = "T_CLEANUP_RESOURCES" hardwareYamlPath = "hardware.yaml" hardwareCsvPath = "hardware.csv" EksaPackagesInstallation = "eks-anywhere-packages" + bundleReleasePathFromArtifacts = "./eks-anywhere-downloads/bundle-release.yaml" ) //go:embed testdata/oidc-roles.yaml var oidcRoles []byte -//go:embed testdata/hpa_busybox.yaml -var hpaBusybox []byte +//go:embed testdata/autoscaler_load.yaml +var autoscalerLoad []byte + +//go:embed testdata/local-path-storage.yaml +var localPathProvisioner []byte type ClusterE2ETest struct { T T @@ -142,9 +147,9 @@ func NewClusterE2ETest(t T, provider Provider, opts ...ClusterE2ETestOpt) *Clust provider.Setup() e.T.Cleanup(func() { - e.CleanupVms() + e.cleanupResources() - tinkerbellCIEnvironment := os.Getenv(TinkerbellCIEnvironment) + tinkerbellCIEnvironment := os.Getenv(tinkerbellCIEnvironmentEnvVar) if e.Provider.Name() == tinkerbellProviderName && tinkerbellCIEnvironment == "true" { e.CleanupDockerEnvironment() } @@ -335,10 +340,10 @@ type Provider interface { // Prefer to call UpdateClusterConfig directly from the tests to make it more explicit. ClusterConfigUpdates() []api.ClusterConfigFiller Setup() - CleanupVMs(clusterName string) error + CleanupResources(clusterName string) error UpdateKubeConfig(content *[]byte, clusterName string) error ClusterStateValidations() []clusterf.StateValidation - WithKubeVersionAndOS(kubeVersion v1alpha1.KubernetesVersion, os OS, release *releasev1.EksARelease) api.ClusterConfigFiller + WithKubeVersionAndOS(kubeVersion v1alpha1.KubernetesVersion, os OS, release *releasev1.EksARelease, rtos ...bool) api.ClusterConfigFiller WithNewWorkerNodeGroup(name string, workerNodeGroup *WorkerNodeGroup) api.ClusterConfigFiller } @@ -356,53 +361,8 @@ func newBmclibClient(log logr.Logger, hostIP, username, password string) *bmclib return client } -// powerOffHardware issues power off calls to all Hardware. This function does not fail the test if it encounters an error. -// This function is a helper and not part of the code path that we are testing. -// For this reason, we are only logging the errors and not failing the test. -// This function exists not because we need the hardware to be powered off before a test run, -// but because we want to make sure that no other Tinkerbell Boots DHCP server is running. -// Another Boots DHCP server running can cause netboot issues with hardware. 
-func (e *ClusterE2ETest) powerOffHardware() { - for _, h := range e.TestHardware { - ctx, done := context.WithTimeout(context.Background(), 2*time.Minute) - defer done() - bmcClient := newBmclibClient(logr.Discard(), h.BMCIPAddress, h.BMCUsername, h.BMCPassword) - - if err := bmcClient.Open(ctx); err != nil { - md := bmcClient.GetMetadata() - e.T.Logf("Failed to open connection to BMC: %v, hardware: %v, providersAttempted: %v, failedProviderDetail: %v", err, h.BMCIPAddress, md.ProvidersAttempted, md.SuccessfulOpenConns) - - continue - } - md := bmcClient.GetMetadata() - e.T.Logf("Connected to BMC: hardware: %v, providersAttempted: %v, successfulProvider: %v", h.BMCIPAddress, md.ProvidersAttempted, md.SuccessfulOpenConns) - - defer func() { - if err := bmcClient.Close(ctx); err != nil { - md := bmcClient.GetMetadata() - e.T.Logf("BMC close connection failed: %v, hardware: %v, providersAttempted: %v, failedProviderDetail: %v", err, h.BMCIPAddress, md.ProvidersAttempted, md.FailedProviderDetail) - } - }() - - state, err := bmcClient.GetPowerState(ctx) - if err != nil { - state = "unknown" - } - if strings.Contains(strings.ToLower(state), "off") { - return - } - - if _, err := bmcClient.SetPowerState(ctx, "off"); err != nil { - md := bmcClient.GetMetadata() - e.T.Logf("failed to power off hardware: %v, hardware: %v, providersAttempted: %v, failedProviderDetail: %v", err, h.BMCIPAddress, md.ProvidersAttempted, md.SuccessfulOpenConns) - continue - } - } -} - // ValidateHardwareDecommissioned checks that the all hardware was powered off during the cluster deletion. -// This function tests that the hardware was powered off during the cluster deletion. If any hardware are not powered off -// this func calls powerOffHardware to power off the hardware and then fails this test. +// This function tests that the hardware was powered off during the cluster deletion. func (e *ClusterE2ETest) ValidateHardwareDecommissioned() { var failedToDecomm []*api.Hardware for _, h := range e.TestHardware { @@ -454,7 +414,6 @@ func (e *ClusterE2ETest) ValidateHardwareDecommissioned() { } if len(failedToDecomm) > 0 { - e.powerOffHardware() e.T.Fatalf("failed to decommission all hardware during cluster deletion") } } @@ -657,7 +616,7 @@ func (e *ClusterE2ETest) DownloadImages(opts ...CommandOpt) { if getBundlesOverride() == "true" { var bundleManifestLocation string if _, err := os.Stat(defaultDownloadArtifactsOutputLocation); err == nil { - bundleManifestLocation = "eks-anywhere-downloads/bundle-release.yaml" + bundleManifestLocation = bundleReleasePathFromArtifacts } else { bundleManifestLocation = defaultBundleReleaseManifestFile } @@ -678,7 +637,7 @@ func (e *ClusterE2ETest) ImportImages(opts ...CommandOpt) { registryMirrorHost := net.JoinHostPort(registyMirrorEndpoint, registryMirrorPort) var bundleManifestLocation string if _, err := os.Stat(defaultDownloadArtifactsOutputLocation); err == nil { - bundleManifestLocation = "eks-anywhere-downloads/bundle-release.yaml" + bundleManifestLocation = bundleReleasePathFromArtifacts } else { bundleManifestLocation = defaultBundleReleaseManifestFile } @@ -907,16 +866,17 @@ func (e *ClusterE2ETest) DeleteCluster(opts ...CommandOpt) { e.deleteCluster(opts...) } -// CleanupVms is a helper to clean up VMs. It is a noop if the T_CLEANUP_VMS environment variable +// cleanupResources is a helper to clean up test resources. It is a noop if the T_CLEANUP_RESOURCES environment variable // is false or unset. 
-func (e *ClusterE2ETest) CleanupVms() { - if !shouldCleanUpVms() { - e.T.Logf("Skipping VM cleanup") +func (e *ClusterE2ETest) cleanupResources() { + if !shouldCleanUpResources() { + e.T.Logf("Skipping provider resource cleanup") return } - if err := e.Provider.CleanupVMs(e.ClusterName); err != nil { - e.T.Logf("failed to clean up VMs: %v", err) + e.T.Logf("Cleaning up provider resources") + if err := e.Provider.CleanupResources(e.ClusterName); err != nil { + e.T.Logf("failed to clean up %s test resources: %v", e.Provider.Name(), err) } } @@ -927,9 +887,9 @@ func (e *ClusterE2ETest) CleanupDockerEnvironment() { e.Run("docker", "rm", "-vf", "$(docker ps -a -q)", "||", "true") } -func shouldCleanUpVms() bool { - shouldCleanupVms, err := getCleanupVmsVar() - return err == nil && shouldCleanupVms +func shouldCleanUpResources() bool { - shouldCleanupResources, err := getCleanupResourcesVar() + shouldCleanupResources, err := getCleanupResourcesVar() + return err == nil && shouldCleanupResources } func (e *ClusterE2ETest) deleteCluster(opts ...CommandOpt) { @@ -954,6 +914,15 @@ func (e *ClusterE2ETest) GenerateSupportBundleOnCleanupIfTestFailed(opts ...Comm }) } +// GenerateSupportBundleIfTestFailed generates a support bundle if the test failed. +func (e *ClusterE2ETest) GenerateSupportBundleIfTestFailed(opts ...CommandOpt) { + if e.T.Failed() { + e.T.Log("Generating support bundle for failed test") + generateSupportBundleArgs := []string{"generate", "support-bundle", "-f", e.ClusterConfigLocation} + e.RunEKSA(generateSupportBundleArgs, opts...) + } +} + func (e *ClusterE2ETest) Run(name string, args ...string) { cmd, err := prepareCommand(name, args...) if err != nil { @@ -1010,14 +979,6 @@ func (e *ClusterE2ETest) StopIfFailed() { } } -func (e *ClusterE2ETest) cleanup(f func()) { - e.T.Cleanup(func() { - if !e.T.Failed() { - f() - } - }) -} - // Cluster builds a cluster obj using the ClusterE2ETest name and kubeconfig. func (e *ClusterE2ETest) Cluster() *types.Cluster { return &types.Cluster{ @@ -1111,8 +1072,8 @@ func getBundlesOverride() string { return os.Getenv(BundlesOverrideVar) } -func getCleanupVmsVar() (bool, error) { - return strconv.ParseBool(os.Getenv(CleanupVmsVar)) +func getCleanupResourcesVar() (bool, error) { + return strconv.ParseBool(os.Getenv(CleanupResourcesVar)) } func setEksctlVersionEnvVar() error { @@ -1274,9 +1235,7 @@ func (e *ClusterE2ETest) UninstallCuratedPackage(packagePrefix string, opts ...s func (e *ClusterE2ETest) InstallLocalStorageProvisioner() { ctx := context.Background() - _, err := e.KubectlClient.ExecuteCommand(ctx, "apply", "-f", - "https://raw.githubusercontent.com/rancher/local-path-provisioner/v0.0.22/deploy/local-path-storage.yaml", - "--kubeconfig", e.KubeconfigFilePath()) + err := e.KubectlClient.ApplyKubeSpecFromBytes(ctx, e.Cluster(), localPathProvisioner) if err != nil { e.T.Fatalf("Error installing local-path-provisioner: %v", err) } @@ -1286,7 +1245,25 @@ func (e *ClusterE2ETest) WithCluster(f func(e *ClusterE2ETest)) { e.GenerateClusterConfig() e.CreateCluster() - defer e.DeleteCluster() + defer func() { + e.GenerateSupportBundleIfTestFailed() + e.DeleteCluster() + }() f(e) } + +// WithClusterRegistryMirror helps with bringing up and tearing down E2E test clusters when using a registry mirror.
+func (e *ClusterE2ETest) WithClusterRegistryMirror(f func(e *ClusterE2ETest)) { + e.GenerateClusterConfig() + e.DownloadArtifacts() + e.ExtractDownloadedArtifacts() + e.DownloadImages() + e.ImportImages() + e.CreateCluster(WithBundlesOverride(bundleReleasePathFromArtifacts)) + defer func() { + e.GenerateSupportBundleIfTestFailed() + e.DeleteCluster(WithBundlesOverride(bundleReleasePathFromArtifacts)) + }() f(e) } @@ -1374,7 +1351,6 @@ func (e *ClusterE2ETest) printDeploymentSpec(ctx context.Context, ns string) { func (e *ClusterE2ETest) VerifyHelloPackageInstalled(packageName string, mgmtCluster *types.Cluster) { ctx := context.Background() packageMetadatNamespace := fmt.Sprintf("%s-%s", constants.EksaPackagesName, e.ClusterName) - e.GenerateSupportBundleOnCleanupIfTestFailed() // Log Package/Deployment outputs defer func() { @@ -1407,7 +1383,6 @@ func (e *ClusterE2ETest) VerifyHelloPackageInstalled(packageName string, mgmtClu func (e *ClusterE2ETest) VerifyAdotPackageInstalled(packageName, targetNamespace string) { ctx := context.Background() packageMetadatNamespace := fmt.Sprintf("%s-%s", constants.EksaPackagesName, e.ClusterName) - e.GenerateSupportBundleOnCleanupIfTestFailed() e.T.Log("Waiting for package", packageName, "to be installed") err := e.KubectlClient.WaitForPackagesInstalled(ctx, @@ -1610,7 +1585,7 @@ func (e *ClusterE2ETest) TestEmissaryPackageRouting(packageName, checkName strin ctx := context.Background() packageMetadatNamespace := fmt.Sprintf("%s-%s", constants.EksaPackagesName, e.ClusterName) - err := e.KubectlClient.ApplyKubeSpecFromBytes(ctx, e.Cluster(), emisarryPackage) + err := e.KubectlClient.ApplyKubeSpecFromBytesWithNamespace(ctx, e.Cluster(), emisarryPackage, packageMetadatNamespace) if err != nil { e.T.Errorf("Error upgrading emissary package: %v", err) return @@ -1634,6 +1609,8 @@ func (e *ClusterE2ETest) TestEmissaryPackageRouting(packageName, checkName strin e.T.Errorf("Error applying roles for oids: %v", err) return } + e.T.Log("Waiting for hello service") + time.Sleep(60 * time.Second) // Functional testing of Emissary Ingress ingresssvcAddress := checkName + "." + constants.EksaPackagesName + ".svc.cluster.local" @@ -1992,49 +1969,46 @@ func (e *ClusterE2ETest) InstallAutoScalerWithMetricServer(targetNamespace strin // CombinedAutoScalerMetricServerTest verifies that new nodes are spun up after using a HPA to scale a deployment. 
func (e *ClusterE2ETest) CombinedAutoScalerMetricServerTest(autoscalerName, metricServerName, targetNamespace string, mgmtCluster *types.Cluster) { ctx := context.Background() - ns := "default" - name := "hpa-busybox-test" machineDeploymentName := e.ClusterName + "-" + "md-0" + autoscalerDeploymentName := "cluster-autoscaler-clusterapi-cluster-autoscaler" e.VerifyMetricServerPackageInstalled(metricServerName, targetNamespace, mgmtCluster) e.VerifyAutoScalerPackageInstalled(autoscalerName, targetNamespace, mgmtCluster) - e.T.Log("Metrics Server and Cluster Autoscaler ready") - err := e.KubectlClient.ApplyKubeSpecFromBytes(ctx, mgmtCluster, hpaBusybox) - if err != nil { - e.T.Fatalf("Failed to apply hpa busybox load %s", err) - } - e.T.Log("Deploying test workload") - - err = e.KubectlClient.WaitForDeployment(ctx, - e.Cluster(), "5m", "Available", name, ns) + err := e.KubectlClient.ApplyKubeSpecFromBytes(ctx, mgmtCluster, autoscalerLoad) if err != nil { - e.T.Fatalf("Failed waiting for test workload deployent %s", err) + e.T.Fatalf("Failed to apply autoscaler load %s", err) } - params := []string{"autoscale", "deployment", name, "--cpu-percent=50", "--min=1", "--max=20", "--kubeconfig", e.KubeconfigFilePath()} - _, err = e.KubectlClient.ExecuteCommand(ctx, params...) + // There is a bug in cluster autoscaler currently where it's not able to autoscale the cluster + // because of missing permissions on the infrastructure machine template. + // Cluster Autoscaler does restart after ~10 min, after which it starts functioning normally. + // We are force-triggering a restart so the e2e doesn't have to wait 10 min for the restart. + // This can be removed once the following issue is resolved upstream. + // https://github.com/kubernetes/autoscaler/issues/6490 + _, err = e.KubectlClient.ExecuteCommand(ctx, "rollout", "restart", "deployment", "-n", targetNamespace, autoscalerDeploymentName, "--kubeconfig", e.KubeconfigFilePath()) if err != nil { - e.T.Fatalf("Failed to autoscale deployent: %s", err) + e.T.Fatalf("Failed to rollout cluster autoscaler %s", err) } + e.VerifyAutoScalerPackageInstalled(autoscalerName, targetNamespace, mgmtCluster) e.T.Log("Waiting for machinedeployment to begin scaling up") - err = e.KubectlClient.WaitJSONPathLoop(ctx, mgmtCluster.KubeconfigFile, "20m", "status.phase", "ScalingUp", + err = e.KubectlClient.WaitJSONPathLoop(ctx, mgmtCluster.KubeconfigFile, "10m", "status.phase", "ScalingUp", fmt.Sprintf("machinedeployments.cluster.x-k8s.io/%s", machineDeploymentName), constants.EksaSystemNamespace) if err != nil { e.T.Fatalf("Failed to get ScalingUp phase for machinedeployment: %s", err) } e.T.Log("Waiting for machinedeployment to finish scaling up") - err = e.KubectlClient.WaitJSONPathLoop(ctx, mgmtCluster.KubeconfigFile, "15m", "status.phase", "Running", + err = e.KubectlClient.WaitJSONPathLoop(ctx, mgmtCluster.KubeconfigFile, "20m", "status.phase", "Running", fmt.Sprintf("machinedeployments.cluster.x-k8s.io/%s", machineDeploymentName), constants.EksaSystemNamespace) if err != nil { e.T.Fatalf("Failed to get Running phase for machinedeployment: %s", err) } - err = e.KubectlClient.WaitForMachineDeploymentReady(ctx, mgmtCluster, "2m", + err = e.KubectlClient.WaitForMachineDeploymentReady(ctx, mgmtCluster, "5m", machineDeploymentName) if err != nil { e.T.Fatalf("Machine deployment stuck in scaling up: %s", err) @@ -2109,7 +2083,6 @@ func (e *ClusterE2ETest) MatchLogs(targetNamespace, targetPodName string, ) { e.T.Logf("Match logs for pod %s, container %s in namespace %s",
targetPodName, targetContainerName, targetNamespace) - e.GenerateSupportBundleOnCleanupIfTestFailed() err := retrier.New(timeout).Retry(func() error { logs, err := e.KubectlClient.GetPodLogs(context.TODO(), targetNamespace, diff --git a/test/framework/config/pod-identity-webhook.yaml b/test/framework/config/pod-identity-webhook.yaml index 79d81f4fdef9..ae6247217a14 100644 --- a/test/framework/config/pod-identity-webhook.yaml +++ b/test/framework/config/pod-identity-webhook.yaml @@ -95,7 +95,7 @@ spec: serviceAccountName: pod-identity-webhook containers: - name: pod-identity-webhook - image: amazon/amazon-eks-pod-identity-webhook:latest + image: {{ .podIdentityWebhookImage }} imagePullPolicy: Always command: - /webhook diff --git a/test/framework/docker.go b/test/framework/docker.go index 58842e1f5717..430a1154feaa 100644 --- a/test/framework/docker.go +++ b/test/framework/docker.go @@ -39,8 +39,8 @@ func (d *Docker) Name() string { // Setup implements the Provider interface. func (d *Docker) Setup() {} -// CleanupVMs implements the Provider interface. -func (d *Docker) CleanupVMs(_ string) error { +// CleanupResources implements the Provider interface. +func (d *Docker) CleanupResources(_ string) error { return nil } @@ -85,7 +85,7 @@ func (d *Docker) ClusterStateValidations() []clusterf.StateValidation { } // WithKubeVersionAndOS returns a cluster config filler that sets the cluster kube version. -func (d *Docker) WithKubeVersionAndOS(kubeVersion anywherev1.KubernetesVersion, os OS, release *releasev1.EksARelease) api.ClusterConfigFiller { +func (d *Docker) WithKubeVersionAndOS(kubeVersion anywherev1.KubernetesVersion, _ OS, _ *releasev1.EksARelease, _ ...bool) api.ClusterConfigFiller { return api.JoinClusterConfigFillers( api.ClusterToConfigFiller(api.WithKubernetesVersion(kubeVersion)), ) diff --git a/test/framework/etcdencryption.go b/test/framework/etcdencryption.go index dcd68e8ee246..4d30ad31c550 100644 --- a/test/framework/etcdencryption.go +++ b/test/framework/etcdencryption.go @@ -28,22 +28,24 @@ import ( ) const ( - irsaS3BucketVar = "T_IRSA_S3_BUCKET" - kmsIAMRoleVar = "T_KMS_IAM_ROLE" - kmsImageVar = "T_KMS_IMAGE" - kmsKeyArn = "T_KMS_KEY_ARN" - kmsKeyRegion = "T_KMS_KEY_REGION" - kmsSocketVar = "T_KMS_SOCKET" - - defaultRegion = "us-west-2" - keysFilename = "keys.json" + irsaS3BucketVar = "T_IRSA_S3_BUCKET" + kmsIAMRoleVar = "T_KMS_IAM_ROLE" + kmsImageVar = "T_KMS_IMAGE" + podIdentityWebhookImageVar = "T_POD_IDENTITY_WEBHOOK_IMAGE" + kmsKeyArn = "T_KMS_KEY_ARN" + kmsKeyRegion = "T_KMS_KEY_REGION" + kmsSocketVar = "T_KMS_SOCKET" + + defaultRegion = "us-west-2" + keysFilename = "keys.json" + keyIDFilenameFormat = "%s-oidc-keyid" // SSHKeyPath is the path where the SSH private key is stored on the test-runner instance. SSHKeyPath = "/tmp/ssh_key" ) //go:embed config/pod-identity-webhook.yaml -var podIdentityWebhookManifest []byte +var podIdentityWebhookManifest string //go:embed config/aws-kms-encryption-provider.yaml var kmsProviderManifest string @@ -54,27 +56,29 @@ type keyResponse struct { // etcdEncryptionTestVars stores all the environment variables needed by etcd encryption tests. type etcdEncryptionTestVars struct { - KmsKeyRegion string - S3Bucket string - KmsIamRole string - KmsImage string - KmsKeyArn string - KmsSocket string + KmsKeyRegion string + S3Bucket string + KmsIamRole string + KmsImage string + PodIdentityWebhookImage string + KmsKeyArn string + KmsSocket string } // RequiredEtcdEncryptionEnvVars returns the environment variables required . 
func RequiredEtcdEncryptionEnvVars() []string { - return []string{irsaS3BucketVar, kmsIAMRoleVar, kmsImageVar, kmsKeyArn, kmsSocketVar} + return []string{irsaS3BucketVar, kmsIAMRoleVar, kmsImageVar, podIdentityWebhookImageVar, kmsKeyArn, kmsSocketVar} } func getEtcdEncryptionVarsFromEnv() *etcdEncryptionTestVars { return &etcdEncryptionTestVars{ - KmsKeyRegion: os.Getenv(kmsKeyRegion), - S3Bucket: os.Getenv(irsaS3BucketVar), - KmsIamRole: os.Getenv(kmsIAMRoleVar), - KmsImage: os.Getenv(kmsImageVar), - KmsKeyArn: os.Getenv(kmsKeyArn), - KmsSocket: os.Getenv(kmsSocketVar), + KmsKeyRegion: os.Getenv(kmsKeyRegion), + S3Bucket: os.Getenv(irsaS3BucketVar), + KmsIamRole: os.Getenv(kmsIAMRoleVar), + KmsImage: os.Getenv(kmsImageVar), + PodIdentityWebhookImage: os.Getenv(podIdentityWebhookImageVar), + KmsKeyArn: os.Getenv(kmsKeyArn), + KmsSocket: os.Getenv(kmsSocketVar), } } @@ -187,6 +191,9 @@ func (e *ClusterE2ETest) PostClusterCreateEtcdEncryptionSetup() { e.T.Fatal(err) } + // register cleanup step to remove the keys from s3 after the test is done + e.T.Cleanup(e.cleanupKeysFromOIDCConfig) + if err := e.deployPodIdentityWebhook(ctx, envVars); err != nil { e.T.Fatal(err) } @@ -196,6 +203,64 @@ func (e *ClusterE2ETest) PostClusterCreateEtcdEncryptionSetup() { } } +// cleanupKeysFromOIDCConfig removes the cluster's key from the IAM OIDC config. +func (e *ClusterE2ETest) cleanupKeysFromOIDCConfig() { + e.T.Log("Removing cluster's key from the IAM OIDC config") + data, err := os.ReadFile(fmt.Sprintf(keyIDFilenameFormat, e.ClusterName)) + if err != nil { + e.T.Logf("failed to read key ID from file, skipping cleanup: %v", err) + return + } + + envVars := getEtcdEncryptionVarsFromEnv() + awsSession, err := session.NewSession(&aws.Config{ + Region: aws.String(defaultRegion), + }) + if err != nil { + e.T.Fatalf("creating aws session for cleanup: %v", err) + } + + // download the current keys json from S3 to remove the current cluster's key + content, err := s3.Download(awsSession, keysFilename, envVars.S3Bucket) + if err != nil { + e.T.Logf("downloading %s from s3: %v", keysFilename, err) + return + } + + resp := &keyResponse{} + if err = json.Unmarshal(content, resp); err != nil { + e.T.Logf("unmarshaling %s into json: %v", keysFilename, err) + return + } + + keyID := string(data) + index := -1 + for i, key := range resp.Keys { + if strings.EqualFold(keyID, key.KeyID) { + index = i + break + } + } + + if index >= 0 { + resp = &keyResponse{ + Keys: append(resp.Keys[0:index], resp.Keys[index+1:]...), + } + + keysJSON, err := json.MarshalIndent(resp, "", " ") + if err != nil { + e.T.Logf("marshaling keys.json: %v", err) + return + } + + // upload the modified keys json to s3 with the public read access + if err = s3.Upload(awsSession, keysJSON, keysFilename, envVars.S3Bucket, s3.WithPublicRead()); err != nil { + e.T.Logf("upload new keys.json to s3: %v", err) + return + } + } +} + func getIssuerURL() string { etcdEncryptionConfig := getEtcdEncryptionVarsFromEnv() return fmt.Sprintf("https://s3.%s.amazonaws.com/%s", defaultRegion, etcdEncryptionConfig.S3Bucket) @@ -203,7 +268,14 @@ func getIssuerURL() string { func (e *ClusterE2ETest) deployPodIdentityWebhook(ctx context.Context, envVars *etcdEncryptionTestVars) error { e.T.Log("Deploying Pod Identity Webhook") - if err := e.KubectlClient.ApplyKubeSpecFromBytes(ctx, e.Cluster(), podIdentityWebhookManifest); err != nil { + values := map[string]string{ + "podIdentityWebhookImage": envVars.PodIdentityWebhookImage, + } + manifest, err :=
templater.Execute(podIdentityWebhookManifest, values) + if err != nil { + return fmt.Errorf("templating pod identity webhook manifest: %v", err) + } + if err := e.KubectlClient.ApplyKubeSpecFromBytes(ctx, e.Cluster(), manifest); err != nil { return fmt.Errorf("deploying pod identity webhook: %v", err) } return nil @@ -265,6 +337,10 @@ func (e *ClusterE2ETest) addClusterCertToIrsaOidcProvider(ctx context.Context, e return fmt.Errorf("marshaling keys.json: %v", err) } + if err := os.WriteFile(fmt.Sprintf(keyIDFilenameFormat, e.ClusterName), []byte(newKey.KeyID), os.ModeAppend); err != nil { + return fmt.Errorf("writing OIDC key ID to file: %v", err) + } + // upload the modified keys json to s3 with the public read access if err = s3.Upload(awsSession, keysJSON, keysFilename, envVars.S3Bucket, s3.WithPublicRead()); err != nil { return fmt.Errorf("upload new keys.json to s3: %v", err) diff --git a/test/framework/nutanix.go b/test/framework/nutanix.go index 314a9c47304b..4a2f1879f44f 100644 --- a/test/framework/nutanix.go +++ b/test/framework/nutanix.go @@ -33,21 +33,21 @@ const ( nutanixControlPlaneCidrVar = "T_NUTANIX_CONTROL_PLANE_CIDR" nutanixPodCidrVar = "T_NUTANIX_POD_CIDR" nutanixServiceCidrVar = "T_NUTANIX_SERVICE_CIDR" - nutanixTemplateNameUbuntu125Var = "T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_25" nutanixTemplateNameUbuntu126Var = "T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_26" nutanixTemplateNameUbuntu127Var = "T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_27" nutanixTemplateNameUbuntu128Var = "T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_28" nutanixTemplateNameUbuntu129Var = "T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_29" - nutanixTemplateNameRedHat125Var = "T_NUTANIX_TEMPLATE_NAME_REDHAT_1_25" + nutanixTemplateNameUbuntu130Var = "T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_30" nutanixTemplateNameRedHat126Var = "T_NUTANIX_TEMPLATE_NAME_REDHAT_1_26" nutanixTemplateNameRedHat127Var = "T_NUTANIX_TEMPLATE_NAME_REDHAT_1_27" nutanixTemplateNameRedHat128Var = "T_NUTANIX_TEMPLATE_NAME_REDHAT_1_28" nutanixTemplateNameRedHat129Var = "T_NUTANIX_TEMPLATE_NAME_REDHAT_1_29" - nutanixTemplateNameRedHat9125Var = "T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_25" + nutanixTemplateNameRedHat130Var = "T_NUTANIX_TEMPLATE_NAME_REDHAT_1_30" nutanixTemplateNameRedHat9126Var = "T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_26" nutanixTemplateNameRedHat9127Var = "T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_27" nutanixTemplateNameRedHat9128Var = "T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_28" nutanixTemplateNameRedHat9129Var = "T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_29" + nutanixTemplateNameRedHat9130Var = "T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_30" ) var requiredNutanixEnvVars = []string{ @@ -66,21 +66,21 @@ var requiredNutanixEnvVars = []string{ nutanixSubnetName, nutanixPodCidrVar, nutanixServiceCidrVar, - nutanixTemplateNameUbuntu125Var, nutanixTemplateNameUbuntu126Var, nutanixTemplateNameUbuntu127Var, nutanixTemplateNameUbuntu128Var, nutanixTemplateNameUbuntu129Var, - nutanixTemplateNameRedHat125Var, + nutanixTemplateNameUbuntu130Var, nutanixTemplateNameRedHat126Var, nutanixTemplateNameRedHat127Var, nutanixTemplateNameRedHat128Var, nutanixTemplateNameRedHat129Var, - nutanixTemplateNameRedHat9125Var, + nutanixTemplateNameRedHat130Var, nutanixTemplateNameRedHat9126Var, nutanixTemplateNameRedHat9127Var, nutanixTemplateNameRedHat9128Var, nutanixTemplateNameRedHat9129Var, + nutanixTemplateNameRedHat9130Var, nutanixInsecure, } @@ -158,9 +158,9 @@ func (n *Nutanix) UpdateKubeConfig(content *[]byte, clusterName string) error { return nil } -// CleanupVMs satisfies the test framework Provider. 
-func (n *Nutanix) CleanupVMs(clustername string) error { - return cleanup.NutanixTestResourcesCleanup(context.Background(), clustername, os.Getenv(nutanixEndpoint), os.Getenv(nutanixPort), true, true) +// CleanupResources satisfies the test framework Provider. +func (n *Nutanix) CleanupResources(clustername string) error { + return cleanup.NutanixTestResources(clustername, os.Getenv(nutanixEndpoint), os.Getenv(nutanixPort), true, true) } // ClusterConfigUpdates satisfies the test framework Provider. @@ -197,7 +197,7 @@ func (n *Nutanix) WithProviderUpgrade(fillers ...api.NutanixFiller) ClusterE2ETe // WithKubeVersionAndOS returns a cluster config filler that sets the cluster kube version and the right template for all // nutanix machine configs. -func (n *Nutanix) WithKubeVersionAndOS(kubeVersion anywherev1.KubernetesVersion, os OS, release *releasev1.EksARelease) api.ClusterConfigFiller { +func (n *Nutanix) WithKubeVersionAndOS(_ anywherev1.KubernetesVersion, _ OS, _ *releasev1.EksARelease, _ ...bool) api.ClusterConfigFiller { // TODO: Update tests to use this panic("Not implemented for Nutanix yet") } @@ -221,12 +221,6 @@ func withNutanixKubeVersionAndOS(kubeVersion anywherev1.KubernetesVersion, os OS } } -// WithUbuntu125Nutanix returns a NutanixOpt that adds API fillers to use a Ubuntu Nutanix template for k8s 1.25 -// and the "ubuntu" osFamily in all machine configs. -func WithUbuntu125Nutanix() NutanixOpt { - return withNutanixKubeVersionAndOS(anywherev1.Kube125, Ubuntu2004, nil) -} - // WithUbuntu126Nutanix returns a NutanixOpt that adds API fillers to use a Ubuntu Nutanix template for k8s 1.26 // and the "ubuntu" osFamily in all machine configs. func WithUbuntu126Nutanix() NutanixOpt { @@ -251,10 +245,10 @@ func WithUbuntu129Nutanix() NutanixOpt { return withNutanixKubeVersionAndOS(anywherev1.Kube129, Ubuntu2004, nil) } -// WithRedHat125Nutanix returns a NutanixOpt that adds API fillers to use a RedHat 8 Nutanix template for k8s 1.25 -// and the "redhat" osFamily in all machine configs. -func WithRedHat125Nutanix() NutanixOpt { - return withNutanixKubeVersionAndOS(anywherev1.Kube125, RedHat8, nil) +// WithUbuntu130Nutanix returns a NutanixOpt that adds API fillers to use a Ubuntu Nutanix template for k8s 1.30 +// and the "ubuntu" osFamily in all machine configs. +func WithUbuntu130Nutanix() NutanixOpt { + return withNutanixKubeVersionAndOS(anywherev1.Kube130, Ubuntu2004, nil) } // WithRedHat126Nutanix returns a NutanixOpt that adds API fillers to use a RedHat 8 Nutanix template for k8s 1.26 @@ -281,10 +275,10 @@ func WithRedHat129Nutanix() NutanixOpt { return withNutanixKubeVersionAndOS(anywherev1.Kube129, RedHat8, nil) } -// WithRedHat9Kubernetes125Nutanix returns a NutanixOpt that adds API fillers to use a RedHat 9 Nutanix template for k8s 1.25 +// WithRedHat130Nutanix returns a NutanixOpt that adds API fillers to use a RedHat 8 Nutanix template for k8s 1.30 // and the "redhat" osFamily in all machine configs. 
-func WithRedHat9Kubernetes125Nutanix() NutanixOpt { - return withNutanixKubeVersionAndOS(anywherev1.Kube125, RedHat9, nil) +func WithRedHat130Nutanix() NutanixOpt { + return withNutanixKubeVersionAndOS(anywherev1.Kube130, RedHat8, nil) } // WithRedHat9Kubernetes126Nutanix returns a NutanixOpt that adds API fillers to use a RedHat 9 Nutanix template for k8s 1.26 @@ -311,6 +305,12 @@ func WithRedHat9Kubernetes129Nutanix() NutanixOpt { return withNutanixKubeVersionAndOS(anywherev1.Kube129, RedHat9, nil) } +// WithRedHat9Kubernetes130Nutanix returns a NutanixOpt that adds API fillers to use a RedHat 9 Nutanix template for k8s 1.30 +// and the "redhat" osFamily in all machine configs. +func WithRedHat9Kubernetes130Nutanix() NutanixOpt { + return withNutanixKubeVersionAndOS(anywherev1.Kube130, RedHat9, nil) +} + // withNutanixKubeVersionAndOSForUUID returns a NutanixOpt that adds API fillers to use a Nutanix template UUID // corresponding to the provided OS family and Kubernetes version, in addition to configuring all machine configs // to use this OS family. @@ -321,12 +321,6 @@ func withNutanixKubeVersionAndOSForUUID(kubeVersion anywherev1.KubernetesVersion } } -// WithUbuntu125NutanixUUID returns a NutanixOpt that adds API fillers to use a Ubuntu Nutanix template UUID for k8s 1.25 -// and the "ubuntu" osFamily in all machine configs. -func WithUbuntu125NutanixUUID() NutanixOpt { - return withNutanixKubeVersionAndOSForUUID(anywherev1.Kube125, Ubuntu2004, nil) -} - // WithUbuntu126NutanixUUID returns a NutanixOpt that adds API fillers to use a Ubuntu Nutanix template UUID for k8s 1.26 // and the "ubuntu" osFamily in all machine configs. func WithUbuntu126NutanixUUID() NutanixOpt { @@ -351,10 +345,10 @@ func WithUbuntu129NutanixUUID() NutanixOpt { return withNutanixKubeVersionAndOSForUUID(anywherev1.Kube129, Ubuntu2004, nil) } -// WithRedHat125NutanixUUID returns a NutanixOpt that adds API fillers to use a RedHat Nutanix template UUID for k8s 1.25 -// and the "redhat" osFamily in all machine configs. -func WithRedHat125NutanixUUID() NutanixOpt { - return withNutanixKubeVersionAndOSForUUID(anywherev1.Kube125, RedHat8, nil) +// WithUbuntu130NutanixUUID returns a NutanixOpt that adds API fillers to use a Ubuntu Nutanix template UUID for k8s 1.30 +// and the "ubuntu" osFamily in all machine configs. +func WithUbuntu130NutanixUUID() NutanixOpt { + return withNutanixKubeVersionAndOSForUUID(anywherev1.Kube130, Ubuntu2004, nil) } // WithRedHat126NutanixUUID returns a NutanixOpt that adds API fillers to use a RedHat Nutanix template UUID for k8s 1.26 @@ -381,10 +375,10 @@ func WithRedHat129NutanixUUID() NutanixOpt { return withNutanixKubeVersionAndOSForUUID(anywherev1.Kube129, RedHat8, nil) } -// WithRedHat9Kubernetes125NutanixUUID returns a NutanixOpt that adds API fillers to use a RedHat 9 Nutanix template UUID for k8s 1.25 +// WithRedHat130NutanixUUID returns a NutanixOpt that adds API fillers to use a RedHat Nutanix template UUID for k8s 1.30 // and the "redhat" osFamily in all machine configs. 
-func WithRedHat9Kubernetes125NutanixUUID() NutanixOpt { - return withNutanixKubeVersionAndOSForUUID(anywherev1.Kube125, RedHat9, nil) +func WithRedHat130NutanixUUID() NutanixOpt { + return withNutanixKubeVersionAndOSForUUID(anywherev1.Kube130, RedHat8, nil) } // WithRedHat9Kubernetes126NutanixUUID returns a NutanixOpt that adds API fillers to use a RedHat 9 Nutanix template UUID for k8s 1.26 @@ -411,6 +405,12 @@ func WithRedHat9Kubernetes129NutanixUUID() NutanixOpt { return withNutanixKubeVersionAndOSForUUID(anywherev1.Kube129, RedHat9, nil) } +// WithRedHat9Kubernetes130NutanixUUID returns a NutanixOpt that adds API fillers to use a RedHat 9 Nutanix template UUID for k8s 1.30 +// and the "redhat" osFamily in all machine configs. +func WithRedHat9Kubernetes130NutanixUUID() NutanixOpt { + return withNutanixKubeVersionAndOSForUUID(anywherev1.Kube130, RedHat9, nil) +} + func (n *Nutanix) withNutanixUUID(name string, osFamily anywherev1.OSFamily) []api.NutanixFiller { uuid, err := n.client.GetImageUUIDFromName(context.Background(), name) if err != nil { @@ -457,12 +457,6 @@ func (n *Nutanix) templateForKubeVersionAndOS(kubeVersion anywherev1.KubernetesV return api.WithNutanixMachineTemplateImageName(template) } -// Ubuntu125Template returns NutanixFiller by reading the env var and setting machine config's -// image name parameter in the spec. -func (n *Nutanix) Ubuntu125Template() api.NutanixFiller { - return n.templateForKubeVersionAndOS(anywherev1.Kube125, Ubuntu2004, nil) -} - // Ubuntu126Template returns NutanixFiller by reading the env var and setting machine config's // image name parameter in the spec. func (n *Nutanix) Ubuntu126Template() api.NutanixFiller { @@ -487,10 +481,10 @@ func (n *Nutanix) Ubuntu129Template() api.NutanixFiller { return n.templateForKubeVersionAndOS(anywherev1.Kube129, Ubuntu2004, nil) } -// RedHat125Template returns NutanixFiller by reading the env var and setting machine config's +// Ubuntu130Template returns NutanixFiller by reading the env var and setting machine config's // image name parameter in the spec. -func (n *Nutanix) RedHat125Template() api.NutanixFiller { - return n.templateForKubeVersionAndOS(anywherev1.Kube125, RedHat8, nil) +func (n *Nutanix) Ubuntu130Template() api.NutanixFiller { + return n.templateForKubeVersionAndOS(anywherev1.Kube130, Ubuntu2004, nil) } // RedHat126Template returns NutanixFiller by reading the env var and setting machine config's @@ -517,10 +511,10 @@ func (n *Nutanix) RedHat129Template() api.NutanixFiller { return n.templateForKubeVersionAndOS(anywherev1.Kube129, RedHat8, nil) } -// RedHat9Kubernetes125Template returns NutanixFiller by reading the env var and setting machine config's +// RedHat130Template returns NutanixFiller by reading the env var and setting machine config's // image name parameter in the spec. -func (n *Nutanix) RedHat9Kubernetes125Template() api.NutanixFiller { - return n.templateForKubeVersionAndOS(anywherev1.Kube125, RedHat9, nil) +func (n *Nutanix) RedHat130Template() api.NutanixFiller { + return n.templateForKubeVersionAndOS(anywherev1.Kube130, RedHat8, nil) } // RedHat9Kubernetes126Template returns NutanixFiller by reading the env var and setting machine config's @@ -547,6 +541,12 @@ func (n *Nutanix) RedHat9Kubernetes129Template() api.NutanixFiller { return n.templateForKubeVersionAndOS(anywherev1.Kube129, RedHat9, nil) } +// RedHat9Kubernetes130Template returns NutanixFiller by reading the env var and setting machine config's +// image name parameter in the spec. 
+func (n *Nutanix) RedHat9Kubernetes130Template() api.NutanixFiller { + return n.templateForKubeVersionAndOS(anywherev1.Kube130, RedHat9, nil) +} + // ClusterStateValidations returns a list of provider specific ClusterStateValidations. func (n *Nutanix) ClusterStateValidations() []clusterf.StateValidation { return []clusterf.StateValidation{} diff --git a/test/framework/registry_mirror.go b/test/framework/registry_mirror.go index 44322448a30c..c39fcec9efb3 100644 --- a/test/framework/registry_mirror.go +++ b/test/framework/registry_mirror.go @@ -7,6 +7,7 @@ import ( "os" "github.com/aws/eks-anywhere/internal/pkg/api" + "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/constants" ) @@ -33,6 +34,11 @@ const ( PrivateRegistryUsernameTinkerbellVar = "T_PRIVATE_REGISTRY_MIRROR_USERNAME_TINKERBELL" PrivateRegistryPasswordTinkerbellVar = "T_PRIVATE_REGISTRY_MIRROR_PASSWORD_TINKERBELL" PrivateRegistryCACertTinkerbellVar = "T_PRIVATE_REGISTRY_MIRROR_CA_CERT_TINKERBELL" + + RegistryMirrorOciNamespacesRegistry1Var = "T_REGISTRY_MIRROR_OCINAMESPACES_REGISTRY1" + RegistryMirrorOciNamespacesNamespace1Var = "T_REGISTRY_MIRROR_OCINAMESPACES_NAMESPACE1" + RegistryMirrorOciNamespacesRegistry2Var = "T_REGISTRY_MIRROR_OCINAMESPACES_REGISTRY2" + RegistryMirrorOciNamespacesNamespace2Var = "T_REGISTRY_MIRROR_OCINAMESPACES_NAMESPACE2" ) var ( @@ -41,6 +47,7 @@ var ( registryMirrorDockerAirgappedRequiredEnvVars = []string{RegistryMirrorDefaultSecurityGroup, RegistryMirrorAirgappedSecurityGroup} privateRegistryMirrorRequiredEnvVars = []string{PrivateRegistryEndpointVar, PrivateRegistryPortVar, PrivateRegistryUsernameVar, PrivateRegistryPasswordVar, PrivateRegistryCACertVar} privateRegistryMirrorTinkerbellRequiredEnvVars = []string{PrivateRegistryEndpointTinkerbellVar, PrivateRegistryPortTinkerbellVar, PrivateRegistryUsernameTinkerbellVar, PrivateRegistryPasswordTinkerbellVar, PrivateRegistryCACertTinkerbellVar} + registryMirrorOciNamespacesRequiredEnvVars = []string{RegistryMirrorOciNamespacesRegistry1Var, RegistryMirrorOciNamespacesNamespace1Var} ) // WithRegistryMirrorInsecureSkipVerify sets up e2e for registry mirrors with InsecureSkipVerify option. @@ -57,6 +64,30 @@ func WithRegistryMirrorEndpointAndCert(providerName string) ClusterE2ETestOpt { } } +// WithRegistryMirrorOciNamespaces sets up e2e for registry mirrors with ocinamespaces. +func WithRegistryMirrorOciNamespaces(providerName string) ClusterE2ETestOpt { + return func(e *ClusterE2ETest) { + var ociNamespaces []v1alpha1.OCINamespace + + checkRequiredEnvVars(e.T, registryMirrorOciNamespacesRequiredEnvVars) + ociNamespaces = append(ociNamespaces, v1alpha1.OCINamespace{ + Registry: os.Getenv(RegistryMirrorOciNamespacesRegistry1Var), + Namespace: os.Getenv(RegistryMirrorOciNamespacesNamespace1Var), + }) + + reg2val, reg2Found := os.LookupEnv(RegistryMirrorOciNamespacesRegistry2Var) + ns2val, ns2Found := os.LookupEnv(RegistryMirrorOciNamespacesNamespace2Var) + if reg2Found && ns2Found { + ociNamespaces = append(ociNamespaces, v1alpha1.OCINamespace{ + Registry: reg2val, + Namespace: ns2val, + }) + } + + setupRegistryMirrorEndpointAndCert(e, providerName, false, ociNamespaces...) + } +} + // WithAuthenticatedRegistryMirror sets up e2e for authenticated registry mirrors. 
func WithAuthenticatedRegistryMirror(providerName string) ClusterE2ETestOpt { return func(e *ClusterE2ETest) { @@ -116,7 +147,12 @@ func RequiredRegistryMirrorEnvVars() []string { return append(registryMirrorRequiredEnvVars, registryMirrorDockerAirgappedRequiredEnvVars...) } -func setupRegistryMirrorEndpointAndCert(e *ClusterE2ETest, providerName string, insecureSkipVerify bool) { +// RequiredOciNamespacesEnvVars returns the Env variables to set for OCI Namespaces tests. +func RequiredOciNamespacesEnvVars() []string { + return append(registryMirrorOciNamespacesRequiredEnvVars, RegistryMirrorOciNamespacesRegistry2Var, RegistryMirrorOciNamespacesNamespace2Var) +} + +func setupRegistryMirrorEndpointAndCert(e *ClusterE2ETest, providerName string, insecureSkipVerify bool, ociNamespaces ...v1alpha1.OCINamespace) { var endpoint, hostPort, username, password, registryCert string port := "443" @@ -150,7 +186,7 @@ func setupRegistryMirrorEndpointAndCert(e *ClusterE2ETest, providerName string, certificate, err := base64.StdEncoding.DecodeString(registryCert) if err == nil { e.clusterFillers = append(e.clusterFillers, - api.WithRegistryMirror(endpoint, port, string(certificate), false, insecureSkipVerify), + api.WithRegistryMirror(endpoint, port, string(certificate), false, insecureSkipVerify, ociNamespaces...), ) } diff --git a/test/framework/snow.go b/test/framework/snow.go index a326c5af1ab3..386d86e6402d 100644 --- a/test/framework/snow.go +++ b/test/framework/snow.go @@ -102,8 +102,8 @@ func (s *Snow) ClusterConfigUpdates() []api.ClusterConfigFiller { return []api.ClusterConfigFiller{api.ClusterToConfigFiller(f...), api.SnowToConfigFiller(s.fillers...)} } -// CleanupVMs satisfies the test framework Provider. -func (s *Snow) CleanupVMs(clusterName string) error { +// CleanupResources satisfies the test framework Provider. +func (s *Snow) CleanupResources(clusterName string) error { snowDeviceIPs := strings.Split(os.Getenv(snowDevices), ",") s.t.Logf("Cleaning ec2 instances of %s in snow devices: %v", clusterName, snowDeviceIPs) @@ -330,7 +330,7 @@ func (s *Snow) withBottlerocketStaticIPForKubeVersion(kubeVersion anywherev1.Kub // WithKubeVersionAndOS returns a cluster config filler that sets the cluster kube version and the correct AMI ID // and devices for the Snow machine configs. -func (s *Snow) WithKubeVersionAndOS(kubeVersion anywherev1.KubernetesVersion, os OS, release *releasev1.EksARelease) api.ClusterConfigFiller { +func (s *Snow) WithKubeVersionAndOS(kubeVersion anywherev1.KubernetesVersion, os OS, _ *releasev1.EksARelease, _ ...bool) api.ClusterConfigFiller { envar := fmt.Sprintf("T_SNOW_AMIID_%s_%s", strings.ToUpper(strings.ReplaceAll(string(os), "-", "_")), strings.ReplaceAll(string(kubeVersion), ".", "_")) return api.JoinClusterConfigFillers( diff --git a/test/framework/testdata/autoscaler_load.yaml b/test/framework/testdata/autoscaler_load.yaml new file mode 100644 index 000000000000..b3598594302f --- /dev/null +++ b/test/framework/testdata/autoscaler_load.yaml @@ -0,0 +1,17 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: autoscaler-load +spec: + selector: + matchLabels: + run: autoscaler-load + replicas: 111 # a k8s node can have up to 110 pods, so this ensures at least 1 pod is unschedulable.
+ template: + metadata: + labels: + run: autoscaler-load + spec: + containers: + - name: nginx + image: public.ecr.aws/docker/library/nginx:stable diff --git a/test/framework/testdata/hpa_busybox.yaml b/test/framework/testdata/hpa_busybox.yaml deleted file mode 100644 index 55d690289be1..000000000000 --- a/test/framework/testdata/hpa_busybox.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: hpa-busybox-test -spec: - selector: - matchLabels: - run: hpa-busybox-test - replicas: 1 - template: - metadata: - labels: - run: hpa-busybox-test - spec: - containers: - - name: busybox - image: busybox:1.34 - resources: - limits: - cpu: 50m - requests: - cpu: 10m - memory: 500Mi - command: ["sh", "-c"] - args: - - while [ 1 ]; do - echo "Test"; - sleep 0.01; - done \ No newline at end of file diff --git a/test/framework/testdata/local-path-storage.yaml b/test/framework/testdata/local-path-storage.yaml new file mode 100644 index 000000000000..cad22c1e2150 --- /dev/null +++ b/test/framework/testdata/local-path-storage.yaml @@ -0,0 +1,129 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: local-path-storage + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: local-path-provisioner-service-account + namespace: local-path-storage + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: local-path-provisioner-role +rules: + - apiGroups: [ "" ] + resources: [ "nodes", "persistentvolumeclaims", "configmaps" ] + verbs: [ "get", "list", "watch" ] + - apiGroups: [ "" ] + resources: [ "endpoints", "persistentvolumes", "pods" ] + verbs: [ "*" ] + - apiGroups: [ "" ] + resources: [ "events" ] + verbs: [ "create", "patch" ] + - apiGroups: [ "storage.k8s.io" ] + resources: [ "storageclasses" ] + verbs: [ "get", "list", "watch" ] + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: local-path-provisioner-bind +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: local-path-provisioner-role +subjects: + - kind: ServiceAccount + name: local-path-provisioner-service-account + namespace: local-path-storage + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: local-path-provisioner + namespace: local-path-storage +spec: + replicas: 1 + selector: + matchLabels: + app: local-path-provisioner + template: + metadata: + labels: + app: local-path-provisioner + spec: + serviceAccountName: local-path-provisioner-service-account + containers: + - name: local-path-provisioner + image: public.ecr.aws/eks-anywhere/rancher/local-path-provisioner:v0.0.26-eks-a-62 + imagePullPolicy: IfNotPresent + command: + - local-path-provisioner + - --debug + - start + - --config + - /etc/config/config.json + volumeMounts: + - name: config-volume + mountPath: /etc/config/ + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumes: + - name: config-volume + configMap: + name: local-path-config + +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: local-path +provisioner: rancher.io/local-path +volumeBindingMode: WaitForFirstConsumer +reclaimPolicy: Delete + +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: local-path-config + namespace: local-path-storage +data: + config.json: |- + { + "nodePathMap":[ + { + "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES", + "paths":["/opt/local-path-provisioner"] + } + ] + } + setup: |- + #!/bin/sh + set -eu + mkdir -m 0777 -p "$VOL_DIR" + teardown: |- + #!/bin/sh + set -eu + rm -rf "$VOL_DIR" 
+ helperPod.yaml: |- + apiVersion: v1 + kind: Pod + metadata: + name: helper-pod + spec: + containers: + - name: helper-pod + image: public.ecr.aws/docker/library/busybox:1.36 + imagePullPolicy: IfNotPresent + diff --git a/test/framework/tinkerbell.go b/test/framework/tinkerbell.go index 1d56f12ca79f..bcc5fe71e87e 100644 --- a/test/framework/tinkerbell.go +++ b/test/framework/tinkerbell.go @@ -7,60 +7,63 @@ import ( "testing" "github.com/aws/eks-anywhere/internal/pkg/api" + "github.com/aws/eks-anywhere/internal/test/cleanup" anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1" clusterf "github.com/aws/eks-anywhere/test/framework/cluster" ) const ( - tinkerbellProviderName = "tinkerbell" - tinkerbellBootstrapIPEnvVar = "T_TINKERBELL_BOOTSTRAP_IP" - tinkerbellControlPlaneNetworkCidrEnvVar = "T_TINKERBELL_CP_NETWORK_CIDR" - tinkerbellImageUbuntu124EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_1_24" - tinkerbellImageUbuntu125EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_1_25" - tinkerbellImageUbuntu126EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_1_26" - tinkerbellImageUbuntu127EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_1_27" - tinkerbellImageUbuntu128EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_1_28" - tinkerbellImageUbuntu129EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_1_29" - tinkerbellImageUbuntu2204Kubernetes124EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_24" - tinkerbellImageUbuntu2204Kubernetes125EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_25" - tinkerbellImageUbuntu2204Kubernetes126EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_26" - tinkerbellImageUbuntu2204Kubernetes127EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_27" - tinkerbellImageUbuntu2204Kubernetes128EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_28" - tinkerbellImageUbuntu2204Kubernetes129EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_29" - tinkerbellImageRedHat124EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_24" - tinkerbellImageRedHat125EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_25" - tinkerbellImageRedHat126EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_26" - tinkerbellImageRedHat127EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_27" - tinkerbellImageRedHat128EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_28" - tinkerbellImageRedHat129EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_29" - tinkerbellInventoryCsvFilePathEnvVar = "T_TINKERBELL_INVENTORY_CSV" - tinkerbellSSHAuthorizedKey = "T_TINKERBELL_SSH_AUTHORIZED_KEY" - TinkerbellCIEnvironment = "T_TINKERBELL_CI_ENVIRONMENT" - controlPlaneIdentifier = "cp" - workerIdentifier = "worker" + tinkerbellProviderName = "tinkerbell" + tinkerbellBootstrapIPEnvVar = "T_TINKERBELL_BOOTSTRAP_IP" + tinkerbellControlPlaneNetworkCidrEnvVar = "T_TINKERBELL_CP_NETWORK_CIDR" + tinkerbellImageUbuntu125EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_1_25" + tinkerbellImageUbuntu126EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_1_26" + tinkerbellImageUbuntu127EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_1_27" + tinkerbellImageUbuntu128EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_1_28" + tinkerbellImageUbuntu129EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_1_29" + tinkerbellImageUbuntu130EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_1_30" + tinkerbellImageUbuntu2204Kubernetes125EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_25" + tinkerbellImageUbuntu2204Kubernetes126EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_26" + tinkerbellImageUbuntu2204Kubernetes127EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_27" + tinkerbellImageUbuntu2204Kubernetes128EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_28" + tinkerbellImageUbuntu2204Kubernetes129EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_29" + 
tinkerbellImageUbuntu2204Kubernetes129RTOSEnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_29_RTOS" + tinkerbellImageUbuntu2204Kubernetes130EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_30" + tinkerbellImageRedHat125EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_25" + tinkerbellImageRedHat126EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_26" + tinkerbellImageRedHat127EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_27" + tinkerbellImageRedHat128EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_28" + tinkerbellImageRedHat129EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_29" + tinkerbellImageRedHat130EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_30" + tinkerbellInventoryCsvFilePathEnvVar = "T_TINKERBELL_INVENTORY_CSV" + tinkerbellSSHAuthorizedKey = "T_TINKERBELL_SSH_AUTHORIZED_KEY" + tinkerbellCIEnvironmentEnvVar = "T_TINKERBELL_CI_ENVIRONMENT" + controlPlaneIdentifier = "cp" + workerIdentifier = "worker" ) var requiredTinkerbellEnvVars = []string{ tinkerbellControlPlaneNetworkCidrEnvVar, - tinkerbellImageUbuntu124EnvVar, tinkerbellImageUbuntu125EnvVar, tinkerbellImageUbuntu126EnvVar, tinkerbellImageUbuntu127EnvVar, tinkerbellImageUbuntu128EnvVar, tinkerbellImageUbuntu129EnvVar, - tinkerbellImageUbuntu2204Kubernetes124EnvVar, + tinkerbellImageUbuntu130EnvVar, tinkerbellImageUbuntu2204Kubernetes125EnvVar, tinkerbellImageUbuntu2204Kubernetes126EnvVar, tinkerbellImageUbuntu2204Kubernetes127EnvVar, tinkerbellImageUbuntu2204Kubernetes128EnvVar, tinkerbellImageUbuntu2204Kubernetes129EnvVar, - tinkerbellImageRedHat124EnvVar, + tinkerbellImageUbuntu2204Kubernetes129RTOSEnvVar, + tinkerbellImageUbuntu2204Kubernetes130EnvVar, tinkerbellImageRedHat125EnvVar, tinkerbellImageRedHat126EnvVar, tinkerbellImageRedHat127EnvVar, tinkerbellImageRedHat128EnvVar, tinkerbellImageRedHat129EnvVar, + tinkerbellImageRedHat130EnvVar, tinkerbellInventoryCsvFilePathEnvVar, tinkerbellSSHAuthorizedKey, } @@ -146,17 +149,18 @@ func (t *Tinkerbell) WithProviderUpgrade(fillers ...api.TinkerbellFiller) Cluste } } -func (t *Tinkerbell) CleanupVMs(_ string) error { - return nil +// CleanupResources cleans up the Tinkerbell machines by simply powering them down. +func (t *Tinkerbell) CleanupResources(_ string) error { + return cleanup.TinkerbellTestResources(t.inventoryCsvFilePath, true) } // WithKubeVersionAndOS returns a cluster config filler that sets the cluster kube version and the right image for all // tinkerbell machine configs.
-func (t *Tinkerbell) WithKubeVersionAndOS(kubeVersion anywherev1.KubernetesVersion, os OS, release *releasev1.EksARelease) api.ClusterConfigFiller { +func (t *Tinkerbell) WithKubeVersionAndOS(kubeVersion anywherev1.KubernetesVersion, os OS, _ *releasev1.EksARelease, rtos ...bool) api.ClusterConfigFiller { return api.JoinClusterConfigFillers( api.ClusterToConfigFiller(api.WithKubernetesVersion(kubeVersion)), api.TinkerbellToConfigFiller( - imageForKubeVersionAndOS(kubeVersion, os, ""), + imageForKubeVersionAndOS(kubeVersion, os, "", rtos...), api.WithOsFamilyForAllTinkerbellMachines(osFamiliesForOS[os]), ), ) @@ -189,8 +193,11 @@ func (t *Tinkerbell) WithNewWorkerNodeGroup(name string, workerNodeGroup *Worker panic("Not implemented for Tinkerbell yet") } -func envVarForImage(os OS, kubeVersion anywherev1.KubernetesVersion) string { +func envVarForImage(os OS, kubeVersion anywherev1.KubernetesVersion, rtos ...bool) string { imageEnvVar := fmt.Sprintf("T_TINKERBELL_IMAGE_%s_%s", strings.ToUpper(strings.ReplaceAll(string(os), "-", "_")), strings.ReplaceAll(string(kubeVersion), ".", "_")) + if len(rtos) > 0 && rtos[0] { + imageEnvVar = fmt.Sprintf("%s_RTOS", imageEnvVar) + } return imageEnvVar } @@ -237,6 +244,11 @@ func WithUbuntu129Tinkerbell() TinkerbellOpt { return withKubeVersionAndOS(anywherev1.Kube129, Ubuntu2004, "", nil) } +// WithUbuntu130Tinkerbell tink test with ubuntu 1.30. +func WithUbuntu130Tinkerbell() TinkerbellOpt { + return withKubeVersionAndOS(anywherev1.Kube130, Ubuntu2004, "", nil) +} + // WithRedHat125Tinkerbell tink test with redhat 1.25. func WithRedHat125Tinkerbell() TinkerbellOpt { return withKubeVersionAndOS(anywherev1.Kube125, RedHat8, "", nil) @@ -262,6 +274,11 @@ func WithRedHat129Tinkerbell() TinkerbellOpt { return withKubeVersionAndOS(anywherev1.Kube129, RedHat8, "", nil) } +// WithRedHat130Tinkerbell tink test with redhat 1.30. +func WithRedHat130Tinkerbell() TinkerbellOpt { + return withKubeVersionAndOS(anywherev1.Kube130, RedHat8, "", nil) +} + func WithBottleRocketTinkerbell() TinkerbellOpt { return func(t *Tinkerbell) { t.fillers = append(t.fillers, @@ -307,14 +324,14 @@ func WithHookImagesURLPath(url string) TinkerbellOpt { } // imageForKubeVersionAndOS sets osImageURL on the appropriate field in the Machine Config based on the machineConfigType string provided else sets it at Data Center config. 
-func imageForKubeVersionAndOS(kubeVersion anywherev1.KubernetesVersion, operatingSystem OS, machineConfigType string) api.TinkerbellFiller { +func imageForKubeVersionAndOS(kubeVersion anywherev1.KubernetesVersion, operatingSystem OS, machineConfigType string, rtos ...bool) api.TinkerbellFiller { var tinkerbellFiller api.TinkerbellFiller if machineConfigType == workerIdentifier { - tinkerbellFiller = api.WithTinkerbellWorkerMachineConfigOSImageURL(os.Getenv(envVarForImage(operatingSystem, kubeVersion)), osFamiliesForOS[operatingSystem]) + tinkerbellFiller = api.WithTinkerbellWorkerMachineConfigOSImageURL(os.Getenv(envVarForImage(operatingSystem, kubeVersion, rtos...)), osFamiliesForOS[operatingSystem]) } else if machineConfigType == controlPlaneIdentifier { - tinkerbellFiller = api.WithTinkerbellCPMachineConfigOSImageURL(os.Getenv(envVarForImage(operatingSystem, kubeVersion)), osFamiliesForOS[operatingSystem]) + tinkerbellFiller = api.WithTinkerbellCPMachineConfigOSImageURL(os.Getenv(envVarForImage(operatingSystem, kubeVersion, rtos...)), osFamiliesForOS[operatingSystem]) } else { - tinkerbellFiller = api.WithTinkerbellOSImageURL(os.Getenv(envVarForImage(operatingSystem, kubeVersion))) + tinkerbellFiller = api.WithTinkerbellOSImageURL(os.Getenv(envVarForImage(operatingSystem, kubeVersion, rtos...))) } return tinkerbellFiller } @@ -344,6 +361,11 @@ func Ubuntu129Image() api.TinkerbellFiller { return imageForKubeVersionAndOS(anywherev1.Kube129, Ubuntu2004, "") } +// Ubuntu130Image represents an Ubuntu raw image corresponding to Kubernetes 1.30. +func Ubuntu130Image() api.TinkerbellFiller { + return imageForKubeVersionAndOS(anywherev1.Kube130, Ubuntu2004, "") +} + // Ubuntu126ImageForCP represents an Ubuntu raw image corresponding to Kubernetes 1.28 and is set for CP machine config. func Ubuntu126ImageForCP() api.TinkerbellFiller { return imageForKubeVersionAndOS(anywherev1.Kube126, Ubuntu2004, controlPlaneIdentifier) @@ -364,6 +386,11 @@ func Ubuntu129ImageForCP() api.TinkerbellFiller { return imageForKubeVersionAndOS(anywherev1.Kube129, Ubuntu2004, controlPlaneIdentifier) } +// Ubuntu130ImageForCP represents an Ubuntu raw image corresponding to Kubernetes 1.30 and is set for CP machine config. +func Ubuntu130ImageForCP() api.TinkerbellFiller { + return imageForKubeVersionAndOS(anywherev1.Kube130, Ubuntu2004, controlPlaneIdentifier) +} + // Ubuntu126ImageForWorker represents an Ubuntu raw image corresponding to Kubernetes 1.28 and is set for worker machine config. func Ubuntu126ImageForWorker() api.TinkerbellFiller { return imageForKubeVersionAndOS(anywherev1.Kube126, Ubuntu2004, workerIdentifier) @@ -384,6 +411,11 @@ func Ubuntu129ImageForWorker() api.TinkerbellFiller { return imageForKubeVersionAndOS(anywherev1.Kube129, Ubuntu2004, workerIdentifier) } +// Ubuntu130ImageForWorker represents an Ubuntu raw image corresponding to Kubernetes 1.30 and is set for worker machine config. +func Ubuntu130ImageForWorker() api.TinkerbellFiller { + return imageForKubeVersionAndOS(anywherev1.Kube130, Ubuntu2004, workerIdentifier) +} + // Ubuntu2204Kubernetes126Image represents an Ubuntu 22.04 raw image corresponding to Kubernetes 1.26. 
diff --git a/test/framework/vsphere.go b/test/framework/vsphere.go
index 6e4b9ec28c06..f7a22984e982 100644
--- a/test/framework/vsphere.go
+++ b/test/framework/vsphere.go
@@ -165,6 +165,17 @@ func WithRedHat129VSphere() VSphereOpt {
 	return withVSphereKubeVersionAndOS(anywherev1.Kube129, RedHat8, nil)
 }
 
+// WithRedHat130VSphere vsphere test with Redhat 8 for Kubernetes 1.30.
+func WithRedHat130VSphere() VSphereOpt {
+	return withVSphereKubeVersionAndOS(anywherev1.Kube130, RedHat8, nil)
+}
+
+// WithUbuntu130 returns a VSphereOpt that adds API fillers to use a Ubuntu vSphere template for k8s 1.30
+// and the "ubuntu" osFamily in all machine configs.
+func WithUbuntu130() VSphereOpt {
+	return withVSphereKubeVersionAndOS(anywherev1.Kube130, Ubuntu2004, nil)
+}
+
 // WithUbuntu129 returns a VSphereOpt that adds API fillers to use a Ubuntu vSphere template for k8s 1.29
 // and the "ubuntu" osFamily in all machine configs.
 func WithUbuntu129() VSphereOpt {
@@ -220,6 +231,11 @@ func WithBottleRocket129() VSphereOpt {
 	return withVSphereKubeVersionAndOS(anywherev1.Kube129, Bottlerocket1, nil)
 }
 
+// WithBottleRocket130 returns br 1.30 var.
+func WithBottleRocket130() VSphereOpt {
+	return withVSphereKubeVersionAndOS(anywherev1.Kube130, Bottlerocket1, nil)
+}
+
 func WithPrivateNetwork() VSphereOpt {
 	return func(v *VSphere) {
 		v.fillers = append(v.fillers,
@@ -313,7 +329,7 @@ func WithVSphereWorkerNodeGroup(name string, workerNodeGroup *WorkerNodeGroup, f
 }
 
 // WithMachineTemplate returns an api.ClusterConfigFiller that changes template in machine template.
-func (v *VSphere) WithMachineTemplate(machineName string, template string) api.ClusterConfigFiller {
+func (v *VSphere) WithMachineTemplate(machineName, template string) api.ClusterConfigFiller {
 	return api.JoinClusterConfigFillers(
 		api.VSphereToConfigFiller(api.WithMachineTemplate(machineName, template)),
 	)
@@ -375,7 +391,7 @@ func (v *VSphere) ClusterConfigUpdates() []api.ClusterConfigFiller {
 
 // WithKubeVersionAndOS returns a cluster config filler that sets the cluster kube version and the right template for all
 // vsphere machine configs.
-func (v *VSphere) WithKubeVersionAndOS(kubeVersion anywherev1.KubernetesVersion, os OS, release *releasev1.EksARelease) api.ClusterConfigFiller {
+func (v *VSphere) WithKubeVersionAndOS(kubeVersion anywherev1.KubernetesVersion, os OS, release *releasev1.EksARelease, _ ...bool) api.ClusterConfigFiller {
 	return api.JoinClusterConfigFillers(
 		api.ClusterToConfigFiller(api.WithKubernetesVersion(kubeVersion)),
 		api.VSphereToConfigFiller(
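The vSphere provider now takes the same trailing variadic as Tinkerbell's WithKubeVersionAndOS even though it ignores the flag; a plausible reading is that both satisfy a common method shape so callers can pass the RTOS toggle uniformly. An illustrative sketch of that assumed shape (the interface name is not from this diff):

// Illustrative only; the real constraint may live on an existing framework interface.
type kubeVersionAndOSFiller interface {
	WithKubeVersionAndOS(kubeVersion anywherev1.KubernetesVersion, os OS, release *releasev1.EksARelease, rtos ...bool) api.ClusterConfigFiller
}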
@@ -425,8 +441,8 @@ func (v *VSphere) WithBottleRocket125() api.ClusterConfigFiller {
 	return v.WithKubeVersionAndOS(anywherev1.Kube125, Bottlerocket1, nil)
 }
 
-// CleanupVMs deletes all the VMs owned by the test EKS-A cluster. It satisfies the test framework Provider.
-func (v *VSphere) CleanupVMs(clusterName string) error {
+// CleanupResources deletes all the VMs owned by the test EKS-A cluster. It satisfies the test framework Provider.
+func (v *VSphere) CleanupResources(clusterName string) error {
 	return cleanup.CleanUpVsphereTestResources(context.Background(), clusterName)
 }
 
@@ -493,6 +509,11 @@ func (v *VSphere) Ubuntu129Template() api.VSphereFiller {
 	return v.templateForKubeVersionAndOS(anywherev1.Kube129, Ubuntu2004, nil)
 }
 
+// Ubuntu130Template returns vsphere filler for 1.30 Ubuntu.
+func (v *VSphere) Ubuntu130Template() api.VSphereFiller {
+	return v.templateForKubeVersionAndOS(anywherev1.Kube130, Ubuntu2004, nil)
+}
+
 // Ubuntu128TemplateForMachineConfig returns vsphere filler for 1.28 Ubuntu for a specific machine config.
 func (v *VSphere) Ubuntu128TemplateForMachineConfig(name string) api.VSphereFiller {
 	return v.templateForKubeVersionAndOSMachineConfig(name, anywherev1.Kube128, Ubuntu2004)
@@ -518,6 +539,11 @@ func (v *VSphere) Ubuntu2204Kubernetes129Template() api.VSphereFiller {
 	return v.templateForKubeVersionAndOS(anywherev1.Kube129, Ubuntu2204, nil)
 }
 
+// Ubuntu2204Kubernetes130Template returns vsphere filler for 1.30 Ubuntu 22.04.
+func (v *VSphere) Ubuntu2204Kubernetes130Template() api.VSphereFiller {
+	return v.templateForKubeVersionAndOS(anywherev1.Kube130, Ubuntu2204, nil)
+}
+
 // Bottlerocket125Template returns vsphere filler for 1.25 BR.
 func (v *VSphere) Bottlerocket125Template() api.VSphereFiller {
 	return v.templateForKubeVersionAndOS(anywherev1.Kube125, Bottlerocket1, nil)
@@ -543,6 +569,16 @@ func (v *VSphere) Bottlerocket129Template() api.VSphereFiller {
 	return v.templateForKubeVersionAndOS(anywherev1.Kube129, Bottlerocket1, nil)
 }
 
+// Bottlerocket130Template returns vsphere filler for 1.30 BR.
+func (v *VSphere) Bottlerocket130Template() api.VSphereFiller {
+	return v.templateForKubeVersionAndOS(anywherev1.Kube130, Bottlerocket1, nil)
+}
+
+// Redhat130Template returns vsphere filler for 1.30 Redhat.
+func (v *VSphere) Redhat130Template() api.VSphereFiller {
+	return v.templateForKubeVersionAndOS(anywherev1.Kube130, RedHat8, nil)
+}
+
 // Redhat129Template returns vsphere filler for 1.29 Redhat.
 func (v *VSphere) Redhat129Template() api.VSphereFiller {
 	return v.templateForKubeVersionAndOS(anywherev1.Kube129, RedHat8, nil)
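Similarly, a hypothetical vSphere test exercising the new 1.30 options; the runSimpleFlow helper and the cluster-filler pattern are assumed from the existing 1.29 simple-flow tests rather than taken from this diff.

package e2e

import (
	"testing"

	"github.com/aws/eks-anywhere/internal/pkg/api"
	"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
	"github.com/aws/eks-anywhere/test/framework"
)

// Sketch only: a simple-flow test on vSphere with Ubuntu and Kubernetes 1.30.
func TestVSphereKubernetes130UbuntuSimpleFlow(t *testing.T) {
	test := framework.NewClusterE2ETest(
		t,
		framework.NewVSphere(t, framework.WithUbuntu130()),
		framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)),
	)
	runSimpleFlow(test)
}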