From f757a594a92d08db4187a11c86a0e0fbc15d582a Mon Sep 17 00:00:00 2001
From: Saurabh Parekh
Date: Fri, 22 Mar 2024 08:41:32 -0700
Subject: [PATCH 001/193] Add api server extra args feature flag to eksa
 controller deployment (#7883)

---
 pkg/clustermanager/eksa_installer.go      |  5 +++++
 pkg/clustermanager/eksa_installer_test.go | 20 ++++++++++++++++++++
 2 files changed, 25 insertions(+)

diff --git a/pkg/clustermanager/eksa_installer.go b/pkg/clustermanager/eksa_installer.go
index 86193c334419..6e7b27fe2cc7 100644
--- a/pkg/clustermanager/eksa_installer.go
+++ b/pkg/clustermanager/eksa_installer.go
@@ -247,6 +247,11 @@ func setManagerEnvVars(d *appsv1.Deployment, spec *cluster.Spec) {
 		envVars = append(envVars, v1.EnvVar{Name: features.VSphereInPlaceEnvVar, Value: "true"})
 	}
 
+	// TODO: remove this feature flag when we support API server flags.
+	if features.IsActive(features.APIServerExtraArgsEnabled()) {
+		envVars = append(envVars, v1.EnvVar{Name: features.APIServerExtraArgsEnabledEnvVar, Value: "true"})
+	}
+
 	d.Spec.Template.Spec.Containers[0].Env = envVars
 }
 
diff --git a/pkg/clustermanager/eksa_installer_test.go b/pkg/clustermanager/eksa_installer_test.go
index 16875979f4ab..3d17b1738367 100644
--- a/pkg/clustermanager/eksa_installer_test.go
+++ b/pkg/clustermanager/eksa_installer_test.go
@@ -420,6 +420,26 @@ func TestSetManagerEnvVarsVSphereInPlaceUpgrade(t *testing.T) {
 	g.Expect(deploy).To(Equal(want))
 }
 
+func TestSetManagerEnvVarsAPIServerExtraArgs(t *testing.T) {
+	g := NewWithT(t)
+	features.ClearCache()
+	t.Setenv(features.APIServerExtraArgsEnabledEnvVar, "true")
+
+	deploy := deployment()
+	spec := test.NewClusterSpec()
+	want := deployment(func(d *appsv1.Deployment) {
+		d.Spec.Template.Spec.Containers[0].Env = []corev1.EnvVar{
+			{
+				Name:  "API_SERVER_EXTRA_ARGS_ENABLED",
+				Value: "true",
+			},
+		}
+	})
+
+	clustermanager.SetManagerEnvVars(deploy, spec)
+	g.Expect(deploy).To(Equal(want))
+}
+
 func TestEKSAInstallerNewUpgraderConfigMap(t *testing.T) {
 	tt := newInstallerTest(t)

From 5f7b94d440cae6fa63c53973115c2fb31773bb40 Mon Sep 17 00:00:00 2001
From: Tanvir Tatla
Date: Fri, 22 Mar 2024 09:36:32 -0700
Subject: [PATCH 002/193] add cluster mover (#7790)

---
 pkg/clustermanager/eksa_mover.go      | 137 ++++++++++++++++++++++++++
 pkg/clustermanager/eksa_mover_test.go | 126 +++++++++++++++++++++++
 pkg/dependencies/factory.go           |  21 ++++
 pkg/dependencies/factory_test.go      |  12 +++
 4 files changed, 296 insertions(+)
 create mode 100644 pkg/clustermanager/eksa_mover.go
 create mode 100644 pkg/clustermanager/eksa_mover_test.go

diff --git a/pkg/clustermanager/eksa_mover.go b/pkg/clustermanager/eksa_mover.go
new file mode 100644
index 000000000000..f2a72f66bb94
--- /dev/null
+++ b/pkg/clustermanager/eksa_mover.go
@@ -0,0 +1,137 @@
+package clustermanager
+
+import (
+	"context"
+	"math"
+	"time"
+
+	"github.com/go-logr/logr"
+	"github.com/pkg/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+
+	"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
+	"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
+	"github.com/aws/eks-anywhere/pkg/cluster"
+	"github.com/aws/eks-anywhere/pkg/retrier"
+)
+
+// MoverOpt allows to customize a Mover on construction.
+type MoverOpt func(*Mover)
+
+// Mover moves an EKS-A cluster object and its child objects from a source
+// cluster to a target cluster, retrying until the move fully succeeds.
+type Mover struct {
+	log                logr.Logger
+	clientFactory      ClientFactory
+	moveClusterTimeout time.Duration
+	retryBackOff       time.Duration
+}
+
+// NewMover builds a Mover.
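+//
+// A minimal construction sketch (illustrative; the option value here is
+// arbitrary and mirrors how the tests and dependency factory below wire it up):
+//
+//	m := clustermanager.NewMover(logger, clientFactory,
+//		clustermanager.WithMoverRetryBackOff(time.Second),
+//	)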
+func NewMover(log logr.Logger, clientFactory ClientFactory, opts ...MoverOpt) *Mover { + m := &Mover{ + log: log, + clientFactory: clientFactory, + moveClusterTimeout: applyClusterSpecTimeout, + retryBackOff: retryBackOff, + } + + for _, opt := range opts { + opt(m) + } + + return m +} + +// WithMoverNoTimeouts disables the timeout for all the waits and retries in management upgrader. +func WithMoverNoTimeouts() MoverOpt { + return func(a *Mover) { + maxTime := time.Duration(math.MaxInt64) + a.moveClusterTimeout = maxTime + } +} + +// WithMoverApplyClusterTimeout allows to configure how long the mover retries +// to apply the objects in case of failure. +// Generally only used in tests. +func WithMoverApplyClusterTimeout(timeout time.Duration) MoverOpt { + return func(m *Mover) { + m.moveClusterTimeout = timeout + } +} + +// WithMoverRetryBackOff allows to configure how long the mover waits between requests +// to update the cluster spec objects and check the status of the Cluster. +// Generally only used in tests. +func WithMoverRetryBackOff(backOff time.Duration) MoverOpt { + return func(m *Mover) { + m.retryBackOff = backOff + } +} + +// Move applies the cluster's namespace and spec without checking for reconcile conditions. +func (m *Mover) Move(ctx context.Context, spec *cluster.Spec, fromClient, toClient kubernetes.Client) error { + m.log.V(3).Info("Moving the cluster object") + err := retrier.New( + m.moveClusterTimeout, + retrier.WithRetryPolicy(retrier.BackOffPolicy(m.retryBackOff)), + ).Retry(func() error { + // read the cluster from bootstrap + cluster := &v1alpha1.Cluster{} + if err := fromClient.Get(ctx, spec.Cluster.Name, spec.Cluster.Namespace, cluster); err != nil { + return errors.Wrapf(err, "reading cluster from source") + } + + // pause cluster on bootstrap + cluster.PauseReconcile() + if err := fromClient.Update(ctx, cluster); err != nil { + return errors.Wrapf(err, "updating cluster on source") + } + + if err := moveClusterResource(ctx, cluster, toClient); err != nil { + return err + } + + if err := moveChildObjects(ctx, spec, fromClient, toClient); err != nil { + return err + } + + return nil + }) + + return err +} + +func moveClusterResource(ctx context.Context, cluster *v1alpha1.Cluster, client kubernetes.Client) error { + cluster.ResourceVersion = "" + cluster.UID = "" + + // move eksa cluster + if err := client.Create(ctx, cluster); err != nil && !apierrors.IsAlreadyExists(err) { + return errors.Wrapf(err, "moving cluster %s", cluster.Name) + } + + return nil +} + +func moveChildObjects(ctx context.Context, spec *cluster.Spec, fromClient, toClient kubernetes.Client) error { + // read and move child objects + for _, child := range spec.ChildObjects() { + obj := &unstructured.Unstructured{} + obj.SetGroupVersionKind(child.GetObjectKind().GroupVersionKind()) + if err := fromClient.Get(ctx, child.GetName(), child.GetNamespace(), obj); err != nil { + return errors.Wrapf(err, "reading child object %s %s", child.GetObjectKind().GroupVersionKind().Kind, child.GetName()) + } + + obj.SetResourceVersion("") + obj.SetUID("") + obj.SetOwnerReferences(nil) + + if err := toClient.Create(ctx, obj); err != nil && !apierrors.IsAlreadyExists(err) { + return errors.Wrapf(err, "moving child object %s %s", obj.GetObjectKind().GroupVersionKind().Kind, obj.GetName()) + } + } + + return nil +} diff --git a/pkg/clustermanager/eksa_mover_test.go b/pkg/clustermanager/eksa_mover_test.go new file mode 100644 index 000000000000..e151358f9d29 --- /dev/null +++ 
b/pkg/clustermanager/eksa_mover_test.go @@ -0,0 +1,126 @@ +package clustermanager_test + +import ( + "context" + "testing" + "time" + + "github.com/go-logr/logr" + "github.com/golang/mock/gomock" + "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/aws/eks-anywhere/internal/test" + "github.com/aws/eks-anywhere/pkg/clients/kubernetes" + "github.com/aws/eks-anywhere/pkg/cluster" + "github.com/aws/eks-anywhere/pkg/clustermanager" + "github.com/aws/eks-anywhere/pkg/clustermanager/mocks" + "github.com/aws/eks-anywhere/pkg/controller/clientutil" + "github.com/aws/eks-anywhere/pkg/types" +) + +type moverTest struct { + gomega.Gomega + tb testing.TB + clientFactory *mocks.MockClientFactory + ctx context.Context + spec *cluster.Spec + fromClient kubernetes.Client + toClient kubernetes.Client + log logr.Logger + mgmtCluster *types.Cluster + bootstrap *types.Cluster +} + +func newMoverTest(tb testing.TB) *moverTest { + ctrl := gomock.NewController(tb) + return &moverTest{ + tb: tb, + Gomega: gomega.NewWithT(tb), + clientFactory: mocks.NewMockClientFactory(ctrl), + ctx: context.Background(), + spec: test.VSphereClusterSpec(tb, tb.Name()), + log: test.NewNullLogger(), + bootstrap: &types.Cluster{ + KubeconfigFile: "bootstrap-config", + }, + mgmtCluster: &types.Cluster{ + KubeconfigFile: "my-config", + }, + } +} + +func (a *moverTest) buildClients(fromObjs, toObjs []kubernetes.Object) { + a.fromClient = test.NewFakeKubeClient(clientutil.ObjectsToClientObjects(fromObjs)...) + a.toClient = test.NewFakeKubeClient(clientutil.ObjectsToClientObjects(toObjs)...) +} + +func TestMoverSuccess(t *testing.T) { + tt := newMoverTest(t) + objs := tt.spec.ClusterAndChildren() + tt.buildClients(objs, nil) + m := clustermanager.NewMover(tt.log, tt.clientFactory, + clustermanager.WithMoverRetryBackOff(time.Millisecond), + clustermanager.WithMoverNoTimeouts(), + ) + + tt.Expect(m.Move(tt.ctx, tt.spec, tt.fromClient, tt.toClient)).To(gomega.Succeed()) + + for _, obj := range objs { + u := &unstructured.Unstructured{} + u.SetGroupVersionKind(obj.GetObjectKind().GroupVersionKind()) + tt.Expect(tt.toClient.Get(tt.ctx, obj.GetName(), obj.GetNamespace(), u)).To(gomega.Succeed()) + original, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + tt.Expect(err).To(gomega.Succeed()) + tt.Expect(u.Object["spec"]).To(gomega.BeComparableTo(original["spec"])) + } +} + +func TestMoverFailReadCluster(t *testing.T) { + tt := newMoverTest(t) + tt.buildClients(nil, nil) + m := clustermanager.NewMover(tt.log, tt.clientFactory, + clustermanager.WithMoverRetryBackOff(time.Millisecond), + clustermanager.WithMoverApplyClusterTimeout(time.Millisecond), + ) + err := m.Move(tt.ctx, tt.spec, tt.fromClient, tt.toClient) + + tt.Expect(err).To(gomega.MatchError(gomega.ContainSubstring("reading cluster from source"))) +} + +func TestMoverFailGetChildren(t *testing.T) { + tt := newMoverTest(t) + objs := []kubernetes.Object{tt.spec.Cluster} + tt.buildClients(objs, nil) + m := clustermanager.NewMover(tt.log, tt.clientFactory, + clustermanager.WithMoverRetryBackOff(time.Millisecond), + clustermanager.WithMoverApplyClusterTimeout(time.Millisecond), + ) + + err := m.Move(tt.ctx, tt.spec, tt.fromClient, tt.toClient) + tt.Expect(err).To(gomega.MatchError(gomega.ContainSubstring("reading child object"))) +} + +func TestMoverAlreadyMoved(t *testing.T) { + tt := newMoverTest(t) + objs := tt.spec.ClusterAndChildren() + tt.buildClients(objs, objs) + m := 
clustermanager.NewMover(tt.log, tt.clientFactory, + clustermanager.WithMoverRetryBackOff(time.Millisecond), + clustermanager.WithMoverApplyClusterTimeout(time.Millisecond), + ) + + err := m.Move(tt.ctx, tt.spec, tt.fromClient, tt.toClient) + tt.Expect(err).To(gomega.Succeed()) + + for _, obj := range objs { + u := &unstructured.Unstructured{} + u.SetGroupVersionKind(obj.GetObjectKind().GroupVersionKind()) + tt.Expect(tt.toClient.Get(tt.ctx, obj.GetName(), obj.GetNamespace(), u)).To(gomega.Succeed()) + original, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + tt.Expect(err).To(gomega.Succeed()) + // the entire object including metadata/status should be equal if the object already exists in dst + tt.Expect(u.Object).To(gomega.BeComparableTo(original)) + } +} diff --git a/pkg/dependencies/factory.go b/pkg/dependencies/factory.go index 66966a432b0d..3f56d8f64f02 100644 --- a/pkg/dependencies/factory.go +++ b/pkg/dependencies/factory.go @@ -115,6 +115,7 @@ type Dependencies struct { EksaInstaller *clustermanager.EKSAInstaller DeleteClusterDefaulter cli.DeleteClusterDefaulter ClusterDeleter clustermanager.Deleter + ClusterMover *clustermanager.Mover } // KubeClients defines super struct that exposes all behavior. @@ -1216,6 +1217,26 @@ func (f *Factory) WithClusterDeleter() *Factory { return f } +// WithClusterMover builds a cluster mover. +func (f *Factory) WithClusterMover() *Factory { + f.WithLogger().WithUnAuthKubeClient().WithLogger() + + f.buildSteps = append(f.buildSteps, func(_ context.Context) error { + var opts []clustermanager.MoverOpt + if f.config.noTimeouts { + opts = append(opts, clustermanager.WithMoverNoTimeouts()) + } + + f.dependencies.ClusterMover = clustermanager.NewMover( + f.dependencies.Logger, + f.dependencies.UnAuthKubeClient, + opts..., + ) + return nil + }) + return f +} + // WithValidatorClients builds KubeClients. func (f *Factory) WithValidatorClients() *Factory { f.WithKubectl().WithUnAuthKubeClient() diff --git a/pkg/dependencies/factory_test.go b/pkg/dependencies/factory_test.go index 626e412574df..d8d0e1c9bb81 100644 --- a/pkg/dependencies/factory_test.go +++ b/pkg/dependencies/factory_test.go @@ -645,6 +645,18 @@ func TestFactoryBuildWithClusterDeleterNoTimeout(t *testing.T) { tt.Expect(deps.ClusterApplier).NotTo(BeNil()) } +func TestFactoryBuildWithClusterMoverNoTimeout(t *testing.T) { + tt := newTest(t, vsphere) + deps, err := dependencies.NewFactory(). + WithLocalExecutables(). + WithNoTimeouts(). + WithClusterMover(). + Build(context.Background()) + + tt.Expect(err).To(BeNil()) + tt.Expect(deps.ClusterApplier).NotTo(BeNil()) +} + func TestFactoryBuildWithAwsIamAuthNoTimeout(t *testing.T) { tt := newTest(t, vsphere) deps, err := dependencies.NewFactory(). 
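The move in patch 002 boils down to three steps: pause reconciliation on the
source cluster, recreate the Cluster object on the target, then copy each child
object. A condensed sketch of the copy step, assuming the same client and error
packages the patch imports (the helper name copyObject is illustrative, not
part of the patch):

	package clustermanager

	import (
		"context"

		"github.com/pkg/errors"
		apierrors "k8s.io/apimachinery/pkg/api/errors"
		"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

		"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
	)

	// copyObject strips the fields the source API server assigned so the target
	// API server accepts a fresh Create; tolerating AlreadyExists is what makes
	// a re-run of Move idempotent.
	func copyObject(ctx context.Context, obj *unstructured.Unstructured, to kubernetes.Client) error {
		obj.SetResourceVersion("")  // server-assigned; Create rejects objects that carry one
		obj.SetUID("")              // unique per cluster, must not carry over
		obj.SetOwnerReferences(nil) // owners receive new UIDs on the target side
		if err := to.Create(ctx, obj); err != nil && !apierrors.IsAlreadyExists(err) {
			return errors.Wrapf(err, "moving object %s", obj.GetName())
		}
		return nil
	}

This is the same clear-then-create pattern moveClusterResource and
moveChildObjects apply above, which is why TestMoverAlreadyMoved can run the
move twice against identical source and target objects and still succeed.
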
From 12f8b6bfd008c9fd791c962fe9ddc8ee74b031b1 Mon Sep 17 00:00:00 2001 From: Aravind Ramalingam <60027164+pokearu@users.noreply.github.com> Date: Fri, 22 Mar 2024 12:24:32 -0700 Subject: [PATCH 003/193] Fixing replace tinkerbell action images with registry endpoint (#7885) --- pkg/providers/tinkerbell/template.go | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/pkg/providers/tinkerbell/template.go b/pkg/providers/tinkerbell/template.go index d829723d8f66..3a6c14309028 100644 --- a/pkg/providers/tinkerbell/template.go +++ b/pkg/providers/tinkerbell/template.go @@ -454,9 +454,11 @@ func buildTemplateMapCP( } // Replace public.ecr.aws endpoint with the endpoint given in the cluster config file - localRegistry := values["publicMirror"].(string) - cpTemplateOverride = strings.ReplaceAll(cpTemplateOverride, defaultRegistry, localRegistry) - etcdTemplateOverride = strings.ReplaceAll(etcdTemplateOverride, defaultRegistry, localRegistry) + localRegistry := values["coreEKSAMirror"].(string) + if localRegistry != "" { + cpTemplateOverride = strings.ReplaceAll(cpTemplateOverride, defaultRegistry, localRegistry) + etcdTemplateOverride = strings.ReplaceAll(etcdTemplateOverride, defaultRegistry, localRegistry) + } } if clusterSpec.Cluster.Spec.ProxyConfiguration != nil { @@ -553,8 +555,10 @@ func buildTemplateMapMD( } // Replace public.ecr.aws endpoint with the endpoint given in the cluster config file - localRegistry := values["publicMirror"].(string) - workerTemplateOverride = strings.ReplaceAll(workerTemplateOverride, defaultRegistry, localRegistry) + localRegistry := values["coreEKSAMirror"].(string) + if localRegistry != "" { + workerTemplateOverride = strings.ReplaceAll(workerTemplateOverride, defaultRegistry, localRegistry) + } } if clusterSpec.Cluster.Spec.ProxyConfiguration != nil { @@ -624,6 +628,8 @@ func populateRegistryMirrorValues(clusterSpec *cluster.Spec, values map[string]i values["mirrorBase"] = registryMirror.BaseRegistry values["insecureSkip"] = registryMirror.InsecureSkipVerify values["publicMirror"] = containerd.ToAPIEndpoint(registryMirror.CoreEKSAMirror()) + values["coreEKSAMirror"] = registryMirror.CoreEKSAMirror() + if len(registryMirror.CACertContent) > 0 { values["registryCACert"] = registryMirror.CACertContent } From 6b626bb7740c076069ea33905c59bf7080c514b9 Mon Sep 17 00:00:00 2001 From: Aravind Ramalingam <60027164+pokearu@users.noreply.github.com> Date: Fri, 22 Mar 2024 14:05:32 -0700 Subject: [PATCH 004/193] Fixing docker e2e emissary tests for hello application (#7887) --- test/framework/cluster.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/framework/cluster.go b/test/framework/cluster.go index d5ce8fe297e6..fb7126e017f1 100644 --- a/test/framework/cluster.go +++ b/test/framework/cluster.go @@ -1610,7 +1610,7 @@ func (e *ClusterE2ETest) TestEmissaryPackageRouting(packageName, checkName strin ctx := context.Background() packageMetadatNamespace := fmt.Sprintf("%s-%s", constants.EksaPackagesName, e.ClusterName) - err := e.KubectlClient.ApplyKubeSpecFromBytes(ctx, e.Cluster(), emisarryPackage) + err := e.KubectlClient.ApplyKubeSpecFromBytesWithNamespace(ctx, e.Cluster(), emisarryPackage, packageMetadatNamespace) if err != nil { e.T.Errorf("Error upgrading emissary package: %v", err) return @@ -1634,6 +1634,8 @@ func (e *ClusterE2ETest) TestEmissaryPackageRouting(packageName, checkName strin e.T.Errorf("Error applying roles for oids: %v", err) return } + e.T.Log("Waiting for hello service") + time.Sleep(60 * 
time.Second) // Functional testing of Emissary Ingress ingresssvcAddress := checkName + "." + constants.EksaPackagesName + ".svc.cluster.local" From a29b9160530a4ed61dd6c8fe07864de86d984196 Mon Sep 17 00:00:00 2001 From: Saurabh Parekh Date: Fri, 22 Mar 2024 16:42:33 -0700 Subject: [PATCH 005/193] Update CLI generate clusterconfig description for provider flag (#7888) --- cmd/eksctl-anywhere/cmd/generateclusterconfig.go | 2 +- pkg/constants/constants.go | 10 ++++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/cmd/eksctl-anywhere/cmd/generateclusterconfig.go b/cmd/eksctl-anywhere/cmd/generateclusterconfig.go index 0d1ac32269b3..78ce9302dd47 100644 --- a/cmd/eksctl-anywhere/cmd/generateclusterconfig.go +++ b/cmd/eksctl-anywhere/cmd/generateclusterconfig.go @@ -49,7 +49,7 @@ func preRunGenerateClusterConfig(cmd *cobra.Command, args []string) { func init() { generateCmd.AddCommand(generateClusterConfigCmd) - generateClusterConfigCmd.Flags().StringP("provider", "p", "", "Provider to use (vsphere or tinkerbell or docker)") + generateClusterConfigCmd.Flags().StringP("provider", "p", "", fmt.Sprintf("Provider to use (%s)", strings.Join(constants.SupportedProviders, " or "))) err := generateClusterConfigCmd.MarkFlagRequired("provider") if err != nil { log.Fatalf("marking flag as required: %v", err) diff --git a/pkg/constants/constants.go b/pkg/constants/constants.go index 178e2b58ff13..3eb41b91e27f 100644 --- a/pkg/constants/constants.go +++ b/pkg/constants/constants.go @@ -115,3 +115,13 @@ const ( // EKSACLIFieldManager is the owner name for fields applied by the EKS-A CLI. const EKSACLIFieldManager = "eks-a-cli" + +// SupportedProviders is the list of supported providers for generating EKS-A cluster spec. +var SupportedProviders = []string{ + VSphereProviderName, + CloudStackProviderName, + TinkerbellProviderName, + DockerProviderName, + NutanixProviderName, + SnowProviderName, +} From cec82a82a3b6d4f400912c835b640fab1f071bde Mon Sep 17 00:00:00 2001 From: Aravind Ramalingam <60027164+pokearu@users.noreply.github.com> Date: Fri, 22 Mar 2024 17:50:32 -0700 Subject: [PATCH 006/193] Adding support for mutliple oci namespaces in BR OS vsphere (#7876) --- internal/pkg/api/cluster.go | 6 +- pkg/providers/vsphere/config/template-cp.yaml | 24 +- pkg/providers/vsphere/config/template-md.yaml | 12 +- pkg/providers/vsphere/template.go | 12 + ..._mirror_config_multiple_ocinamespaces.yaml | 114 +++ ...rror_config_multiple_ocinamespaces_cp.yaml | 730 ++++++++++++++++++ ...rror_config_multiple_ocinamespaces_md.yaml | 101 +++ pkg/providers/vsphere/vsphere_test.go | 37 + pkg/validations/cluster.go | 9 - pkg/validations/cluster_test.go | 2 +- test/e2e/vsphere_test.go | 13 + test/framework/registry_mirror.go | 35 +- 12 files changed, 1079 insertions(+), 16 deletions(-) create mode 100644 pkg/providers/vsphere/testdata/cluster_bottlerocket_mirror_config_multiple_ocinamespaces.yaml create mode 100644 pkg/providers/vsphere/testdata/expected_results_bottlerocket_mirror_config_multiple_ocinamespaces_cp.yaml create mode 100644 pkg/providers/vsphere/testdata/expected_results_bottlerocket_mirror_config_multiple_ocinamespaces_md.yaml diff --git a/internal/pkg/api/cluster.go b/internal/pkg/api/cluster.go index 07cb59fe0660..a2fd6f03b58a 100644 --- a/internal/pkg/api/cluster.go +++ b/internal/pkg/api/cluster.go @@ -271,7 +271,7 @@ func WithProxyConfig(httpProxy, httpsProxy string, noProxy []string) ClusterFill } // WithRegistryMirror adds a registry mirror configuration. 
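+// An illustrative call with the new variadic namespaces (the registry and
+// namespace values below mirror this patch's vSphere testdata; caCert is a
+// placeholder for the mirror's CA bundle):
+//
+//	WithRegistryMirror("1.2.3.4", "443", caCert, false, false,
+//		anywherev1.OCINamespace{Registry: "public.ecr.aws", Namespace: "eks-anywhere"},
+//		anywherev1.OCINamespace{Registry: "docker.io", Namespace: "eks-anywhere"},
+//	)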
-func WithRegistryMirror(endpoint, port string, caCert string, authenticate bool, insecureSkipVerify bool) ClusterFiller { +func WithRegistryMirror(endpoint, port string, caCert string, authenticate bool, insecureSkipVerify bool, ociNamespaces ...anywherev1.OCINamespace) ClusterFiller { return func(c *anywherev1.Cluster) { if c.Spec.RegistryMirrorConfiguration == nil { c.Spec.RegistryMirrorConfiguration = &anywherev1.RegistryMirrorConfiguration{} @@ -281,6 +281,10 @@ func WithRegistryMirror(endpoint, port string, caCert string, authenticate bool, c.Spec.RegistryMirrorConfiguration.CACertContent = caCert c.Spec.RegistryMirrorConfiguration.Authenticate = authenticate c.Spec.RegistryMirrorConfiguration.InsecureSkipVerify = insecureSkipVerify + + if len(ociNamespaces) != 0 { + c.Spec.RegistryMirrorConfiguration.OCINamespaces = ociNamespaces + } } } diff --git a/pkg/providers/vsphere/config/template-cp.yaml b/pkg/providers/vsphere/config/template-cp.yaml index 70165168d331..d7a4af50c5f2 100644 --- a/pkg/providers/vsphere/config/template-cp.yaml +++ b/pkg/providers/vsphere/config/template-cp.yaml @@ -133,11 +133,21 @@ spec: {{- end }} {{- if and .registryMirrorMap (eq .format "bottlerocket") }} registryMirror: - endpoint: {{ .publicMirror }} + {{- if .publicECRMirror }} + endpoint: {{ .publicECRMirror }} + {{- end }} {{- if .registryCACert }} caCert: | {{ .registryCACert | indent 10 }} {{- end }} + {{- if not .publicECRMirror }} + mirrors: + {{- range $orig, $mirror := .registryMirrorMap }} + - registry: "{{ $orig }}" + endpoints: + - {{ $mirror }} + {{- end }} + {{- end }} {{- end }} {{- if .bottlerocketSettings }} {{ .bottlerocketSettings | indent 6 }} @@ -422,11 +432,21 @@ spec: {{- end }} {{- if and .registryMirrorMap (eq .format "bottlerocket") }} registryMirror: - endpoint: {{ .publicMirror }} + {{- if .publicECRMirror }} + endpoint: {{ .publicECRMirror }} + {{- end }} {{- if .registryCACert }} caCert: | {{ .registryCACert | indent 10 }} {{- end }} + {{- if not .publicECRMirror }} + mirrors: + {{- range $orig, $mirror := .registryMirrorMap }} + - registry: "{{ $orig }}" + endpoints: + - {{ $mirror }} + {{- end }} + {{- end }} {{- end }} {{- if .bottlerocketSettings }} {{ .bottlerocketSettings | indent 6 }} diff --git a/pkg/providers/vsphere/config/template-md.yaml b/pkg/providers/vsphere/config/template-md.yaml index 9427538701b4..6637ba98eeab 100644 --- a/pkg/providers/vsphere/config/template-md.yaml +++ b/pkg/providers/vsphere/config/template-md.yaml @@ -24,11 +24,21 @@ spec: {{- end }} {{- if and .registryMirrorMap (eq .format "bottlerocket") }} registryMirror: - endpoint: {{ .publicMirror }} + {{- if .publicECRMirror }} + endpoint: {{ .publicECRMirror }} + {{- end }} {{- if .registryCACert }} caCert: | {{ .registryCACert | indent 12 }} {{- end }} + {{- if not .publicECRMirror }} + mirrors: + {{- range $orig, $mirror := .registryMirrorMap }} + - registry: "{{ $orig }}" + endpoints: + - {{ $mirror }} + {{- end }} + {{- end }} {{- end }} {{- if .bottlerocketSettings }} {{ .bottlerocketSettings | indent 8 }} diff --git a/pkg/providers/vsphere/template.go b/pkg/providers/vsphere/template.go index 2e24f7a8d614..50048573d8e5 100644 --- a/pkg/providers/vsphere/template.go +++ b/pkg/providers/vsphere/template.go @@ -230,6 +230,12 @@ func buildTemplateMapCP( values["registryCACert"] = registryMirror.CACertContent } + if controlPlaneMachineSpec.OSFamily == anywherev1.Bottlerocket && + len(registryMirror.NamespacedRegistryMap) == 1 && + registryMirror.CoreEKSAMirror() != "" { + 
values["publicECRMirror"] = containerd.ToAPIEndpoint(registryMirror.CoreEKSAMirror()) + } + if registryMirror.Auth { values["registryAuth"] = registryMirror.Auth username, password, err := config.ReadCredentials() @@ -419,6 +425,12 @@ func buildTemplateMapMD( values["registryCACert"] = registryMirror.CACertContent } + if workerNodeGroupMachineSpec.OSFamily == anywherev1.Bottlerocket && + len(registryMirror.NamespacedRegistryMap) == 1 && + registryMirror.CoreEKSAMirror() != "" { + values["publicECRMirror"] = containerd.ToAPIEndpoint(registryMirror.CoreEKSAMirror()) + } + if registryMirror.Auth { values["registryAuth"] = registryMirror.Auth username, password, err := config.ReadCredentials() diff --git a/pkg/providers/vsphere/testdata/cluster_bottlerocket_mirror_config_multiple_ocinamespaces.yaml b/pkg/providers/vsphere/testdata/cluster_bottlerocket_mirror_config_multiple_ocinamespaces.yaml new file mode 100644 index 000000000000..ba3665a89029 --- /dev/null +++ b/pkg/providers/vsphere/testdata/cluster_bottlerocket_mirror_config_multiple_ocinamespaces.yaml @@ -0,0 +1,114 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: test +spec: + controlPlaneConfiguration: + count: 3 + endpoint: + host: 1.2.3.4 + machineGroupRef: + name: test-cp + kind: VSphereMachineConfig + kubernetesVersion: "1.21" + workerNodeGroupConfigurations: + - count: 3 + machineGroupRef: + name: test-wn + kind: VSphereMachineConfig + name: md-0 + externalEtcdConfiguration: + count: 3 + machineGroupRef: + name: test-etcd + kind: VSphereMachineConfig + datacenterRef: + kind: VSphereDatacenterConfig + name: test + clusterNetwork: + cni: "cilium" + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 + registryMirrorConfiguration: + endpoint: 1.2.3.4 + port: 1234 + ociNamespaces: + - registry: "public.ecr.aws" + namespace: "eks-anywhere" + - registry: "docker.io" + namespace: "eks-anywhere" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: VSphereMachineConfig +metadata: + name: test-cp +spec: + diskGiB: 25 + cloneMode: linkedClone + datastore: "/SDDC-Datacenter/datastore/WorkloadDatastore" + folder: "/SDDC-Datacenter/vm" + memoryMiB: 8192 + numCPUs: 2 + osFamily: bottlerocket + resourcePool: "*/Resources" + storagePolicyName: "vSAN Default Storage Policy" + template: "/SDDC-Datacenter/vm/Templates/bottlerocket-1804-kube-v1.19.6" + users: + - name: ec2-user + sshAuthorizedKeys: + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: VSphereMachineConfig +metadata: + name: test-wn +spec: + diskGiB: 25 + cloneMode: linkedClone + datastore: "/SDDC-Datacenter/datastore/WorkloadDatastore" + folder: "/SDDC-Datacenter/vm" + memoryMiB: 4096 + numCPUs: 3 + osFamily: bottlerocket + resourcePool: "*/Resources" + 
storagePolicyName: "vSAN Default Storage Policy" + template: "/SDDC-Datacenter/vm/Templates/bottlerocket-1804-kube-v1.19.6" + users: + - name: ec2-user + sshAuthorizedKeys: + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: VSphereMachineConfig +metadata: + name: test-etcd +spec: + diskGiB: 25 + cloneMode: linkedClone + datastore: "/SDDC-Datacenter/datastore/WorkloadDatastore" + folder: "/SDDC-Datacenter/vm" + memoryMiB: 4096 + numCPUs: 3 + osFamily: bottlerocket + resourcePool: "*/Resources" + storagePolicyName: "vSAN Default Storage Policy" + template: "/SDDC-Datacenter/vm/Templates/bottlerocket-1804-kube-v1.19.6" + users: + - name: ec2-user + sshAuthorizedKeys: + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: VSphereDatacenterConfig +metadata: + name: test +spec: + datacenter: "SDDC-Datacenter" + network: "/SDDC-Datacenter/network/sddc-cgw-network-1" + server: "vsphere_server" + thumbprint: "ABCDEFG" + insecure: false diff --git a/pkg/providers/vsphere/testdata/expected_results_bottlerocket_mirror_config_multiple_ocinamespaces_cp.yaml b/pkg/providers/vsphere/testdata/expected_results_bottlerocket_mirror_config_multiple_ocinamespaces_cp.yaml new file mode 100644 index 000000000000..313aa68889db --- /dev/null +++ b/pkg/providers/vsphere/testdata/expected_results_bottlerocket_mirror_config_multiple_ocinamespaces_cp.yaml @@ -0,0 +1,730 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test + namespace: eksa-system +spec: + clusterNetwork: + pods: + cidrBlocks: [192.168.0.0/16] + services: + cidrBlocks: [10.96.0.0/12] + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereCluster + name: test + managedExternalEtcdRef: + apiVersion: etcdcluster.cluster.x-k8s.io/v1beta1 + kind: EtcdadmCluster + name: test-etcd + namespace: eksa-system +--- +apiVersion: 
infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereCluster +metadata: + name: test + namespace: eksa-system +spec: + controlPlaneEndpoint: + host: 1.2.3.4 + port: 6443 + identityRef: + kind: Secret + name: test-vsphere-credentials + server: vsphere_server + thumbprint: 'ABCDEFG' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: test-control-plane-template-1234567890000 + namespace: eksa-system +spec: + template: + spec: + cloneMode: linkedClone + datacenter: 'SDDC-Datacenter' + datastore: /SDDC-Datacenter/datastore/WorkloadDatastore + diskGiB: 25 + folder: '/SDDC-Datacenter/vm' + memoryMiB: 8192 + network: + devices: + - dhcp4: true + networkName: /SDDC-Datacenter/network/sddc-cgw-network-1 + numCPUs: 2 + resourcePool: '*/Resources' + server: vsphere_server + storagePolicyName: "vSAN Default Storage Policy" + template: /SDDC-Datacenter/vm/Templates/bottlerocket-1804-kube-v1.19.6 + thumbprint: 'ABCDEFG' +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: test + namespace: eksa-system +spec: + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: test-control-plane-template-1234567890000 + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: public.ecr.aws/eks-distro/kubernetes + etcd: + external: + endpoints: [] + caFile: "/var/lib/kubeadm/pki/etcd/ca.crt" + certFile: "/var/lib/kubeadm/pki/server-etcd-client.crt" + keyFile: "/var/lib/kubeadm/pki/apiserver-etcd-client.key" + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.3-eks-1-21-4 + pause: + imageRepository: public.ecr.aws/eks-distro/kubernetes/pause + imageTag: v1.21.2-eks-1-21-4 + bottlerocketBootstrap: + imageRepository: public.ecr.aws/l0g8r8j6/bottlerocket-bootstrap + imageTag: v1-21-4-eks-a-v0.0.0-dev-build.158 + registryMirror: + mirrors: + - registry: "docker.io" + endpoints: + - 1.2.3.4:1234/v2/eks-anywhere + - registry: "public.ecr.aws" + endpoints: + - 1.2.3.4:1234/v2/eks-anywhere + apiServer: + extraArgs: + cloud-provider: external + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + extraVolumes: + - hostPath: /var/lib/kubeadm/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + controllerManager: + extraArgs: + cloud-provider: external + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + extraVolumes: + - hostPath: /var/lib/kubeadm/controller-manager.conf + mountPath: /etc/kubernetes/controller-manager.conf + name: kubeconfig + pathType: File + readOnly: true + scheduler: + extraArgs: + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + extraVolumes: + - hostPath: /var/lib/kubeadm/scheduler.conf + mountPath: /etc/kubernetes/scheduler.conf + name: kubeconfig + pathType: File + readOnly: true + certificatesDir: /var/lib/kubeadm/pki + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: vip_arp + 
value: "true" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: address + value: 1.2.3.4 + image: public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.3.7-eks-a-v0.0.0-dev-build.158 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostNetwork: true + volumes: + - hostPath: + path: /var/lib/kubeadm/admin.conf + name: kubeconfig + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. 
+ - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. + - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: '{{ ds.meta_data.hostname }}' + joinConfiguration: + pause: + imageRepository: public.ecr.aws/eks-distro/kubernetes/pause + imageTag: v1.21.2-eks-1-21-4 + bottlerocketBootstrap: + imageRepository: public.ecr.aws/l0g8r8j6/bottlerocket-bootstrap + imageTag: v1-21-4-eks-a-v0.0.0-dev-build.158 + registryMirror: + mirrors: + - registry: "docker.io" + endpoints: + - 1.2.3.4:1234/v2/eks-anywhere + - registry: "public.ecr.aws" + endpoints: + - 1.2.3.4:1234/v2/eks-anywhere + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: '{{ ds.meta_data.hostname }}' + preKubeadmCommands: + - hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts + - echo "{{ ds.meta_data.hostname }}" >/etc/hostname + useExperimentalRetryJoin: true + users: + - name: ec2-user + sshAuthorizedKeys: + - 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ==' + sudo: ALL=(ALL) NOPASSWD:ALL + format: bottlerocket + replicas: 3 + version: v1.21.2-eks-1-21-4 +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test-cpi + namespace: eksa-system +spec: + strategy: Reconcile + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: test + resources: + - kind: Secret + name: test-cloud-controller-manager + - kind: Secret + name: test-cloud-provider-vsphere-credentials + - kind: ConfigMap + name: test-cpi-manifests +--- +kind: EtcdadmCluster +apiVersion: etcdcluster.cluster.x-k8s.io/v1beta1 +metadata: + name: test-etcd + namespace: eksa-system +spec: + replicas: 3 + etcdadmConfigSpec: + etcdadmBuiltin: true + format: bottlerocket + bottlerocketConfig: + etcdImage: public.ecr.aws/eks-distro/etcd-io/etcd:v3.4.16-eks-1-21-4 + bootstrapImage: public.ecr.aws/l0g8r8j6/bottlerocket-bootstrap:v1-21-4-eks-a-v0.0.0-dev-build.158 + pauseImage: public.ecr.aws/eks-distro/kubernetes/pause:v1.21.2-eks-1-21-4 + cipherSuites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + users: + - name: ec2-user + sshAuthorizedKeys: + - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ==' + sudo: ALL=(ALL) NOPASSWD:ALL + registryMirror: + endpoint: 1.2.3.4:1234/v2/eks-anywhere + infrastructureTemplate: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: test-etcd-template-1234567890000 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: test-etcd-template-1234567890000 + namespace: 'eksa-system' +spec: + template: + spec: + cloneMode: linkedClone + datacenter: 'SDDC-Datacenter' + datastore: /SDDC-Datacenter/datastore/WorkloadDatastore + diskGiB: 25 + folder: '/SDDC-Datacenter/vm' + memoryMiB: 8192 + network: + devices: + - dhcp4: true + networkName: /SDDC-Datacenter/network/sddc-cgw-network-1 + numCPUs: 3 + resourcePool: '*/Resources' + server: vsphere_server + storagePolicyName: "vSAN Default Storage Policy" + template: /SDDC-Datacenter/vm/Templates/bottlerocket-1804-kube-v1.19.6 + thumbprint: 'ABCDEFG' +--- +apiVersion: v1 +kind: 
Secret +metadata: + name: test-vsphere-credentials + namespace: eksa-system + labels: + clusterctl.cluster.x-k8s.io/move: "true" +data: + username: dnNwaGVyZV91c2VybmFtZQ== + password: dnNwaGVyZV9wYXNzd29yZA== +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-cloud-controller-manager + namespace: eksa-system +stringData: + data: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + namespace: kube-system +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-cloud-provider-vsphere-credentials + namespace: eksa-system +stringData: + data: | + apiVersion: v1 + kind: Secret + metadata: + name: cloud-provider-vsphere-credentials + namespace: kube-system + data: + vsphere_server.password: dnNwaGVyZV9wYXNzd29yZA== + vsphere_server.username: dnNwaGVyZV91c2VybmFtZQ== + type: Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - '*' + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - delete + - update + - create + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: v1 + data: + vsphere.conf: | + global: + secretName: cloud-provider-vsphere-credentials + secretNamespace: kube-system + thumbprint: "ABCDEFG" + insecureFlag: false + vcenter: + vsphere_server: + datacenters: + - 'SDDC-Datacenter' + secretName: cloud-provider-vsphere-credentials + secretNamespace: kube-system + server: 'vsphere_server' + thumbprint: 'ABCDEFG' + kind: ConfigMap + metadata: + name: vsphere-cloud-config + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: servicecatalog.k8s.io:apiserver-authentication-reader + namespace: kube-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: v1 + kind: Service + metadata: + labels: + component: cloud-controller-manager + name: cloud-controller-manager + namespace: kube-system + spec: + ports: + - port: 443 + protocol: TCP + targetPort: 43001 + selector: + component: cloud-controller-manager + type: NodePort + --- + apiVersion: apps/v1 + kind: DaemonSet + 
metadata: + labels: + k8s-app: vsphere-cloud-controller-manager + name: vsphere-cloud-controller-manager + namespace: kube-system + spec: + selector: + matchLabels: + k8s-app: vsphere-cloud-controller-manager + template: + metadata: + labels: + k8s-app: vsphere-cloud-controller-manager + spec: + containers: + - args: + - --v=2 + - --cloud-provider=vsphere + - --cloud-config=/etc/cloud/vsphere.conf + image: public.ecr.aws/l0g8r8j6/kubernetes/cloud-provider-vsphere/cpi/manager:v1.21.0-eks-d-1-21-eks-a-v0.0.0-dev-build.158 + name: vsphere-cloud-controller-manager + resources: + requests: + cpu: 200m + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + hostNetwork: true + serviceAccountName: cloud-controller-manager + tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + - effect: NoSchedule + key: node.kubernetes.io/not-ready + volumes: + - configMap: + name: vsphere-cloud-config + name: vsphere-config-volume + updateStrategy: + type: RollingUpdate +kind: ConfigMap +metadata: + name: test-cpi-manifests + namespace: eksa-system diff --git a/pkg/providers/vsphere/testdata/expected_results_bottlerocket_mirror_config_multiple_ocinamespaces_md.yaml b/pkg/providers/vsphere/testdata/expected_results_bottlerocket_mirror_config_multiple_ocinamespaces_md.yaml new file mode 100644 index 000000000000..10608d563b70 --- /dev/null +++ b/pkg/providers/vsphere/testdata/expected_results_bottlerocket_mirror_config_multiple_ocinamespaces_md.yaml @@ -0,0 +1,101 @@ +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: test-md-0-template-1234567890000 + namespace: eksa-system +spec: + template: + spec: + joinConfiguration: + pause: + imageRepository: public.ecr.aws/eks-distro/kubernetes/pause + imageTag: v1.21.2-eks-1-21-4 + bottlerocketBootstrap: + imageRepository: public.ecr.aws/l0g8r8j6/bottlerocket-bootstrap + imageTag: v1-21-4-eks-a-v0.0.0-dev-build.158 + registryMirror: + mirrors: + - registry: "docker.io" + endpoints: + - 1.2.3.4:1234/v2/eks-anywhere + - registry: "public.ecr.aws" + endpoints: + - 1.2.3.4:1234/v2/eks-anywhere + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + taints: [] + kubeletExtraArgs: + cloud-provider: external + read-only-port: "0" + anonymous-auth: "false" + cgroup-driver: systemd + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: '{{ ds.meta_data.hostname }}' + preKubeadmCommands: + - hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts + - echo "{{ ds.meta_data.hostname }}" >/etc/hostname + users: + - name: ec2-user + sshAuthorizedKeys: + - 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ==' + sudo: ALL=(ALL) NOPASSWD:ALL + format: bottlerocket +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test-md-0 + namespace: eksa-system +spec: + clusterName: test + replicas: 3 + selector: + matchLabels: {} + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: test + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: test-md-0-template-1234567890000 + clusterName: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: test-md-0-1234567890000 + version: v1.21.2-eks-1-21-4 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: test-md-0-1234567890000 + namespace: eksa-system +spec: + template: + spec: + cloneMode: linkedClone + datacenter: 'SDDC-Datacenter' + datastore: /SDDC-Datacenter/datastore/WorkloadDatastore + diskGiB: 25 + folder: '/SDDC-Datacenter/vm' + memoryMiB: 4096 + network: + devices: + - dhcp4: true + networkName: /SDDC-Datacenter/network/sddc-cgw-network-1 + numCPUs: 3 + resourcePool: '*/Resources' + server: vsphere_server + storagePolicyName: "vSAN Default Storage Policy" + template: /SDDC-Datacenter/vm/Templates/bottlerocket-1804-kube-v1.19.6 + thumbprint: 'ABCDEFG' + +--- diff --git a/pkg/providers/vsphere/vsphere_test.go b/pkg/providers/vsphere/vsphere_test.go index 559f1f3a9f6d..ac5779dbea11 100644 --- a/pkg/providers/vsphere/vsphere_test.go +++ b/pkg/providers/vsphere/vsphere_test.go @@ -1378,6 +1378,43 @@ func TestProviderGenerateDeploymentFileWithMirrorAuth(t *testing.T) { test.AssertContentToFile(t, string(md), "testdata/expected_results_mirror_with_auth_config_md.yaml") } +func TestProviderGenerateDeploymentFileForBottleRocketWithMultipleOciNamespaces(t *testing.T) { + clusterSpecManifest := "cluster_bottlerocket_mirror_config_multiple_ocinamespaces.yaml" + mockCtrl := gomock.NewController(t) + setupContext(t) + kubectl := mocks.NewMockProviderKubectlClient(mockCtrl) + cluster := &types.Cluster{Name: "test"} + clusterSpec := givenClusterSpec(t, clusterSpecManifest) + datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest) + ctx := context.Background() + govc := NewDummyProviderGovcClient() + vscb, _ := newMockVSphereClientBuilder(mockCtrl) + ipValidator := mocks.NewMockIPValidator(mockCtrl) + ipValidator.EXPECT().ValidateControlPlaneIPUniqueness(clusterSpec.Cluster).Return(nil) + v := NewValidator(govc, vscb) + govc.osTag = bottlerocketOSTag + provider := newProvider( + t, + datacenterConfig, + clusterSpec.Cluster, + govc, + kubectl, + v, + ipValidator, + ) + if err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec); err != nil { + t.Fatalf("failed to setup and 
validate: %v", err) + } + + cp, md, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec) + if err != nil { + t.Fatalf("failed to generate cluster api spec contents: %v", err) + } + + test.AssertContentToFile(t, string(cp), "testdata/expected_results_bottlerocket_mirror_config_multiple_ocinamespaces_cp.yaml") + test.AssertContentToFile(t, string(md), "testdata/expected_results_bottlerocket_mirror_config_multiple_ocinamespaces_md.yaml") +} + func TestUpdateKubeConfig(t *testing.T) { provider := givenProvider(t) content := []byte{} diff --git a/pkg/validations/cluster.go b/pkg/validations/cluster.go index ade4be1cef1b..e4a75f656155 100644 --- a/pkg/validations/cluster.go +++ b/pkg/validations/cluster.go @@ -43,15 +43,6 @@ func ValidateOSForRegistryMirror(clusterSpec *cluster.Spec, provider providers.P return nil } - for _, mc := range machineConfigs { - // BottleRocket accepts only one registry mirror and that is hardcoded for public.ecr.aws at this moment. - // Such a validation will be removed once CAPI is patched to support more than one endpoints for BottleRocket. - if mc.OSFamily() == v1alpha1.Bottlerocket && - (len(ociNamespaces) != 1 || ociNamespaces[0].Registry != constants.DefaultCoreEKSARegistry) { - return fmt.Errorf("%s is the only registry supported in ociNamespaces for %s", constants.DefaultCoreEKSARegistry, v1alpha1.Bottlerocket) - } - } - return nil } diff --git a/pkg/validations/cluster_test.go b/pkg/validations/cluster_test.go index ca87f30a7cd9..553b598f13a7 100644 --- a/pkg/validations/cluster_test.go +++ b/pkg/validations/cluster_test.go @@ -291,7 +291,7 @@ func TestValidateOSForRegistryMirrorNoPublicEcrRegistry(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { tt.clusterSpec.Cluster.Spec.RegistryMirrorConfiguration = test.mirrorConfig - tt.Expect(validations.ValidateOSForRegistryMirror(tt.clusterSpec, tt.provider)).To(MatchError("public.ecr.aws is the only registry supported in ociNamespaces for bottlerocket")) + tt.Expect(validations.ValidateOSForRegistryMirror(tt.clusterSpec, tt.provider)).To(Succeed()) }) } } diff --git a/test/e2e/vsphere_test.go b/test/e2e/vsphere_test.go index 53b31606bd71..5d18e5cef6ef 100644 --- a/test/e2e/vsphere_test.go +++ b/test/e2e/vsphere_test.go @@ -1790,6 +1790,19 @@ func TestVSphereKubernetes129BottlerocketAuthenticatedRegistryMirror(t *testing. 
runRegistryMirrorConfigFlow(test) } +func TestVSphereKubernetes129BottlerocketRegistryMirrorOciNamespaces(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithBottleRocket129(), framework.WithPrivateNetwork()), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithRegistryMirrorOciNamespaces(constants.VSphereProviderName), + ) + runRegistryMirrorConfigFlow(test) +} + // Clone mode func TestVSphereKubernetes128FullClone(t *testing.T) { diskSize := 30 diff --git a/test/framework/registry_mirror.go b/test/framework/registry_mirror.go index 44322448a30c..6e6ce730da33 100644 --- a/test/framework/registry_mirror.go +++ b/test/framework/registry_mirror.go @@ -7,6 +7,7 @@ import ( "os" "github.com/aws/eks-anywhere/internal/pkg/api" + "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/constants" ) @@ -33,6 +34,11 @@ const ( PrivateRegistryUsernameTinkerbellVar = "T_PRIVATE_REGISTRY_MIRROR_USERNAME_TINKERBELL" PrivateRegistryPasswordTinkerbellVar = "T_PRIVATE_REGISTRY_MIRROR_PASSWORD_TINKERBELL" PrivateRegistryCACertTinkerbellVar = "T_PRIVATE_REGISTRY_MIRROR_CA_CERT_TINKERBELL" + + RegistryMirrorOciNamespacesRegistry1Var = "T_REGISTRY_MIRROR_OCINAMESPACES_REGISTRY1" + RegistryMirrorOciNamespacesNamespace1Var = "T_REGISTRY_MIRROR_OCINAMESPACES_NAMESPACE1" + RegistryMirrorOciNamespacesRegistry2Var = "T_REGISTRY_MIRROR_OCINAMESPACES_REGISTRY2" + RegistryMirrorOciNamespacesNamespace2Var = "T_REGISTRY_MIRROR_OCINAMESPACES_NAMESPACE2" ) var ( @@ -41,6 +47,7 @@ var ( registryMirrorDockerAirgappedRequiredEnvVars = []string{RegistryMirrorDefaultSecurityGroup, RegistryMirrorAirgappedSecurityGroup} privateRegistryMirrorRequiredEnvVars = []string{PrivateRegistryEndpointVar, PrivateRegistryPortVar, PrivateRegistryUsernameVar, PrivateRegistryPasswordVar, PrivateRegistryCACertVar} privateRegistryMirrorTinkerbellRequiredEnvVars = []string{PrivateRegistryEndpointTinkerbellVar, PrivateRegistryPortTinkerbellVar, PrivateRegistryUsernameTinkerbellVar, PrivateRegistryPasswordTinkerbellVar, PrivateRegistryCACertTinkerbellVar} + registryMirrorOciNamespacesRequiredEnvVars = []string{RegistryMirrorOciNamespacesRegistry1Var, RegistryMirrorOciNamespacesNamespace1Var} ) // WithRegistryMirrorInsecureSkipVerify sets up e2e for registry mirrors with InsecureSkipVerify option. @@ -57,6 +64,30 @@ func WithRegistryMirrorEndpointAndCert(providerName string) ClusterE2ETestOpt { } } +// WithRegistryMirrorOciNamespaces sets up e2e for registry mirrors with ocinamespaces. 
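+// It requires the T_REGISTRY_MIRROR_OCINAMESPACES_REGISTRY1 and
+// T_REGISTRY_MIRROR_OCINAMESPACES_NAMESPACE1 environment variables to be set, and it
+// appends a second OCINamespace only when both the REGISTRY2 and NAMESPACE2
+// variables are present.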
+func WithRegistryMirrorOciNamespaces(providerName string) ClusterE2ETestOpt { + return func(e *ClusterE2ETest) { + var ociNamespaces []v1alpha1.OCINamespace + + checkRequiredEnvVars(e.T, registryMirrorOciNamespacesRequiredEnvVars) + ociNamespaces = append(ociNamespaces, v1alpha1.OCINamespace{ + Registry: os.Getenv(RegistryMirrorOciNamespacesRegistry1Var), + Namespace: os.Getenv(RegistryMirrorOciNamespacesNamespace1Var), + }) + + reg2val, reg2Found := os.LookupEnv(RegistryMirrorOciNamespacesRegistry2Var) + ns2val, ns2Found := os.LookupEnv(RegistryMirrorOciNamespacesNamespace2Var) + if reg2Found && ns2Found { + ociNamespaces = append(ociNamespaces, v1alpha1.OCINamespace{ + Registry: reg2val, + Namespace: ns2val, + }) + } + + setupRegistryMirrorEndpointAndCert(e, providerName, false, ociNamespaces...) + } +} + // WithAuthenticatedRegistryMirror sets up e2e for authenticated registry mirrors. func WithAuthenticatedRegistryMirror(providerName string) ClusterE2ETestOpt { return func(e *ClusterE2ETest) { @@ -116,7 +147,7 @@ func RequiredRegistryMirrorEnvVars() []string { return append(registryMirrorRequiredEnvVars, registryMirrorDockerAirgappedRequiredEnvVars...) } -func setupRegistryMirrorEndpointAndCert(e *ClusterE2ETest, providerName string, insecureSkipVerify bool) { +func setupRegistryMirrorEndpointAndCert(e *ClusterE2ETest, providerName string, insecureSkipVerify bool, ociNamespaces ...v1alpha1.OCINamespace) { var endpoint, hostPort, username, password, registryCert string port := "443" @@ -150,7 +181,7 @@ func setupRegistryMirrorEndpointAndCert(e *ClusterE2ETest, providerName string, certificate, err := base64.StdEncoding.DecodeString(registryCert) if err == nil { e.clusterFillers = append(e.clusterFillers, - api.WithRegistryMirror(endpoint, port, string(certificate), false, insecureSkipVerify), + api.WithRegistryMirror(endpoint, port, string(certificate), false, insecureSkipVerify, ociNamespaces...), ) } From 6813a359b6b0a1a34ab3846538d68959ba166eaa Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Mon, 25 Mar 2024 02:34:54 -0700 Subject: [PATCH 007/193] [PR BOT] Generate release testdata files (#7892) --- .../test/testdata/main-bundle-release.yaml | 80 +++++++++---------- 1 file changed, 40 insertions(+), 40 deletions(-) diff --git a/release/cli/pkg/test/testdata/main-bundle-release.yaml b/release/cli/pkg/test/testdata/main-bundle-release.yaml index c7f16ed415ca..9ef39bab06c4 100644 --- a/release/cli/pkg/test/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/test/testdata/main-bundle-release.yaml @@ -112,26 +112,26 @@ spec: arch: - amd64 description: Container image for cilium image - imageDigest: sha256:d9f2b7617a84689087d8e10c75d915018620fcc9e3123a31498296435642cf61 + imageDigest: sha256:638f2f8f998b7f94afac1a2a2e6b65595fd98fe2cfa379d59ce6d577799c0b79 name: cilium os: linux - uri: public.ecr.aws/isovalent/cilium:v1.13.12-eksa.1 + uri: public.ecr.aws/isovalent/cilium:v1.13.13-eksa.1 helmChart: description: Helm chart for cilium-chart - imageDigest: sha256:5d1921b071bb9f59bc8d0e6b83fe8579669d0b34d6ffc2432baa1d15adb5872b + imageDigest: sha256:6c964c5320db446d25b4badd38b9b128b00c713865488cd54c53098a6c313252 name: cilium-chart - uri: public.ecr.aws/isovalent/cilium:1.13.12-eksa.1 + uri: public.ecr.aws/isovalent/cilium:1.13.13-eksa.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.12-eksa.1/cilium.yaml + uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.13-eksa.1/cilium.yaml operator: arch: - amd64 description: Container image for operator-generic image - imageDigest: sha256:d2c3ac2b5c4fb67a68939642a40032cb0e3a97ce49415d87aa482b9b6d663a07 + imageDigest: sha256:dad5ec573a36ed1c973b4007c616bdfbcad3c010a3fa361823bf03941c81030d name: operator-generic os: linux - uri: public.ecr.aws/isovalent/operator-generic:v1.13.12-eksa.1 - version: v1.13.12-eksa.1 + uri: public.ecr.aws/isovalent/operator-generic:v1.13.13-eksa.1 + version: v1.13.13-eksa.1 cloudStack: clusterAPIController: arch: @@ -890,26 +890,26 @@ spec: arch: - amd64 description: Container image for cilium image - imageDigest: sha256:d9f2b7617a84689087d8e10c75d915018620fcc9e3123a31498296435642cf61 + imageDigest: sha256:638f2f8f998b7f94afac1a2a2e6b65595fd98fe2cfa379d59ce6d577799c0b79 name: cilium os: linux - uri: public.ecr.aws/isovalent/cilium:v1.13.12-eksa.1 + uri: public.ecr.aws/isovalent/cilium:v1.13.13-eksa.1 helmChart: description: Helm chart for cilium-chart - imageDigest: sha256:5d1921b071bb9f59bc8d0e6b83fe8579669d0b34d6ffc2432baa1d15adb5872b + imageDigest: sha256:6c964c5320db446d25b4badd38b9b128b00c713865488cd54c53098a6c313252 name: cilium-chart - uri: public.ecr.aws/isovalent/cilium:1.13.12-eksa.1 + uri: public.ecr.aws/isovalent/cilium:1.13.13-eksa.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.12-eksa.1/cilium.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.13-eksa.1/cilium.yaml operator: arch: - amd64 description: Container image for operator-generic image - imageDigest: sha256:d2c3ac2b5c4fb67a68939642a40032cb0e3a97ce49415d87aa482b9b6d663a07 + imageDigest: sha256:dad5ec573a36ed1c973b4007c616bdfbcad3c010a3fa361823bf03941c81030d name: operator-generic os: linux - uri: public.ecr.aws/isovalent/operator-generic:v1.13.12-eksa.1 - version: v1.13.12-eksa.1 + uri: public.ecr.aws/isovalent/operator-generic:v1.13.13-eksa.1 + version: v1.13.13-eksa.1 cloudStack: clusterAPIController: arch: @@ -1668,26 +1668,26 @@ spec: arch: - amd64 description: Container image for cilium image - imageDigest: sha256:d9f2b7617a84689087d8e10c75d915018620fcc9e3123a31498296435642cf61 + imageDigest: sha256:638f2f8f998b7f94afac1a2a2e6b65595fd98fe2cfa379d59ce6d577799c0b79 name: cilium os: linux - uri: public.ecr.aws/isovalent/cilium:v1.13.12-eksa.1 + uri: public.ecr.aws/isovalent/cilium:v1.13.13-eksa.1 helmChart: description: Helm chart for cilium-chart - imageDigest: sha256:5d1921b071bb9f59bc8d0e6b83fe8579669d0b34d6ffc2432baa1d15adb5872b + imageDigest: sha256:6c964c5320db446d25b4badd38b9b128b00c713865488cd54c53098a6c313252 name: cilium-chart - uri: public.ecr.aws/isovalent/cilium:1.13.12-eksa.1 + uri: public.ecr.aws/isovalent/cilium:1.13.13-eksa.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.12-eksa.1/cilium.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.13-eksa.1/cilium.yaml operator: arch: - amd64 description: Container image for operator-generic image - imageDigest: sha256:d2c3ac2b5c4fb67a68939642a40032cb0e3a97ce49415d87aa482b9b6d663a07 + imageDigest: sha256:dad5ec573a36ed1c973b4007c616bdfbcad3c010a3fa361823bf03941c81030d name: operator-generic os: linux - uri: public.ecr.aws/isovalent/operator-generic:v1.13.12-eksa.1 - version: v1.13.12-eksa.1 + uri: public.ecr.aws/isovalent/operator-generic:v1.13.13-eksa.1 + version: 
v1.13.13-eksa.1 cloudStack: clusterAPIController: arch: @@ -2446,26 +2446,26 @@ spec: arch: - amd64 description: Container image for cilium image - imageDigest: sha256:d9f2b7617a84689087d8e10c75d915018620fcc9e3123a31498296435642cf61 + imageDigest: sha256:638f2f8f998b7f94afac1a2a2e6b65595fd98fe2cfa379d59ce6d577799c0b79 name: cilium os: linux - uri: public.ecr.aws/isovalent/cilium:v1.13.12-eksa.1 + uri: public.ecr.aws/isovalent/cilium:v1.13.13-eksa.1 helmChart: description: Helm chart for cilium-chart - imageDigest: sha256:5d1921b071bb9f59bc8d0e6b83fe8579669d0b34d6ffc2432baa1d15adb5872b + imageDigest: sha256:6c964c5320db446d25b4badd38b9b128b00c713865488cd54c53098a6c313252 name: cilium-chart - uri: public.ecr.aws/isovalent/cilium:1.13.12-eksa.1 + uri: public.ecr.aws/isovalent/cilium:1.13.13-eksa.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.12-eksa.1/cilium.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.13-eksa.1/cilium.yaml operator: arch: - amd64 description: Container image for operator-generic image - imageDigest: sha256:d2c3ac2b5c4fb67a68939642a40032cb0e3a97ce49415d87aa482b9b6d663a07 + imageDigest: sha256:dad5ec573a36ed1c973b4007c616bdfbcad3c010a3fa361823bf03941c81030d name: operator-generic os: linux - uri: public.ecr.aws/isovalent/operator-generic:v1.13.12-eksa.1 - version: v1.13.12-eksa.1 + uri: public.ecr.aws/isovalent/operator-generic:v1.13.13-eksa.1 + version: v1.13.13-eksa.1 cloudStack: clusterAPIController: arch: @@ -3224,26 +3224,26 @@ spec: arch: - amd64 description: Container image for cilium image - imageDigest: sha256:d9f2b7617a84689087d8e10c75d915018620fcc9e3123a31498296435642cf61 + imageDigest: sha256:638f2f8f998b7f94afac1a2a2e6b65595fd98fe2cfa379d59ce6d577799c0b79 name: cilium os: linux - uri: public.ecr.aws/isovalent/cilium:v1.13.12-eksa.1 + uri: public.ecr.aws/isovalent/cilium:v1.13.13-eksa.1 helmChart: description: Helm chart for cilium-chart - imageDigest: sha256:5d1921b071bb9f59bc8d0e6b83fe8579669d0b34d6ffc2432baa1d15adb5872b + imageDigest: sha256:6c964c5320db446d25b4badd38b9b128b00c713865488cd54c53098a6c313252 name: cilium-chart - uri: public.ecr.aws/isovalent/cilium:1.13.12-eksa.1 + uri: public.ecr.aws/isovalent/cilium:1.13.13-eksa.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.12-eksa.1/cilium.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.13-eksa.1/cilium.yaml operator: arch: - amd64 description: Container image for operator-generic image - imageDigest: sha256:d2c3ac2b5c4fb67a68939642a40032cb0e3a97ce49415d87aa482b9b6d663a07 + imageDigest: sha256:dad5ec573a36ed1c973b4007c616bdfbcad3c010a3fa361823bf03941c81030d name: operator-generic os: linux - uri: public.ecr.aws/isovalent/operator-generic:v1.13.12-eksa.1 - version: v1.13.12-eksa.1 + uri: public.ecr.aws/isovalent/operator-generic:v1.13.13-eksa.1 + version: v1.13.13-eksa.1 cloudStack: clusterAPIController: arch: From b9f7071735d4d6576e3e3570579476a9750e33e9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Mar 2024 13:05:41 -0700 Subject: [PATCH 008/193] Bump github.com/nutanix-cloud-native/cluster-api-provider-nutanix (#7894) Bumps [github.com/nutanix-cloud-native/cluster-api-provider-nutanix](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix) from 1.3.1 to 1.3.2. 
- [Release notes](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/releases) - [Commits](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/compare/v1.3.1...v1.3.2) --- updated-dependencies: - dependency-name: github.com/nutanix-cloud-native/cluster-api-provider-nutanix dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 71dc1b541c50..4a7947af19b5 100644 --- a/go.mod +++ b/go.mod @@ -29,7 +29,7 @@ require ( github.com/google/go-cmp v0.6.0 github.com/google/go-github/v35 v35.3.0 github.com/google/uuid v1.5.0 - github.com/nutanix-cloud-native/cluster-api-provider-nutanix v1.3.1 + github.com/nutanix-cloud-native/cluster-api-provider-nutanix v1.3.2 github.com/nutanix-cloud-native/prism-go-client v0.3.4 github.com/onsi/gomega v1.30.0 github.com/opencontainers/image-spec v1.1.0 diff --git a/go.sum b/go.sum index 710947e25135..b7efe80648ef 100644 --- a/go.sum +++ b/go.sum @@ -708,8 +708,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nutanix-cloud-native/cluster-api-provider-nutanix v1.3.1 h1:ugJfylfF06dnL/yi7GF1tC3S2CJrkQFDPjv5qYrrQGM= -github.com/nutanix-cloud-native/cluster-api-provider-nutanix v1.3.1/go.mod h1:wphe4ijJBkkMdg2ZScO/l7K/5RBAjhBGm3RsMbVjkow= +github.com/nutanix-cloud-native/cluster-api-provider-nutanix v1.3.2 h1:iFKeVVqMoz2VMAngWMvq89guMmhNUAfyw/cKRrFqD+c= +github.com/nutanix-cloud-native/cluster-api-provider-nutanix v1.3.2/go.mod h1:wphe4ijJBkkMdg2ZScO/l7K/5RBAjhBGm3RsMbVjkow= github.com/nutanix-cloud-native/prism-go-client v0.3.4 h1:bHY3VPrHHYnbRtkpGaKK+2ZmvUjNVRC55CYZbXIfnOk= github.com/nutanix-cloud-native/prism-go-client v0.3.4/go.mod h1:tTIH02E6o6AWSShr98QChoxuZl+jBhkXFixom9+fd1Y= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= From 7f04df649667232bf979d8b32323270d6c0ef154 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Mar 2024 13:25:07 -0700 Subject: [PATCH 009/193] Bump github.com/aws/aws-sdk-go from 1.51.3 to 1.51.7 in /release/cli (#7896) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.51.3 to 1.51.7. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.51.3...v1.51.7) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- release/cli/go.mod | 2 +- release/cli/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/release/cli/go.mod b/release/cli/go.mod index c28921c93c13..b2c235a233cf 100644 --- a/release/cli/go.mod +++ b/release/cli/go.mod @@ -3,7 +3,7 @@ module github.com/aws/eks-anywhere/release/cli go 1.21 require ( - github.com/aws/aws-sdk-go v1.51.3 + github.com/aws/aws-sdk-go v1.51.7 github.com/aws/aws-sdk-go-v2 v1.26.0 github.com/aws/eks-anywhere v0.18.0 github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e diff --git a/release/cli/go.sum b/release/cli/go.sum index 0b0530515efb..eba919451bca 100644 --- a/release/cli/go.sum +++ b/release/cli/go.sum @@ -56,8 +56,8 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:l github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY= github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/aws/aws-sdk-go v1.38.40/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.51.3 h1:OqSyEXcJwf/XhZNVpMRgKlLA9nmbo5X8dwbll4RWxq8= -github.com/aws/aws-sdk-go v1.51.3/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.51.7 h1:RRjxHhx9RCjw5AhgpmmShq3F4JDlleSkyhYMQ2xUAe8= +github.com/aws/aws-sdk-go v1.51.7/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v1.26.0 h1:/Ce4OCiM3EkpW7Y+xUnfAFpchU78K7/Ug01sZni9PgA= github.com/aws/aws-sdk-go-v2 v1.26.0/go.mod h1:35hUlJVYd+M++iLI3ALmVwMOyRYMmRqUXpTtRGW+K9I= github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e h1:GB6Cn9yKEt31mDF7RrVWyM9WoppNkGYth8zBPIJGJ+w= From 7eef24c04256b6d9901ec3c11a99bd9bc1b5e4a2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Mar 2024 13:51:36 -0700 Subject: [PATCH 010/193] Bump golang.org/x/crypto from 0.19.0 to 0.21.0 (#7895) Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.19.0 to 0.21.0. - [Commits](https://github.com/golang/crypto/compare/v0.19.0...v0.21.0) --- updated-dependencies: - dependency-name: golang.org/x/crypto dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 8 ++++---- go.sum | 13 ++++++++----- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 4a7947af19b5..f059068d8fd4 100644 --- a/go.mod +++ b/go.mod @@ -42,11 +42,11 @@ require ( github.com/tinkerbell/tink v0.8.0 github.com/vmware/govmomi v0.34.2 go.uber.org/zap v1.26.0 - golang.org/x/crypto v0.19.0 + golang.org/x/crypto v0.21.0 golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa - golang.org/x/net v0.20.0 + golang.org/x/net v0.21.0 golang.org/x/oauth2 v0.15.0 - golang.org/x/sys v0.17.0 + golang.org/x/sys v0.18.0 golang.org/x/text v0.14.0 gopkg.in/ini.v1 v1.67.0 gopkg.in/yaml.v2 v2.4.0 @@ -183,7 +183,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect golang.org/x/mod v0.14.0 // indirect golang.org/x/sync v0.6.0 // indirect - golang.org/x/term v0.17.0 // indirect + golang.org/x/term v0.18.0 // indirect golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.17.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index b7efe80648ef..ef70c9702512 100644 --- a/go.sum +++ b/go.sum @@ -974,8 +974,9 @@ golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1076,8 +1077,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1184,8 +1185,9 @@ golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1193,8 +1195,9 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From 55fb3c2200b94e9a834dc336b8a0f9ea3eb3ef73 Mon Sep 17 00:00:00 2001 From: Abhay Krishna Date: Tue, 26 Mar 2024 15:31:12 -0700 Subject: [PATCH 011/193] Update Go modules in release/cli (#7898) --- release/cli/go.mod | 4 ++-- release/cli/go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/release/cli/go.mod b/release/cli/go.mod index b2c235a233cf..13cc2de4c71c 100644 --- a/release/cli/go.mod +++ b/release/cli/go.mod @@ -145,10 +145,10 @@ require ( go.opentelemetry.io/otel/trace v1.20.0 // indirect go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.19.0 // indirect + golang.org/x/crypto v0.21.0 // indirect golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect golang.org/x/mod v0.14.0 // indirect - golang.org/x/net v0.20.0 // indirect + golang.org/x/net v0.21.0 // indirect golang.org/x/oauth2 v0.15.0 // indirect golang.org/x/sys v0.18.0 // indirect golang.org/x/term v0.18.0 // indirect diff --git a/release/cli/go.sum b/release/cli/go.sum index eba919451bca..06e0aca3d60e 100644 --- a/release/cli/go.sum +++ b/release/cli/go.sum @@ -669,8 +669,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp 
v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -711,8 +711,8 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= From ddbb90866e72cfb27bacfda0e44f07ea6a83aaf4 Mon Sep 17 00:00:00 2001 From: Abhay Krishna Date: Tue, 26 Mar 2024 17:30:39 -0700 Subject: [PATCH 012/193] Add Nutanix external `etcd` support to changelog (#7901) --- docs/content/en/docs/whatsnew/changelog.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/content/en/docs/whatsnew/changelog.md b/docs/content/en/docs/whatsnew/changelog.md index 3b3011a3fd17..81bae774c8a3 100644 --- a/docs/content/en/docs/whatsnew/changelog.md +++ b/docs/content/en/docs/whatsnew/changelog.md @@ -66,6 +66,7 @@ description: > - Support for Kubernetes v1.29 - Support for in-place EKS Anywhere and Kubernetes version upgrades on Bare Metal clusters - Support for horizontally scaling `etcd` count in clusters with external `etcd` deployments ([#7127](https://github.com/aws/eks-anywhere/pull/7127)) +- External `etcd` support for Nutanix ([#7550](https://github.com/aws/eks-anywhere/pull/7550)) - Etcd encryption for Nutanix ([#7565](https://github.com/aws/eks-anywhere/pull/7565)) - Nutanix Cloud Controller Manager integration ([#7534](https://github.com/aws/eks-anywhere/pull/7534)) - Enable image signing for all images used in cluster operations From 689f338f8582862f3791aa82c8ee24b3d53cbcb7 Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Wed, 27 Mar 2024 15:41:36 -0700 Subject: [PATCH 013/193] [PR BOT] Generate release testdata files (#7903) --- release/cli/pkg/test/testdata/main-bundle-release.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/release/cli/pkg/test/testdata/main-bundle-release.yaml b/release/cli/pkg/test/testdata/main-bundle-release.yaml index 9ef39bab06c4..8a5c7a370b9b 100644 --- a/release/cli/pkg/test/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/test/testdata/main-bundle-release.yaml @@ -280,7 +280,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/image-builder/v0.4.1/image-builder-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/image-builder/v0.5.0/image-builder-v0.0.0-dev-build.0-linux-amd64.tar.gz kindNode: arch: - amd64 @@ -1058,7 +1058,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/image-builder/v0.4.1/image-builder-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/image-builder/v0.5.0/image-builder-v0.0.0-dev-build.0-linux-amd64.tar.gz kindNode: arch: - amd64 @@ -1836,7 +1836,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/image-builder/v0.4.1/image-builder-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/image-builder/v0.5.0/image-builder-v0.0.0-dev-build.0-linux-amd64.tar.gz kindNode: arch: - amd64 @@ -2614,7 +2614,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/image-builder/v0.4.1/image-builder-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/image-builder/v0.5.0/image-builder-v0.0.0-dev-build.0-linux-amd64.tar.gz kindNode: arch: - amd64 @@ -3392,7 +3392,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/image-builder/v0.4.1/image-builder-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/image-builder/v0.5.0/image-builder-v0.0.0-dev-build.0-linux-amd64.tar.gz kindNode: arch: - amd64 From 6939bfaa9d571032bddce053063d5584a02ca79c Mon Sep 17 00:00:00 2001 From: Abhay Krishna Date: Thu, 28 Mar 2024 14:10:44 -0700 Subject: [PATCH 014/193] Fix alignment of links in changelog alerts (#7905) --- docs/content/en/docs/whatsnew/changelog.md | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/docs/content/en/docs/whatsnew/changelog.md b/docs/content/en/docs/whatsnew/changelog.md index 81bae774c8a3..8c838f113e11 100644 --- a/docs/content/en/docs/whatsnew/changelog.md +++ b/docs/content/en/docs/whatsnew/changelog.md @@ -9,8 +9,15 @@ description: > --- {{% alert title="Announcements" color="warning" %}} -* EKS Anywhere release `v0.19.0` introduces support for creating Kubernetes version v1.29 clusters. A conformance test was [promoted](https://github.com/kubernetes/kubernetes/pull/120069) in Kubernetes v1.29 that verifies that `Service`s serving different L4 protocols with the same port number can co-exist in a Kubernetes cluster. 
This is not supported in Cilium, the CNI deployed on EKS Anywhere clusters, because Cilium currently does not differentiate between TCP and UDP protocols for Kubernetes `Service`s. Hence EKS Anywhere v1.29 clusters will not pass this specific conformance test. This service protocol differentiation is being tracked in an upstream [issue](https://github.com/cilium/cilium/issues/9207) and will be supported in a future Cilium release. A future release of EKS Anywhere will include the patched Cilium version when it is available. -* The Bottlerocket project [will not be releasing](https://github.com/bottlerocket-os/bottlerocket/issues/3794) bare metal variants for Kubernetes versions v1.29 and beyond. Hence Bottlerocket is not a supported operating system for creating EKS Anywhere bare metal clusters with Kubernetes versions v1.29 and above. However, Bottlerocket is still supported for bare metal clusters running Kubernetes versions v1.28 and below. Please refer to [this](https://github.com/aws/eks-anywhere/issues/7754) pinned issue for more information regarding the deprecation. +* EKS Anywhere release `v0.19.0` introduces support for creating Kubernetes version v1.29 clusters. A conformance test was promoted in Kubernetes v1.29 that verifies that `Service`s serving different L4 protocols with the same port number can co-exist in a Kubernetes cluster. This is not supported in Cilium, the CNI deployed on EKS Anywhere clusters, because Cilium currently does not differentiate between TCP and UDP protocols for Kubernetes `Service`s. Hence EKS Anywhere v1.29 clusters will not pass this specific conformance test. This service protocol differentiation is being tracked in an upstream Cilium issue and will be supported in a future Cilium release. A future release of EKS Anywhere will include the patched Cilium version when it is available.
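+  For illustration, a minimal `Service` of the kind this conformance test exercises (the
+  name and selector below are placeholders, not taken from the test itself) serves the
+  same port over both TCP and UDP:
+  ```yaml
+  apiVersion: v1
+  kind: Service
+  metadata:
+    name: multi-protocol-example
+  spec:
+    selector:
+      app: dns
+    ports:
+    - name: dns-udp
+      protocol: UDP
+      port: 53
+    - name: dns-tcp
+      protocol: TCP
+      port: 53
+  ```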
+ Refer to the following links for more information regarding the conformance test: + * [PR promoting multi-protocol `Service` test in Kubernetes v1.29](https://github.com/kubernetes/kubernetes/pull/120069) + * [Cilium issue for the multi-protocol `Service` feature](https://github.com/cilium/cilium/issues/9207) + * [Cilium issue for the Kubernetes v1.29 conformance failures](https://github.com/cilium/cilium/issues/29913) +* The Bottlerocket project will not be releasing bare metal variants for Kubernetes versions v1.29 and beyond. Hence Bottlerocket is not a supported operating system for creating EKS Anywhere bare metal clusters with Kubernetes versions v1.29 and above. However, Bottlerocket is still supported for bare metal clusters running Kubernetes versions v1.28 and below.
+ Refer to the following links for more information regarding the deprecation: + * [Bottlerocket announcement regarding deprecation of bare metal variants](https://github.com/bottlerocket-os/bottlerocket/issues/3794) + * [EKS Anywhere issue regarding deprecation of Bottlerocket bare metal variants](https://github.com/aws/eks-anywhere/issues/7754) * On January 31, 2024, a **High**-severity vulnerability CVE-2024-21626 was published affecting all `runc` versions <= `v1.1.11`. This CVE has been fixed in runc version `v1.1.12`, which has been included in EKS Anywhere release `v0.18.6`. In order to fix this CVE in your new/existing EKS-A cluster, you **MUST** build or download new OS images pertaining to version `v0.18.6` and create/upgrade your cluster with these images.
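+  As a quick illustrative check (command availability depends on the operating system and
+  on having shell access to the node), you can confirm the `runc` version in place with:
+  ```bash
+  runc --version
+  ```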
Refer to the following links for more information on the steps to mitigate the CVE.
  * [AWS Security bulletin for the `runc` issue](https://aws.amazon.com/security/security-bulletins/AWS-2024-001)

From 0a31b1193612fc9f974051068f747f5b8903ca95 Mon Sep 17 00:00:00 2001
From: ahreehong <46465244+ahreehong@users.noreply.github.com>
Date: Fri, 29 Mar 2024 11:41:44 -0700
Subject: [PATCH 015/193] Fix proxy configuration for airgapped environments
 (#7913)

---
 pkg/helm/factory.go         |  3 ++-
 test/e2e/airgap.go          | 19 ++++++++++++++++++-
 test/e2e/cloudstack_test.go | 22 ++++++++++++++++++++++
 test/e2e/vsphere_test.go    | 15 +++++++++++++++
 4 files changed, 57 insertions(+), 2 deletions(-)

diff --git a/pkg/helm/factory.go b/pkg/helm/factory.go
index 0f56fe0a269b..802fa9b2ffcb 100644
--- a/pkg/helm/factory.go
+++ b/pkg/helm/factory.go
@@ -52,7 +52,8 @@ func (f *ClientFactory) Get(ctx context.Context, clus *anywherev1.Cluster) (Clie
 	}

 	r := registrymirror.FromCluster(managmentCluster)
-	helmClient := f.builder.BuildHelm(WithRegistryMirror(r), WithInsecure())
+	p := managmentCluster.ProxyConfiguration()
+	helmClient := f.builder.BuildHelm(WithRegistryMirror(r), WithInsecure(), WithProxyConfig(p))

 	if r != nil && managmentCluster.RegistryAuth() {
 		if err := helmClient.RegistryLogin(ctx, r.BaseRegistry, rUsername, rPassword); err != nil {
diff --git a/test/e2e/airgap.go b/test/e2e/airgap.go
index 470653ded6fd..f0ee7d286849 100644
--- a/test/e2e/airgap.go
+++ b/test/e2e/airgap.go
@@ -22,7 +22,8 @@ const (
 	bundleReleasePathFromArtifacts = "./eks-anywhere-downloads/bundle-release.yaml"
 )

-// runAirgapConfigFlow run airgap deployment but allow bootstrap cluster to access local peers.
+// runAirgapConfigFlow runs an airgap deployment workflow with a registry mirror configuration,
+// and allows the bootstrap cluster to access local peers.
 func runAirgapConfigFlow(test *framework.ClusterE2ETest, localCIDRs string) {
 	test.GenerateClusterConfig()
 	test.DownloadArtifacts()
@@ -39,6 +40,22 @@ func runAirgapConfigFlow(test *framework.ClusterE2ETest, localCIDRs string) {
 	test.DeleteCluster()
 }

+// runAirgapConfigProxyFlow runs an airgapped deployment workflow with a proxy configuration,
+// and allows the bootstrap cluster to access local peers.
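+// Unlike the registry mirror variant above, the created cluster reaches external
+// endpoints through the proxy configured on the cluster spec (the e2e tests in this
+// change pass framework.WithProxy alongside this flow).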
+func runAirgapConfigProxyFlow(test *framework.ClusterE2ETest, localCIDRs string) { + test.GenerateClusterConfig() + test.DownloadArtifacts() + test.ExtractDownloadedArtifacts() + test.AirgapDockerContainers(localCIDRs) + test.CreateAirgappedUser(localCIDRs) + test.AssertAirgappedNetwork() + test.CreateCluster( + framework.WithSudo(airgapUsername), + framework.WithBundlesOverride(bundleReleasePathFromArtifacts), // generated by ExtractDownloadArtifacts + ) + test.DeleteCluster() +} + func runTinkerbellAirgapConfigFlow(test *framework.ClusterE2ETest, localCIDRs, kubeVersion string) { test.DownloadArtifacts() test.ExtractDownloadedArtifacts() diff --git a/test/e2e/cloudstack_test.go b/test/e2e/cloudstack_test.go index 560a4f96b3b6..90c52949b04f 100644 --- a/test/e2e/cloudstack_test.go +++ b/test/e2e/cloudstack_test.go @@ -3609,6 +3609,28 @@ func TestCloudStackKubernetes126RedhatAirgappedRegistryMirror(t *testing.T) { runAirgapConfigFlow(test, "10.0.0.1/8") } +func TestCloudStackKubernetes128RedhatAirgappedProxy(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewCloudStack(t, + framework.WithCloudStackRedhat9Kubernetes128(), + framework.WithCloudStackFillers( + framework.RemoveAllCloudStackAzs(), + framework.UpdateAddCloudStackAz3(), + ), + ), + framework.WithClusterFiller( + api.WithStackedEtcdTopology(), + api.WithControlPlaneCount(1), + api.WithWorkerNodeCount(1), + ), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), + framework.WithProxy(framework.CloudstackProxyRequiredEnvVars), + ) + + runAirgapConfigProxyFlow(test, "10.0.0.1/8") +} + // Workload API func TestCloudStackMulticlusterWorkloadClusterAPI(t *testing.T) { cloudstack := framework.NewCloudStack(t) diff --git a/test/e2e/vsphere_test.go b/test/e2e/vsphere_test.go index 5d18e5cef6ef..d18f65ac3492 100644 --- a/test/e2e/vsphere_test.go +++ b/test/e2e/vsphere_test.go @@ -4474,6 +4474,7 @@ func TestVSphereUpgradeKubernetesCiliumUbuntuGitHubFluxAPI(t *testing.T) { test.DeleteManagementCluster() } +// Airgapped tests func TestVSphereKubernetes128UbuntuAirgappedRegistryMirror(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -4502,6 +4503,20 @@ func TestVSphereKubernetes129UbuntuAirgappedRegistryMirror(t *testing.T) { runAirgapConfigFlow(test, "195.18.0.1/16,196.18.0.1/16") } +func TestVSphereKubernetes129UbuntuAirgappedProxy(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu129(), framework.WithPrivateNetwork()), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithProxy(framework.VsphereProxyRequiredEnvVars), + ) + + runAirgapConfigProxyFlow(test, "195.18.0.1/16,196.18.0.1/16") +} + func TestVSphereKubernetesUbuntu128EtcdEncryption(t *testing.T) { test := framework.NewClusterE2ETest( t, From 95624b59b42ce7e21609c00d86865379ae07b0d9 Mon Sep 17 00:00:00 2001 From: Mitali Paygude Date: Fri, 29 Mar 2024 13:26:45 -0700 Subject: [PATCH 016/193] Changelog for v0.19.2 (#7911) --- docs/content/en/docs/whatsnew/changelog.md | 24 ++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/docs/content/en/docs/whatsnew/changelog.md b/docs/content/en/docs/whatsnew/changelog.md index 8c838f113e11..8d6944ac5af5 100644 --- a/docs/content/en/docs/whatsnew/changelog.md +++ 
b/docs/content/en/docs/whatsnew/changelog.md
@@ -30,6 +30,30 @@ description: >
   * When upgrading to a new minor version, a new OS image must be created using the new image-builder CLI pertaining to that release.
 {{% /alert %}}

+
+## [v0.19.2](https://github.com/aws/eks-anywhere/releases/tag/v0.19.2)
+
+### Supported OS version details
+| | vSphere | Bare Metal | Nutanix | CloudStack | Snow |
+|:-------------------:|:-------:|:----------:|:-------:|:----------:|:----:|
+| Ubuntu 20.04 | ✔ | ✔ | ✔ | — | ✔ |
+| Ubuntu 22.04 | ✔ | ✔ | ✔ | — | — |
+| Bottlerocket 1.19.2 | ✔ | ✔ | — | — | — |
+| RHEL 8.x | ✔ | ✔ | ✔ | ✔ | — |
+| RHEL 9.x | — | — | ✔ | ✔ | — |
+
+### Changed
+- Update CAPC to 0.4.10-rc1 [#3105](https://github.com/aws/eks-anywhere-build-tooling/pull/3015)
+- Upgraded EKS-D:
+  - `v1-25-eks-34` to [`v1-25-eks-35`](https://distro.eks.amazonaws.com/releases/1-25/35/)
+  - `v1-26-eks-30` to [`v1-26-eks-31`](https://distro.eks.amazonaws.com/releases/1-26/31/)
+  - `v1-27-eks-24` to [`v1-27-eks-25`](https://distro.eks.amazonaws.com/releases/1-27/25/)
+  - `v1-28-eks-17` to [`v1-28-eks-18`](https://distro.eks.amazonaws.com/releases/1-28/18/)
+  - `v1-29-eks-6` to [`v1-29-eks-7`](https://distro.eks.amazonaws.com/releases/1-29/7/)
+
+### Fixed
+- Fixed Tinkerbell action image URIs when using a registry mirror with a proxy cache.
+
 ## [v0.19.1](https://github.com/aws/eks-anywhere/releases/tag/v0.19.1)

 ### Supported OS version details

From e277182534c72215cfd62d6ca59475424532f371 Mon Sep 17 00:00:00 2001
From: Neelam Dharnidharka <83719924+ndeksa@users.noreply.github.com>
Date: Fri, 29 Mar 2024 15:20:37 -0700
Subject: [PATCH 017/193] Added default user for each provider in the etcd
 backup restore commands (#7918)

* fixed backup restore commands

* fixed backup restore commands

* fixed username in backup restore commands

* fixed username in backup restore commands

* fixed username in backup restore commands

---------

Co-authored-by: Neelam Dharnidharka
---
 .../etcd-backup-restore/ubuntu-rhel-etcd-backup.md | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/docs/content/en/docs/clustermgmt/etcd-backup-restore/ubuntu-rhel-etcd-backup.md b/docs/content/en/docs/clustermgmt/etcd-backup-restore/ubuntu-rhel-etcd-backup.md
index 03f4791cb73c..792d6e5532b5 100644
--- a/docs/content/en/docs/clustermgmt/etcd-backup-restore/ubuntu-rhel-etcd-backup.md
+++ b/docs/content/en/docs/clustermgmt/etcd-backup-restore/ubuntu-rhel-etcd-backup.md
@@ -24,6 +24,11 @@ EKS-Anywhere clusters use etcd as the backing store. Taking a snapshot of etcd b
 Etcd offers a built-in snapshot mechanism. You can take a snapshot using the `etcdctl snapshot save` or `etcdutl snapshot save` command by following the steps given below.

+{{% alert title="Note" color="warning" %}}
+The following commands use ec2-user as the username. For EKS Anywhere on vSphere, Bare Metal, and Snow, the default username is ec2-user. For EKS Anywhere on Apache CloudStack, the default username is capc.
+For EKS Anywhere on Nutanix, the default username is eksa. The default username cannot be changed.
+{{% /alert %}}
+
 1. Login to any one of the etcd VMs
 ```bash
 ssh -i $PRIV_KEY ec2-user@$ETCD_VM_IP
@@ -93,6 +98,7 @@ scp -i $PRIV_KEY snapshot.db ec2-user@$ETCD_VM_IP:/home/ec2-user
 2. To run the etcdctl or etcdutl snapshot restore command, you need to provide the following configuration parameters:
 * name: This is the name of the etcd member. The value of this parameter should match the value used while starting the member. 
This can be obtained by running: ```bash +sudo su export ETCD_NAME=$(cat /etc/etcd/etcd.env | grep ETCD_NAME | awk -F'=' '{print $2}') ``` * initial-advertise-peer-urls: This is the advertise peer URL with which this etcd member was configured. It should be the exact value with which this etcd member was started. This can be obtained by running: From 0cfc4f0b0da8ccd990622e7db554ebada829c807 Mon Sep 17 00:00:00 2001 From: Mitali Paygude Date: Fri, 29 Mar 2024 15:51:37 -0700 Subject: [PATCH 018/193] Brew update patch release v0.19.2 (#7916) * Brew update patch release v0.19.2 * m --------- Co-authored-by: EKS Distro PR Bot --- .../test/testdata/main-bundle-release.yaml | 40 +++++++++---------- .../brew-version-release/CLI_RELEASE_VERSION | 2 +- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/release/cli/pkg/test/testdata/main-bundle-release.yaml b/release/cli/pkg/test/testdata/main-bundle-release.yaml index 8a5c7a370b9b..fad3842c4507 100644 --- a/release/cli/pkg/test/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/test/testdata/main-bundle-release.yaml @@ -306,7 +306,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cli-tools os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.2-eks-a-v0.0.0-dev-build.1 clusterController: arch: - amd64 @@ -315,9 +315,9 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cluster-controller os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.2-eks-a-v0.0.0-dev-build.1 components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.1/eksa-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.2/eksa-components.yaml diagnosticCollector: arch: - amd64 @@ -326,7 +326,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-diagnostic-collector os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.2-eks-a-v0.0.0-dev-build.1 version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: @@ -1084,7 +1084,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cli-tools os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.2-eks-a-v0.0.0-dev-build.1 clusterController: arch: - amd64 @@ -1093,9 +1093,9 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cluster-controller os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.2-eks-a-v0.0.0-dev-build.1 components: - uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.1/eksa-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.2/eksa-components.yaml diagnosticCollector: arch: - amd64 @@ -1104,7 +1104,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-diagnostic-collector os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.2-eks-a-v0.0.0-dev-build.1 version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: @@ -1862,7 +1862,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cli-tools os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.2-eks-a-v0.0.0-dev-build.1 clusterController: arch: - amd64 @@ -1871,9 +1871,9 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cluster-controller os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.2-eks-a-v0.0.0-dev-build.1 components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.1/eksa-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.2/eksa-components.yaml diagnosticCollector: arch: - amd64 @@ -1882,7 +1882,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-diagnostic-collector os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.2-eks-a-v0.0.0-dev-build.1 version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: @@ -2640,7 +2640,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cli-tools os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.2-eks-a-v0.0.0-dev-build.1 clusterController: arch: - amd64 @@ -2649,9 +2649,9 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cluster-controller os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.2-eks-a-v0.0.0-dev-build.1 components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.1/eksa-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.2/eksa-components.yaml diagnosticCollector: arch: - amd64 @@ -2660,7 +2660,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-diagnostic-collector os: linux 
- uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.2-eks-a-v0.0.0-dev-build.1 version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: @@ -3418,7 +3418,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cli-tools os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.2-eks-a-v0.0.0-dev-build.1 clusterController: arch: - amd64 @@ -3427,9 +3427,9 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cluster-controller os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.2-eks-a-v0.0.0-dev-build.1 components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.1/eksa-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.2/eksa-components.yaml diagnosticCollector: arch: - amd64 @@ -3438,7 +3438,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-diagnostic-collector os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.2-eks-a-v0.0.0-dev-build.1 version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: diff --git a/release/triggers/brew-version-release/CLI_RELEASE_VERSION b/release/triggers/brew-version-release/CLI_RELEASE_VERSION index d94dbfeb9d69..77c98fcea717 100644 --- a/release/triggers/brew-version-release/CLI_RELEASE_VERSION +++ b/release/triggers/brew-version-release/CLI_RELEASE_VERSION @@ -1 +1 @@ -v0.19.1 \ No newline at end of file +v0.19.2 \ No newline at end of file From ce870ed5748956f50877eee4de38916e44fe910e Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Sun, 31 Mar 2024 23:56:11 -0700 Subject: [PATCH 019/193] [PR BOT] Generate release testdata files (#7912) From 2418c37002b79ed4dd68990bfac1ad04903ed0e9 Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Mon, 1 Apr 2024 12:41:40 -0700 Subject: [PATCH 020/193] [PR BOT] Update base image in tag file(s) (#7920) --- manager/EKS_DISTRO_MINIMAL_BASE_TAG_FILE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/manager/EKS_DISTRO_MINIMAL_BASE_TAG_FILE b/manager/EKS_DISTRO_MINIMAL_BASE_TAG_FILE index 865670682200..b42771e96142 100644 --- a/manager/EKS_DISTRO_MINIMAL_BASE_TAG_FILE +++ b/manager/EKS_DISTRO_MINIMAL_BASE_TAG_FILE @@ -1 +1 @@ -2023-09-06-1694026927.2 +2024-04-01-1711929684.2 From b4f4e5ace151a13ea82dd88316f132786471cbba Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Apr 2024 14:00:40 -0700 Subject: [PATCH 021/193] Bump codecov/codecov-action from 4.1.0 to 4.1.1 (#7921) Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 4.1.0 to 4.1.1. 
- [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v4.1.0...v4.1.1) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/go-coverage.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/go-coverage.yml b/.github/workflows/go-coverage.yml index 8d94b44112c4..71e5788fd82a 100644 --- a/.github/workflows/go-coverage.yml +++ b/.github/workflows/go-coverage.yml @@ -22,7 +22,7 @@ jobs: - name: Run go test with coverage run: COVER_PROFILE=coverage.txt make coverage-unit-test - name: Codecov upload - uses: codecov/codecov-action@v4.1.0 + uses: codecov/codecov-action@v4.1.1 with: files: ./coverage.txt token: ${{ secrets.CODECOV_TOKEN }} From 7401fa8012aab78e9753995145fd260ea36b132b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Apr 2024 14:30:40 -0700 Subject: [PATCH 022/193] Bump github.com/google/uuid from 1.5.0 to 1.6.0 (#7923) Bumps [github.com/google/uuid](https://github.com/google/uuid) from 1.5.0 to 1.6.0. - [Release notes](https://github.com/google/uuid/releases) - [Changelog](https://github.com/google/uuid/blob/master/CHANGELOG.md) - [Commits](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) --- updated-dependencies: - dependency-name: github.com/google/uuid dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f059068d8fd4..79b8578c5cd2 100644 --- a/go.mod +++ b/go.mod @@ -28,7 +28,7 @@ require ( github.com/golang/mock v1.6.0 github.com/google/go-cmp v0.6.0 github.com/google/go-github/v35 v35.3.0 - github.com/google/uuid v1.5.0 + github.com/google/uuid v1.6.0 github.com/nutanix-cloud-native/cluster-api-provider-nutanix v1.3.2 github.com/nutanix-cloud-native/prism-go-client v0.3.4 github.com/onsi/gomega v1.30.0 diff --git a/go.sum b/go.sum index ef70c9702512..580154b3d8bb 100644 --- a/go.sum +++ b/go.sum @@ -538,8 +538,8 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= From d47f6ed3f5426f441be6e05b2b28daed10bd6d98 Mon Sep 17 00:00:00 
2001 From: Abhay Krishna Date: Mon, 1 Apr 2024 15:20:39 -0700 Subject: [PATCH 023/193] Update Go modules in release/cli (#7925) --- release/cli/go.mod | 2 +- release/cli/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/release/cli/go.mod b/release/cli/go.mod index 13cc2de4c71c..d07ee705bcef 100644 --- a/release/cli/go.mod +++ b/release/cli/go.mod @@ -74,7 +74,7 @@ require ( github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/google/uuid v1.5.0 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/gosuri/uitable v0.0.4 // indirect diff --git a/release/cli/go.sum b/release/cli/go.sum index 06e0aca3d60e..c06aff1b239e 100644 --- a/release/cli/go.sum +++ b/release/cli/go.sum @@ -314,8 +314,8 @@ github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaU github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= From ff13f2a62a75d9d6b68b2fbd8cab1ac2cd866119 Mon Sep 17 00:00:00 2001 From: Abhay Krishna Date: Tue, 2 Apr 2024 12:15:21 -0700 Subject: [PATCH 024/193] Add common step for downloading image-builder CLI (#7926) --- docs/content/en/docs/osmgmt/artifacts.md | 212 ++++++----------------- 1 file changed, 56 insertions(+), 156 deletions(-) diff --git a/docs/content/en/docs/osmgmt/artifacts.md b/docs/content/en/docs/osmgmt/artifacts.md index 451c8eaaadac..bcc562c73901 100644 --- a/docs/content/en/docs/osmgmt/artifacts.md +++ b/docs/content/en/docs/osmgmt/artifacts.md @@ -45,7 +45,7 @@ OR Using a specific EKS Anywhere version ```bash -EKSA_RELEASE_VERSION=v0.18.0 +EKSA_RELEASE_VERSION= ``` ```bash @@ -63,7 +63,7 @@ OR Using a specific EKS Anywhere version ```bash -EKSA_RELEASE_VERSION=v0.18.0 +EKSA_RELEASE_VERSION= ``` kernel: @@ -93,7 +93,7 @@ OR Using a specific EKS Anywhere version ```bash -EKSA_RELEASE_VERSION=v0.18.0 +EKSA_RELEASE_VERSION= ``` ```bash @@ -118,7 +118,7 @@ OR Using a specific EKS Anywhere version ```bash -EKSA_RELEASE_VERSION=v0.18.0 +EKSA_RELEASE_VERSION= ``` ```bash @@ -163,7 +163,7 @@ export KUBEVERSION="1.27" Using a specific EKS Anywhere version ```bash - EKSA_RELEASE_VERSION=v0.18.0 + EKSA_RELEASE_VERSION= ``` Set the Bottlerocket image format to the desired value (`ova` for the VMware variant or `raw` for the Baremetal variant) @@ -324,6 +324,31 @@ Packer will require prior authentication with your AWS account to launch EC2 ins Prism Central Administrator permissions are required to build a Nutanix image using `image-builder`. 
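The release-manifest lookup that the shell steps on this page perform with `curl` and `yq` can also be scripted. The Go sketch below is illustrative only: it assumes the manifest exposes `spec.latestVersion` and `spec.releases[].bundleManifestUrl` exactly as the `yq` queries suggest, and it pulls in the third-party `gopkg.in/yaml.v3` module.

```go
package main

import (
	"fmt"
	"io"
	"net/http"

	"gopkg.in/yaml.v3"
)

// releaseManifest models just the fields the lookup needs; the real
// manifest carries more.
type releaseManifest struct {
	Spec struct {
		LatestVersion string `yaml:"latestVersion"`
		Releases      []struct {
			Version           string `yaml:"version"`
			BundleManifestURL string `yaml:"bundleManifestUrl"`
		} `yaml:"releases"`
	} `yaml:"spec"`
}

func main() {
	resp, err := http.Get("https://anywhere-assets.eks.amazonaws.com/releases/eks-a/manifest.yaml")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}

	var m releaseManifest
	if err := yaml.Unmarshal(body, &m); err != nil {
		panic(err)
	}

	// Resolve the bundle manifest URL for the latest release, the same
	// value the shell pipeline stores in BUNDLE_MANIFEST_URL.
	for _, r := range m.Spec.Releases {
		if r.Version == m.Spec.LatestVersion {
			fmt.Println(r.BundleManifestURL)
		}
	}
}
```

Resolving the URL once and reusing it avoids re-fetching the manifest for every artifact lookup.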
+### Downloading the `image-builder` CLI + +You will need to download the `image-builder` CLI corresponding to the version of EKS Anywhere you are using. The `image-builder` CLI can be downloaded using the commands provided below: + +Using the latest EKS Anywhere version +```bash +EKSA_RELEASE_VERSION=$(curl -sL https://anywhere-assets.eks.amazonaws.com/releases/eks-a/manifest.yaml | yq ".spec.latestVersion") +``` + +OR + +Using a specific EKS Anywhere version +```bash +EKSA_RELEASE_VERSION= +``` + +```bash +cd /tmp +BUNDLE_MANIFEST_URL=$(curl -s https://anywhere-assets.eks.amazonaws.com/releases/eks-a/manifest.yaml | yq ".spec.releases[] | select(.version==\"$EKSA_RELEASE_VERSION\").bundleManifestUrl") +IMAGEBUILDER_TARBALL_URI=$(curl -s $BUNDLE_MANIFEST_URL | yq ".spec.versionsBundles[0].eksD.imagebuilder.uri") +curl -s $IMAGEBUILDER_TARBALL_URI | tar xz ./image-builder +sudo install -m 0755 ./image-builder /usr/local/bin/image-builder +cd - +``` + ### Build vSphere OVA node images These steps use `image-builder` to create an Ubuntu-based or RHEL-based image for vSphere. Before proceeding, ensure that the above system-level, network-level and vSphere-specific [prerequisites]({{< relref "#prerequisites">}}) have been met. @@ -379,29 +404,6 @@ These steps use `image-builder` to create an Ubuntu-based or RHEL-based image fo ```bash python3 -m pip install --user ansible ``` - -1. Get `image-builder`: - - Using the latest EKS Anywhere version - ```bash - EKSA_RELEASE_VERSION=$(curl -sL https://anywhere-assets.eks.amazonaws.com/releases/eks-a/manifest.yaml | yq ".spec.latestVersion") - ``` - - OR - - Using a specific EKS Anywhere version - ```bash - EKSA_RELEASE_VERSION=v0.18.0 - ``` - - ```bash - cd /tmp - BUNDLE_MANIFEST_URL=$(curl -s https://anywhere-assets.eks.amazonaws.com/releases/eks-a/manifest.yaml | yq ".spec.releases[] | select(.version==\"$EKSA_RELEASE_VERSION\").bundleManifestUrl") - IMAGEBUILDER_TARBALL_URI=$(curl -s $BUNDLE_MANIFEST_URL | yq ".spec.versionsBundles[0].eksD.imagebuilder.uri") - curl -s $IMAGEBUILDER_TARBALL_URI | tar xz ./image-builder - sudo install -m 0755 ./image-builder /usr/local/bin/image-builder - cd - - ``` 1. Get the latest version of `govc`: ```bash curl -L -o - "https://github.com/vmware/govmomi/releases/latest/download/govc_$(uname -s)_$(uname -m).tar.gz" | sudo tar -C /usr/local/bin -xvzf - govc @@ -490,11 +492,11 @@ These steps use `image-builder` to create an Ubuntu-based or RHEL-based image fo * `--os`: `ubuntu` * `--os-version`: `20.04` or `22.04` (default: `20.04`) * `--hypervisor`: For vSphere use `vsphere` - * `--release-channel`: Supported EKS Distro releases include 1-24, 1-25, 1-26, 1-27 and 1-28. + * `--release-channel`: Supported EKS Distro releases include 1-25, 1-26, 1-27, 1-28 and 1-29. * `--vsphere-config`: vSphere configuration file (`vsphere.json` in this example) ```bash - image-builder build --os ubuntu --hypervisor vsphere --release-channel 1-28 --vsphere-config vsphere.json + image-builder build --os ubuntu --hypervisor vsphere --release-channel 1-29 --vsphere-config vsphere.json ``` **Red Hat Enterprise Linux** @@ -504,11 +506,11 @@ These steps use `image-builder` to create an Ubuntu-based or RHEL-based image fo * `--os`: `redhat` * `--os-version`: `8` (default: `8`) * `--hypervisor`: For vSphere use `vsphere` - * `--release-channel`: Supported EKS Distro releases include 1-24, 1-25, 1-26, 1-27 and 1-28. + * `--release-channel`: Supported EKS Distro releases include 1-25, 1-26, 1-27, 1-28 and 1-29. 
* `--vsphere-config`: vSphere configuration file (`vsphere.json` in this example) ```bash - image-builder build --os redhat --hypervisor vsphere --release-channel 1-28 --vsphere-config vsphere.json + image-builder build --os redhat --hypervisor vsphere --release-channel 1-29 --vsphere-config vsphere.json ``` ### Build Bare Metal node images @@ -574,30 +576,6 @@ These steps use `image-builder` to create an Ubuntu-based or RHEL-based image fo ```bash python3 -m pip install --user ansible ``` - -1. Get `image-builder`: - - Using the latest EKS Anywhere version - ```bash - EKSA_RELEASE_VERSION=$(curl -sL https://anywhere-assets.eks.amazonaws.com/releases/eks-a/manifest.yaml | yq ".spec.latestVersion") - ``` - - OR - - Using a specific EKS Anywhere version - ```bash - EKSA_RELEASE_VERSION=v0.18.0 - ``` - - ```bash - cd /tmp - BUNDLE_MANIFEST_URL=$(curl -s https://anywhere-assets.eks.amazonaws.com/releases/eks-a/manifest.yaml | yq ".spec.releases[] | select(.version==\"$EKSA_RELEASE_VERSION\").bundleManifestUrl") - IMAGEBUILDER_TARBALL_URI=$(curl -s $BUNDLE_MANIFEST_URL | yq ".spec.versionsBundles[0].eksD.imagebuilder.uri") - curl -s $IMAGEBUILDER_TARBALL_URI | tar xz ./image-builder - sudo install -m 0755 ./image-builder /usr/local/bin/image-builder - cd - - ``` - 1. Create an Ubuntu or Red Hat image: **Ubuntu** @@ -607,12 +585,11 @@ These steps use `image-builder` to create an Ubuntu-based or RHEL-based image fo * `--os`: `ubuntu` * `--os-version`: `20.04` or `22.04` (default: `20.04`) * `--hypervisor`: `baremetal` - * `--release-channel`: A [supported EKS Distro release](https://anywhere.eks.amazonaws.com/docs/reference/support/support-versions/) - formatted as "[major]-[minor]"; for example "1-27" + * `--release-channel`: Supported EKS Distro releases include 1-25, 1-26, 1-27, 1-28 and 1-29. * `--baremetal-config`: baremetal config file if using proxy ```bash - image-builder build --os ubuntu --hypervisor baremetal --release-channel 1-27 + image-builder build --os ubuntu --hypervisor baremetal --release-channel 1-29 ``` **Red Hat Enterprise Linux (RHEL)** @@ -637,14 +614,12 @@ These steps use `image-builder` to create an Ubuntu-based or RHEL-based image fo * `--os`: `redhat` * `--os-version`: `8` (default: `8`) * `--hypervisor`: `baremetal` - * `--release-channel`: A [supported EKS Distro release](https://anywhere.eks.amazonaws.com/docs/reference/support/support-versions/) - formatted as "[major]-[minor]"; for example "1-27" + * `--release-channel`: Supported EKS Distro releases include 1-25, 1-26, 1-27, 1-28 and 1-29. * `--baremetal-config`: Bare metal config file ```bash - image-builder build --os redhat --hypervisor baremetal --release-channel 1-28 --baremetal-config baremetal.json + image-builder build --os redhat --hypervisor baremetal --release-channel 1-29 --baremetal-config baremetal.json ``` - 1. To consume the image, serve it from an accessible web server, then create the [bare metal cluster spec]({{< relref "../getting-started/baremetal/bare-spec/" >}}) configuring the `osImageURL` field URL of the image. For example: @@ -718,29 +693,6 @@ These steps use `image-builder` to create a RHEL-based image for CloudStack. Bef ```bash python3 -m pip install --user ansible ``` - -1. 
Get `image-builder`: - - Using the latest EKS Anywhere version - ```bash - EKSA_RELEASE_VERSION=$(curl -sL https://anywhere-assets.eks.amazonaws.com/releases/eks-a/manifest.yaml | yq ".spec.latestVersion") - ``` - - OR - - Using a specific EKS Anywhere version - ```bash - EKSA_RELEASE_VERSION=v0.18.0 - ``` - - ```bash - cd /tmp - BUNDLE_MANIFEST_URL=$(curl -s https://anywhere-assets.eks.amazonaws.com/releases/eks-a/manifest.yaml | yq ".spec.releases[] | select(.version==\"$EKSA_RELEASE_VERSION\").bundleManifestUrl") - IMAGEBUILDER_TARBALL_URI=$(curl -s $BUNDLE_MANIFEST_URL | yq ".spec.versionsBundles[0].eksD.imagebuilder.uri") - curl -s $IMAGEBUILDER_TARBALL_URI | tar xz ./image-builder - sudo install -m 0755 ./image-builder /usr/local/bin/image-builder - cd - - ``` 1. Create a CloudStack configuration file (for example, `cloudstack.json`) to provide the location of a Red Hat Enterprise Linux 8 ISO image and related checksum and Red Hat subscription information: ```json { @@ -752,19 +704,17 @@ These steps use `image-builder` to create a RHEL-based image for CloudStack. Bef } ``` >**_NOTE_**: To build the RHEL-based image, `image-builder` temporarily consumes a Red Hat subscription. That subscription is removed once the image is built. - 1. To create a RHEL-based image, run `image-builder` with the following options: * `--os`: `redhat` * `--os-version`: `8` (default: `8`) * `--hypervisor`: For CloudStack use `cloudstack` - * `--release-channel`: Supported EKS Distro releases include 1-24, 1-25, 1-26, 1-27 and 1-28. + * `--release-channel`: Supported EKS Distro releases include 1-25, 1-26, 1-27, 1-28 and 1-29. * `--cloudstack-config`: CloudStack configuration file (`cloudstack.json` in this example) ```bash - image-builder build --os redhat --hypervisor cloudstack --release-channel 1-28 --cloudstack-config cloudstack.json + image-builder build --os redhat --hypervisor cloudstack --release-channel 1-29 --cloudstack-config cloudstack.json ``` - 1. To consume the resulting RHEL-based image, add it as a template to your CloudStack setup as described in [Preparing CloudStack]({{< relref "../getting-started/cloudstack/cloudstack-preparation" >}}). ### Build Snow node images @@ -822,29 +772,6 @@ These steps use `image-builder` to create an Ubuntu-based Amazon Machine Image ( ```bash python3 -m pip install --user ansible ``` - -1. Get `image-builder`: - - Using the latest EKS Anywhere version - ```bash - EKSA_RELEASE_VERSION=$(curl -sL https://anywhere-assets.eks.amazonaws.com/releases/eks-a/manifest.yaml | yq ".spec.latestVersion") - ``` - - OR - - Using a specific EKS Anywhere version - ```bash - EKSA_RELEASE_VERSION=v0.18.0 - ``` - - ```bash - cd /tmp - BUNDLE_MANIFEST_URL=$(curl -s https://anywhere-assets.eks.amazonaws.com/releases/eks-a/manifest.yaml | yq ".spec.releases[] | select(.version==\"$EKSA_RELEASE_VERSION\").bundleManifestUrl") - IMAGEBUILDER_TARBALL_URI=$(curl -s $BUNDLE_MANIFEST_URL | yq ".spec.versionsBundles[0].eksD.imagebuilder.uri") - curl -s $IMAGEBUILDER_TARBALL_URI | tar xz ./image-builder - sudo install -m 0755 ./image-builder /usr/local/bin/image-builder - cd /home/$USER - ``` 1. Create an AMI configuration file (for example, `ami.json`) that contains various AMI parameters. For example: ```json @@ -900,19 +827,18 @@ These steps use `image-builder` to create an Ubuntu-based Amazon Machine Image ( ##### **volume_type** The type of root EBS volume, such as gp2, gp3, io1, etc. (default: `gp3`). - 1. 
To create an Ubuntu-based image, run `image-builder` with the following options: * `--os`: `ubuntu` * `--os-version`: `20.04` or `22.04` (default: `20.04`) * `--hypervisor`: For AMI, use `ami` - * `--release-channel`: Supported EKS Distro releases include 1-24, 1-25, 1-26, 1-27 and 1-28. + * `--release-channel`: Supported EKS Distro releases include 1-25, 1-26, 1-27, 1-28 and 1-29. * `--ami-config`: AMI configuration file (`ami.json` in this example) ```bash - image-builder build --os ubuntu --hypervisor ami --release-channel 1-28 --ami-config ami.json + image-builder build --os ubuntu --hypervisor ami --release-channel 1-29 --ami-config ami.json ``` -1. After the build, the Ubuntu AMI will be available in your AWS account in the AWS region specified in your AMI configuration file. If you wish to export it as a Raw image, you can achieve this using the AWS CLI. +1. After the build, the Ubuntu AMI will be available in your AWS account in the AWS region specified in your AMI configuration file. If you wish to export it as a raw image, you can achieve this using the AWS CLI. ``` ARTIFACT_ID=$(cat | jq -r '.builds[0].artifact_id') AMI_ID=$(echo $ARTIFACT_ID | cut -d: -f2) @@ -929,7 +855,6 @@ These steps use `image-builder` to create an Ubuntu-based Amazon Machine Image ( These steps use `image-builder` to create a Ubuntu-based image for Nutanix AHV and import it into the AOS Image Service. Before proceeding, ensure that the above system-level, network-level and Nutanix-specific [prerequisites]({{< relref "#prerequisites">}}) have been met. 1. Download an [Ubuntu cloud image](https://cloud-images.ubuntu.com/releases) or [RHEL cloud image](https://access.redhat.com/downloads/content/rhel) pertaining to your desired OS and OS version and upload it to the AOS Image Service using Prism. You will need to specify the image's name in AOS as the `source_image_name` in the `nutanix.json` config file specified below. You can also skip this step and directly use the `image_url` field in the config file to provide the URL of a publicly accessible image as source. - 1. Create a Linux user for running image-builder. ```bash sudo adduser image-builder @@ -981,29 +906,6 @@ These steps use `image-builder` to create a Ubuntu-based image for Nutanix AHV a ```bash python3 -m pip install --user ansible ``` - -1. Get `image-builder`: - - Using the latest EKS Anywhere version - ```bash - EKSA_RELEASE_VERSION=$(curl -sL https://anywhere-assets.eks.amazonaws.com/releases/eks-a/manifest.yaml | yq ".spec.latestVersion") - ``` - - OR - - Using a specific EKS Anywhere version - ```bash - EKSA_RELEASE_VERSION=v0.18.0 - ``` - - ```bash - cd /tmp - BUNDLE_MANIFEST_URL=$(curl -s https://anywhere-assets.eks.amazonaws.com/releases/eks-a/manifest.yaml | yq ".spec.releases[] | select(.version==\"$EKSA_RELEASE_VERSION\").bundleManifestUrl") - IMAGEBUILDER_TARBALL_URI=$(curl -s $BUNDLE_MANIFEST_URL | yq ".spec.versionsBundles[0].eksD.imagebuilder.uri") - curl -s $IMAGEBUILDER_TARBALL_URI | tar xz ./image-builder - sudo install -m 0755 ./image-builder /usr/local/bin/image-builder - cd - - ``` 1. Create a `nutanix.json` config file. More details on values can be found in the [image-builder documentation](https://image-builder.sigs.k8s.io/capi/providers/nutanix.html). 
See example below: ```json { @@ -1037,11 +939,11 @@ These steps use `image-builder` to create a Ubuntu-based image for Nutanix AHV a * `--os`: `ubuntu` * `--os-version`: `20.04` or `22.04` (default: `20.04`) * `--hypervisor`: For Nutanix use `nutanix` - * `--release-channel`: Supported EKS Distro releases include 1-24, 1-25, 1-26, 1-27 and 1-28. + * `--release-channel`: Supported EKS Distro releases include 1-25, 1-26, 1-27, 1-28 and 1-29. * `--nutanix-config`: Nutanix configuration file (`nutanix.json` in this example) ```bash - image-builder build --os ubuntu --hypervisor nutanix --release-channel 1-28 --nutanix-config nutanix.json + image-builder build --os ubuntu --hypervisor nutanix --release-channel 1-29 --nutanix-config nutanix.json ``` **Red Hat Enterprise Linux** @@ -1051,11 +953,11 @@ These steps use `image-builder` to create a Ubuntu-based image for Nutanix AHV a * `--os`: `redhat` * `--os-version`: `8` or `9` (default: `8`) * `--hypervisor`: For Nutanix use `nutanix` - * `--release-channel`: Supported EKS Distro releases include 1-24, 1-25, 1-26, 1-27 and 1-28. + * `--release-channel`: Supported EKS Distro releases include 1-25, 1-26, 1-27, 1-28 and 1-29. * `--nutanix-config`: Nutanix configuration file (`nutanix.json` in this example) ```bash - image-builder build --os redhat --hypervisor nutanix --release-channel 1-28 --nutanix-config nutanix.json + image-builder build --os redhat --hypervisor nutanix --release-channel 1-29 --nutanix-config nutanix.json ``` ### Configuring OS version @@ -1094,13 +996,11 @@ These steps use `image-builder` to create a Ubuntu-based image for Nutanix AHV a 9.2 9 - Nutanix only + CloudStack and Nutanix only -Currently, Ubuntu is the only operating system that supports multiple `os-version` values. - ### Building images for a specific EKS Anywhere version This section provides information about the relationship between `image-builder` and EKS Anywhere CLI version, and provides instructions on building images pertaining to a specific EKS Anywhere version. @@ -1250,7 +1150,7 @@ In order to use Red Hat Satellite in the image build process follow the steps be #### Building node images in an air-gapped environment 1. Identify the EKS-D release channel (generally aligning with Kubernetes version) to build. For example, 1.27 or 1.28 -2. Identify the latest release of EKS-A from [changelog]({{< ref "/docs/whatsnew/changelog" >}}). For example, v0.18.0 +2. Identify the latest release of EKS-A from [changelog]({{< ref "/docs/whatsnew/changelog" >}}). For example, 3. Run `image-builder` CLI to download manifests in an environment with internet connectivity ```bash image-builder download manifests @@ -1280,7 +1180,7 @@ In order to use Red Hat Satellite in the image build process follow the steps be fi if [ -z "${RELEASE_CHANNEL}" ]; then - echo "RELEASE_CHANNEL not set. Supported EKS Distro releases include 1-24, 1-25, 1-26, 1-27 and 1-28" + echo "RELEASE_CHANNEL not set. Supported EKS Distro releases include 1-25, 1-26, 1-27, 1-28 and 1-29" exit 1 fi @@ -1349,7 +1249,7 @@ In order to use Red Hat Satellite in the image build process follow the steps be ``` 5. Set EKS-A release version and EKS-D release channel as environment variables and execute the script ```bash - EKSA_RELEASE_VERSION=v0.18.0 RELEASE_CHANNEL=1-28 ./download-airgapped-artifacts.sh + EKSA_RELEASE_VERSION= RELEASE_CHANNEL=1-28 ./download-airgapped-artifacts.sh ``` Executing this script will create a local directory `eks-a-d-artifacts` and download the required EKS-A and EKS-D artifacts. 6. 
Create two repositories, one for EKS-A and one for EKS-D on the private artifacts server. @@ -1367,7 +1267,7 @@ In order to use Red Hat Satellite in the image build process follow the steps be deb [trusted=yes] http:///debian focal-backports main restricted universe multiverse deb [trusted=yes] http:///debian focal-security main restricted universe multiverse ``` - `focal` in the above file refers to the name of the Ubuntu OS for version 20.04. If using Ubuntu version 22.04 replace `focal` with `jammy`. + `focal` in the above file refers to the code name for the Ubuntu 20.04 release. If using Ubuntu version 22.04, replace `focal` with `jammy`. 11. Create a provider or hypervisor configuration file and add the following fields ```json { @@ -1378,7 +1278,7 @@ In order to use Red Hat Satellite in the image build process follow the steps be "extra_repos": "", "disable_public_repos": "true", "iso_url": "http:///ubuntu-20.04.1-legacy-server-amd64.iso", - "iso_checksum": "", "iso_checksum_type": "sha256" } ``` @@ -1388,7 +1288,7 @@ In order to use Red Hat Satellite in the image build process follow the steps be ``` -## Images +## Container Images -The various images for EKS Anywhere can be found [in the EKS Anywhere ECR repository](https://gallery.ecr.aws/eks-anywhere/). -The various images for EKS Distro can be found [in the EKS Distro ECR repository](https://gallery.ecr.aws/eks-distro/). +* The container images distributed by EKS Anywhere can be found in the [EKS Anywhere ECR Public Gallery](https://gallery.ecr.aws/eks-anywhere). +* The container images distributed by EKS Distro can be found in the [EKS Distro ECR Public Gallery](https://gallery.ecr.aws/eks-distro). From 37959a60a66a74a4093ea35eb60e12c82127cd72 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Apr 2024 15:07:20 -0700 Subject: [PATCH 025/193] Bump github.com/aws/aws-sdk-go-v2 from 1.26.0 to 1.26.1 in /release/cli (#7928) Bumps [github.com/aws/aws-sdk-go-v2](https://github.com/aws/aws-sdk-go-v2) from 1.26.0 to 1.26.1. - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/v1.26.0...v1.26.1) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go-v2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- release/cli/go.mod | 4 ++-- release/cli/go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/release/cli/go.mod b/release/cli/go.mod index d07ee705bcef..70a52c727dbc 100644 --- a/release/cli/go.mod +++ b/release/cli/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( github.com/aws/aws-sdk-go v1.51.7 - github.com/aws/aws-sdk-go-v2 v1.26.0 + github.com/aws/aws-sdk-go-v2 v1.26.1 github.com/aws/eks-anywhere v0.18.0 github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e github.com/fsouza/go-dockerclient v1.11.0 @@ -36,7 +36,7 @@ require ( github.com/Microsoft/go-winio v0.6.1 // indirect github.com/Microsoft/hcsshim v0.11.4 // indirect github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect - github.com/aws/smithy-go v1.20.1 // indirect + github.com/aws/smithy-go v1.20.2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect diff --git a/release/cli/go.sum b/release/cli/go.sum index c06aff1b239e..82be3ffef0af 100644 --- a/release/cli/go.sum +++ b/release/cli/go.sum @@ -58,12 +58,12 @@ github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:o github.com/aws/aws-sdk-go v1.38.40/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.51.7 h1:RRjxHhx9RCjw5AhgpmmShq3F4JDlleSkyhYMQ2xUAe8= github.com/aws/aws-sdk-go v1.51.7/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= -github.com/aws/aws-sdk-go-v2 v1.26.0 h1:/Ce4OCiM3EkpW7Y+xUnfAFpchU78K7/Ug01sZni9PgA= -github.com/aws/aws-sdk-go-v2 v1.26.0/go.mod h1:35hUlJVYd+M++iLI3ALmVwMOyRYMmRqUXpTtRGW+K9I= +github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA= +github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e h1:GB6Cn9yKEt31mDF7RrVWyM9WoppNkGYth8zBPIJGJ+w= github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e/go.mod h1:p/KHVJAMv3kofnUnShkZ6pUnZYzm+LK2G7bIi8nnTKA= -github.com/aws/smithy-go v1.20.1 h1:4SZlSlMr36UEqC7XOyRVb27XMeZubNcBNN+9IgEPIQw= -github.com/aws/smithy-go v1.20.1/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= +github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q= +github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= From 1a9c2d8e9d1b9a452c2ef3358747c68fea4b5a45 Mon Sep 17 00:00:00 2001 From: Saurabh Parekh Date: Wed, 3 Apr 2024 15:04:39 -0700 Subject: [PATCH 026/193] Add apiServerExtraArgs as optional configuration to docs (#7853) * Add apiServerExtraArgs as an optional configuration to docs * Add mini design doc for configuring api server extra args --- designs/api-server-extra-args.md | 118 ++++++++++++++++++ .../getting-started/baremetal/bare-spec.md | 1 + .../getting-started/cloudstack/cloud-spec.md | 1 + .../getting-started/nutanix/nutanix-spec.md | 1 + .../optional/api-server-extra-args.md | 38 ++++++ .../en/docs/getting-started/snow/snow-spec.md | 1 + 
.../getting-started/vsphere/vsphere-spec.md | 1 + 7 files changed, 161 insertions(+) create mode 100644 designs/api-server-extra-args.md create mode 100644 docs/content/en/docs/getting-started/optional/api-server-extra-args.md diff --git a/designs/api-server-extra-args.md b/designs/api-server-extra-args.md new file mode 100644 index 000000000000..48fef999e4a0 --- /dev/null +++ b/designs/api-server-extra-args.md @@ -0,0 +1,118 @@ +# Allow users to configure kube-apiserver flags + +## Problem Statement + +A customer is currently using OIDC for authenticating Kubernetes service accounts (KSA) and needs a mechanism to configure the kube-apiserver flags for their use case. The main question this document addresses is how to let users configure these flags. + +## Overview of Solution + +Allow users to configure the flags by exposing a map in the cluster spec YAML. + +**Schema:** + +```yaml +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: mgmt-cluster +spec: + ... + controlPlaneConfiguration: + ... + # More control plane components can be added here in the future + apiServerExtraArgs: + ... + "service-account-issuer": "https://{my-service-account-issuer-url}" + "service-account-jwks-uri": "https://{my-service-account-issuer-url}/openid/v1/jwks" + "service-account-signing-key-file": "/etc/kubernetes/pki/sa.key" + "service-account-key-file": "/etc/kubernetes/pki/sa.pub" +``` + +**Validations:** + +* Validate that OIDC flags are not configured in apiServerExtraArgs if an OIDCConfig identity provider is already configured in the spec +* Validate that the feature flag is enabled for configuring apiServerExtraArgs + +**Pros:** + +* Creates a standard way of exposing any flag for the control plane components +* Gives more flexibility to users in terms of validating the flag values for the API server + +**Cons:** + +* Does not enforce OIDC compliance or any other validations on the allowed values for the flags + +## Alternate Solutions + +Allow users to configure the flags as a struct field in the cluster spec YAML. + +**Schema:** + +```yaml +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: mgmt-cluster +spec: + ... + controlPlaneConfiguration: + ... + apiServerConfiguration: + ...
+ serviceAccountIssuer: + - "https://{my-service-account-issuer-url}" + serviceAccountJwksUri: "https://{my-service-account-issuer-url}/openid/v1/jwks" + serviceAccountSigningKeyFile: "/etc/kubernetes/pki/sa.key" + serviceAccountKeyFile: "/etc/kubernetes/pki/sa.pub" +``` + +**Validations:** + +* Validate that both serviceAccountIssuer and serviceAccountJwksUri have the same domain and use the https scheme +* Apply an additional set of validations specific to each flag + +**Pros:** + +* Fails fast if any of the flags are misconfigured with invalid values +* Allows enforcing OIDC compliance for the service account flags of the API server + +**Cons:** + +* Gives users less flexibility in configuring the flags because of the larger number of validations +* Does not provide a standard way to configure the flags + +* Difficult to validate each and every flag and to debug any issues with the API server + +## Implementation Details + +``` +apiServerExtraArgs: + "service-account-issuer": "https://{my-service-account-issuer-url}" + "service-account-jwks-uri": "https://{my-service-account-issuer-url}/openid/v1/jwks" +``` + +https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/#apiserver-flags +These flags will be fetched from the cluster spec and added to the apiServerExtraArgs in the ClusterConfiguration object during create and upgrade operations when generating the control plane CAPI spec. + +Users need to enable the feature flag `API_SERVER_EXTRA_ARGS_ENABLED=true` to configure the API server flags in the cluster spec. If it is not enabled, the CLI will throw an error when validating the cluster spec. This gate exposes the functionality now, before we decide whether to support it officially with more robust validations. + +The `service-account-issuer` flag can be configured for both podIamConfig as well as controlPlaneConfiguration to enable both features. If both are configured, the podIamConfig URL will be appended to the controlPlaneConfiguration URL. + +If OIDCConfig is specified in the identityProviderRefs within the spec, then OIDC flags cannot be configured in apiServerExtraArgs and the CLI will throw an error.
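To make that plumbing concrete, here is a minimal sketch of folding the user-supplied map into the kubeadm `ClusterConfiguration`; it is illustrative only, not the actual EKS-A implementation. The `bootstrapv1` types come from Cluster API's kubeadm bootstrap API (`v1beta1`), and the merge semantics (user values overriding defaults) are an assumption of this sketch.

```go
package sketch

import (
	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
)

// applyAPIServerExtraArgs copies the apiServerExtraArgs map from the cluster
// spec onto the kubeadm ClusterConfiguration used to render the control plane
// CAPI spec. Assumed behavior: user-supplied values win over defaults.
func applyAPIServerExtraArgs(cc *bootstrapv1.ClusterConfiguration, extraArgs map[string]string) {
	if cc.APIServer.ExtraArgs == nil {
		cc.APIServer.ExtraArgs = make(map[string]string, len(extraArgs))
	}
	for flag, value := range extraArgs {
		cc.APIServer.ExtraArgs[flag] = value
	}
}
```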
+ +## Documentation + +We would have to add `controlPlaneConfiguration.apiServerExtraArgs` as an optional configuration for the cluster spec in our EKS-A docs. + +## Migration plan for existing flags + +* Phase 1: We can add more flags to the above options and validate that any existing flags configured in other fields do not conflict with them, allowing only one of the two to be configured +* Phase 2: We can decide on the priority among the existing conflicting fields; if the flags are configured in multiple fields, the one with higher priority takes precedence and is used in the cluster +* Phase 3: We can deprecate all the lower-priority conflicting fields for the existing flags and keep only one standardized way of configuring all the flags + +## References + +* https://github.com/kubernetes/enhancements/tree/master/keps/sig-auth/1393-oidc-discovery +* https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#service-account-issuer-discovery +* https://openid.net/developers/how-connect-works/ +* https://auth0.com/docs/get-started/authentication-and-authorization-flow/authorization-code-flow + diff --git a/docs/content/en/docs/getting-started/baremetal/bare-spec.md b/docs/content/en/docs/getting-started/baremetal/bare-spec.md index 8e5a9d05515d..6b256cab3a2a 100644 --- a/docs/content/en/docs/getting-started/baremetal/bare-spec.md +++ b/docs/content/en/docs/getting-started/baremetal/bare-spec.md @@ -19,6 +19,7 @@ The following additional optional configuration can also be included: * [OIDC]({{< relref "../optional/oidc.md" >}}) * [Registry Mirror]({{< relref "../optional/registrymirror.md" >}}) * [Machine Health Check Timeouts]({{< relref "../optional/healthchecks.md" >}}) +* [API Server Extra Args]({{< relref "../optional/api-server-extra-args.md" >}}) To generate your own cluster configuration, follow instructions from the [Create Bare Metal cluster]({{< relref "./baremetal-getstarted" >}}) section and modify it using descriptions below. For information on how to add cluster configuration settings to this file for advanced node configuration, see [Advanced Bare Metal cluster configuration]({{< relref "#advanced-bare-metal-cluster-configuration" >}}).
diff --git a/docs/content/en/docs/getting-started/cloudstack/cloud-spec.md b/docs/content/en/docs/getting-started/cloudstack/cloud-spec.md index f2c82abdbd46..375d30fdfb4e 100644 --- a/docs/content/en/docs/getting-started/cloudstack/cloud-spec.md +++ b/docs/content/en/docs/getting-started/cloudstack/cloud-spec.md @@ -18,6 +18,7 @@ The following additional optional configuration can also be included: * [Proxy]({{< relref "../optional/proxy.md" >}}) * [Registry Mirror]({{< relref "../optional/registrymirror.md" >}}) * [Machine Health Check Timeouts]({{< relref "../optional/healthchecks.md" >}}) +* [API Server Extra Args]({{< relref "../optional/api-server-extra-args.md" >}}) ```yaml diff --git a/docs/content/en/docs/getting-started/nutanix/nutanix-spec.md b/docs/content/en/docs/getting-started/nutanix/nutanix-spec.md index efbdabdd17da..fe01d944aee5 100644 --- a/docs/content/en/docs/getting-started/nutanix/nutanix-spec.md +++ b/docs/content/en/docs/getting-started/nutanix/nutanix-spec.md @@ -20,6 +20,7 @@ The following additional optional configuration can also be included: * [Proxy]({{< relref "../optional/proxy.md" >}}) * [Gitops]({{< relref "../optional/gitops.md" >}}) * [Machine Health Check Timeouts]({{< relref "../optional/healthchecks.md" >}}) +* [API Server Extra Args]({{< relref "../optional/api-server-extra-args.md" >}}) ```yaml apiVersion: anywhere.eks.amazonaws.com/v1alpha1 diff --git a/docs/content/en/docs/getting-started/optional/api-server-extra-args.md b/docs/content/en/docs/getting-started/optional/api-server-extra-args.md new file mode 100644 index 000000000000..01846604ad3e --- /dev/null +++ b/docs/content/en/docs/getting-started/optional/api-server-extra-args.md @@ -0,0 +1,38 @@ +--- +title: "API Server Extra Args" +linkTitle: "API Server Extra Args" +weight: 60 +description: > + EKS Anywhere cluster yaml specification for Kubernetes API Server Extra Args reference +--- + +## API Server Extra Args support (optional) + +As of EKS Anywhere version v0.20.0, you can pass additional flags to configure the Kubernetes API server in your EKS Anywhere clusters. + +#### Provider support details +| | vSphere | Bare Metal | Nutanix | CloudStack | Snow | +|:--------------:|:-------:|:----------:|:-------:|:----------:|:----:| +| **Supported?** | ✓ | ✓ | ✓ | ✓ | ✓ | + +To configure a cluster with API Server extra args, update the cluster configuration file to include the details below. The feature flag `API_SERVER_EXTRA_ARGS_ENABLED=true` also needs to be set as an environment variable. + +This is a generic template with some example API Server extra args configuration below for reference: +```yaml +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: my-cluster-name +spec: + ... + controlPlaneConfiguration: + apiServerExtraArgs: + ... + disable-admission-plugins: "DefaultStorageClass,DefaultTolerationSeconds" + enable-admission-plugins: "NamespaceAutoProvision,NamespaceExists" +``` + +The above example configures the `disable-admission-plugins` and `enable-admission-plugins` options of the API Server to enable additional admission plugins or disable some of the default ones. You can configure any of the API Server options using the above template.
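One way to confirm the flags actually reached the control plane is to inspect the kube-apiserver static pod's command line. The client-go sketch below is an illustration, not part of this page's procedure: the kubeconfig path and the `component=kube-apiserver` label selector assume a kubeadm-style control plane.

```go
package main

import (
	"context"
	"fmt"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// "my-cluster-name.kubeconfig" is a placeholder path.
	cfg, err := clientcmd.BuildConfigFromFlags("", "my-cluster-name.kubeconfig")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	pods, err := client.CoreV1().Pods("kube-system").List(context.TODO(),
		metav1.ListOptions{LabelSelector: "component=kube-apiserver"})
	if err != nil {
		panic(err)
	}
	for _, p := range pods.Items {
		for _, arg := range p.Spec.Containers[0].Command {
			// Print only the admission-plugin flags configured above.
			if strings.Contains(arg, "admission-plugins") {
				fmt.Printf("%s: %s\n", p.Name, arg)
			}
		}
	}
}
```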
+ +### controlPlaneConfiguration.apiServerExtraArgs (optional) +Reference the [Kubernetes documentation](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/#options) for the list of flags that can be configured for the Kubernetes API server in EKS Anywhere \ No newline at end of file diff --git a/docs/content/en/docs/getting-started/snow/snow-spec.md b/docs/content/en/docs/getting-started/snow/snow-spec.md index 096bbdb9efa9..31f81aab96df 100644 --- a/docs/content/en/docs/getting-started/snow/snow-spec.md +++ b/docs/content/en/docs/getting-started/snow/snow-spec.md @@ -18,6 +18,7 @@ The following additional optional configuration can also be included: * [Proxy]({{< relref "../optional/proxy.md" >}}) * [Registry Mirror]({{< relref "../optional/registrymirror.md" >}}) * [Machine Health Check Timeouts]({{< relref "../optional/healthchecks.md" >}}) +* [API Server Extra Args]({{< relref "../optional/api-server-extra-args.md" >}}) ```yaml apiVersion: anywhere.eks.amazonaws.com/v1alpha1 diff --git a/docs/content/en/docs/getting-started/vsphere/vsphere-spec.md b/docs/content/en/docs/getting-started/vsphere/vsphere-spec.md index d8104ee6c58f..dedfd422c6aa 100644 --- a/docs/content/en/docs/getting-started/vsphere/vsphere-spec.md +++ b/docs/content/en/docs/getting-started/vsphere/vsphere-spec.md @@ -111,6 +111,7 @@ The following additional optional configuration can also be included: * [Registry Mirror]({{< relref "../optional/registrymirror.md" >}}) * [Host OS Config]({{< relref "../optional/hostOSConfig.md" >}}) * [Machine Health Check Timeouts]({{< relref "../optional/healthchecks.md" >}}) +* [API Server Extra Args]({{< relref "../optional/api-server-extra-args.md" >}}) ## Cluster Fields From c7c682f76a2a71a92750ac703c2b6fe89adb8c90 Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Thu, 4 Apr 2024 00:51:33 -0700 Subject: [PATCH 027/193] [PR BOT] Generate release testdata files (#7932) --- .../test/testdata/main-bundle-release.yaml | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/release/cli/pkg/test/testdata/main-bundle-release.yaml b/release/cli/pkg/test/testdata/main-bundle-release.yaml index fad3842c4507..36156b6e03d2 100644 --- a/release/cli/pkg/test/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/test/testdata/main-bundle-release.yaml @@ -473,12 +473,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: credential-provider-package os: linux - uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.3.13-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.1-eks-a-v0.0.0-dev-build.1 helmChart: description: Helm chart for eks-anywhere-packages imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.3.13-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.1-eks-a-v0.0.0-dev-build.1 packageController: arch: - amd64 @@ -487,7 +487,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.3.13-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.1-eks-a-v0.0.0-dev-build.1 
tokenRefresher: arch: - amd64 @@ -496,8 +496,8 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: ecr-token-refresher os: linux - uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.3.13-eks-a-v0.0.0-dev-build.1 - version: v0.3.13+abcdef1 + uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.1-eks-a-v0.0.0-dev-build.1 + version: v0.4.1+abcdef1 snow: bottlerocketBootstrapSnow: arch: @@ -1251,12 +1251,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: credential-provider-package os: linux - uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.3.13-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.1-eks-a-v0.0.0-dev-build.1 helmChart: description: Helm chart for eks-anywhere-packages imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.3.13-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.1-eks-a-v0.0.0-dev-build.1 packageController: arch: - amd64 @@ -1265,7 +1265,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.3.13-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.1-eks-a-v0.0.0-dev-build.1 tokenRefresher: arch: - amd64 @@ -1274,8 +1274,8 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: ecr-token-refresher os: linux - uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.3.13-eks-a-v0.0.0-dev-build.1 - version: v0.3.13+abcdef1 + uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.1-eks-a-v0.0.0-dev-build.1 + version: v0.4.1+abcdef1 snow: bottlerocketBootstrapSnow: arch: @@ -2029,12 +2029,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: credential-provider-package os: linux - uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.3.13-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.1-eks-a-v0.0.0-dev-build.1 helmChart: description: Helm chart for eks-anywhere-packages imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.3.13-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.1-eks-a-v0.0.0-dev-build.1 packageController: arch: - amd64 @@ -2043,7 +2043,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.3.13-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.1-eks-a-v0.0.0-dev-build.1 tokenRefresher: arch: - amd64 @@ -2052,8 +2052,8 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: ecr-token-refresher os: linux - uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.3.13-eks-a-v0.0.0-dev-build.1 - version: v0.3.13+abcdef1 + 
uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.1-eks-a-v0.0.0-dev-build.1 + version: v0.4.1+abcdef1 snow: bottlerocketBootstrapSnow: arch: @@ -2807,12 +2807,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: credential-provider-package os: linux - uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.3.13-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.1-eks-a-v0.0.0-dev-build.1 helmChart: description: Helm chart for eks-anywhere-packages imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.3.13-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.1-eks-a-v0.0.0-dev-build.1 packageController: arch: - amd64 @@ -2821,7 +2821,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.3.13-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.1-eks-a-v0.0.0-dev-build.1 tokenRefresher: arch: - amd64 @@ -2830,8 +2830,8 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: ecr-token-refresher os: linux - uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.3.13-eks-a-v0.0.0-dev-build.1 - version: v0.3.13+abcdef1 + uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.1-eks-a-v0.0.0-dev-build.1 + version: v0.4.1+abcdef1 snow: bottlerocketBootstrapSnow: arch: @@ -3585,12 +3585,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: credential-provider-package os: linux - uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.3.13-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.1-eks-a-v0.0.0-dev-build.1 helmChart: description: Helm chart for eks-anywhere-packages imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.3.13-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.1-eks-a-v0.0.0-dev-build.1 packageController: arch: - amd64 @@ -3599,7 +3599,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.3.13-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.1-eks-a-v0.0.0-dev-build.1 tokenRefresher: arch: - amd64 @@ -3608,8 +3608,8 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: ecr-token-refresher os: linux - uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.3.13-eks-a-v0.0.0-dev-build.1 - version: v0.3.13+abcdef1 + uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.1-eks-a-v0.0.0-dev-build.1 + version: v0.4.1+abcdef1 snow: bottlerocketBootstrapSnow: arch: From 989e06b4aac493b45d178ed9e93d61f4a2ab1302 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> 
Date: Thu, 4 Apr 2024 01:05:39 -0700 Subject: [PATCH 028/193] Bump github.com/aws/aws-sdk-go from 1.51.7 to 1.51.13 in /release/cli (#7931) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.51.7 to 1.51.13. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.51.7...v1.51.13) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- release/cli/go.mod | 2 +- release/cli/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/release/cli/go.mod b/release/cli/go.mod index 70a52c727dbc..54e5498318ae 100644 --- a/release/cli/go.mod +++ b/release/cli/go.mod @@ -3,7 +3,7 @@ module github.com/aws/eks-anywhere/release/cli go 1.21 require ( - github.com/aws/aws-sdk-go v1.51.7 + github.com/aws/aws-sdk-go v1.51.13 github.com/aws/aws-sdk-go-v2 v1.26.1 github.com/aws/eks-anywhere v0.18.0 github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e diff --git a/release/cli/go.sum b/release/cli/go.sum index 82be3ffef0af..727635817693 100644 --- a/release/cli/go.sum +++ b/release/cli/go.sum @@ -56,8 +56,8 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:l github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY= github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/aws/aws-sdk-go v1.38.40/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.51.7 h1:RRjxHhx9RCjw5AhgpmmShq3F4JDlleSkyhYMQ2xUAe8= -github.com/aws/aws-sdk-go v1.51.7/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.51.13 h1:j6lgtz9E/XFRiYYnGNrAfWvyyTsuYvWvo2RCt0zqAIs= +github.com/aws/aws-sdk-go v1.51.13/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA= github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e h1:GB6Cn9yKEt31mDF7RrVWyM9WoppNkGYth8zBPIJGJ+w= From 96d504b81f0f24210067f5ce308e1467e0e4c0c1 Mon Sep 17 00:00:00 2001 From: Abhay Krishna Date: Thu, 4 Apr 2024 10:55:41 -0700 Subject: [PATCH 029/193] Set non-empty defaults for registry mirror secret credentials (#7933) --- pkg/curatedpackages/packagecontrollerclient.go | 8 +++++--- pkg/curatedpackages/testdata/values_empty.yaml | 4 ++-- .../testdata/values_empty_registrymirrorsecret.yaml | 4 ++-- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/pkg/curatedpackages/packagecontrollerclient.go b/pkg/curatedpackages/packagecontrollerclient.go index 7188e474fe3f..5ca3f0cd03a9 100644 --- a/pkg/curatedpackages/packagecontrollerclient.go +++ b/pkg/curatedpackages/packagecontrollerclient.go @@ -34,8 +34,10 @@ import ( var secretsValueYaml string const ( - eksaDefaultRegion = "us-west-2" - valueFileName = "values.yaml" + eksaDefaultRegion = "us-west-2" + valueFileName = "values.yaml" + defaultRegistryMirrorUsername = "username" + defaultRegistryMirrorPassword = "password" ) type PackageControllerClientOpt func(client *PackageControllerClient) @@ -304,7 +306,7 @@ func (pc *PackageControllerClient) 
CreateHelmOverrideValuesYaml() (string, []byt func (pc *PackageControllerClient) generateHelmOverrideValues() ([]byte, error) { var err error - endpoint, username, password, caCertContent, insecureSkipVerify := "", "", "", "", "false" + endpoint, username, password, caCertContent, insecureSkipVerify := "", defaultRegistryMirrorUsername, defaultRegistryMirrorPassword, "", "false" if pc.registryMirror != nil { endpoint = pc.registryMirror.BaseRegistry username, password, err = config.ReadCredentials() diff --git a/pkg/curatedpackages/testdata/values_empty.yaml b/pkg/curatedpackages/testdata/values_empty.yaml index 0b869c7592a8..65970c5779da 100644 --- a/pkg/curatedpackages/testdata/values_empty.yaml +++ b/pkg/curatedpackages/testdata/values_empty.yaml @@ -1,7 +1,7 @@ registryMirrorSecret: endpoint: "" - username: "" - password: "" + username: "dXNlcm5hbWU=" + password: "cGFzc3dvcmQ=" cacertcontent: "" insecure: "ZmFsc2U=" awsSecret: diff --git a/pkg/curatedpackages/testdata/values_empty_registrymirrorsecret.yaml b/pkg/curatedpackages/testdata/values_empty_registrymirrorsecret.yaml index 0e7b260c6859..4c3038dcce45 100644 --- a/pkg/curatedpackages/testdata/values_empty_registrymirrorsecret.yaml +++ b/pkg/curatedpackages/testdata/values_empty_registrymirrorsecret.yaml @@ -1,7 +1,7 @@ registryMirrorSecret: endpoint: "" - username: "" - password: "" + username: "dXNlcm5hbWU=" + password: "cGFzc3dvcmQ=" cacertcontent: "" insecure: "ZmFsc2U=" awsSecret: From 4328cedb1f8ebc9712d044fa41e6879c6bebbfc3 Mon Sep 17 00:00:00 2001 From: Tanvir Tatla Date: Thu, 4 Apr 2024 12:29:40 -0700 Subject: [PATCH 030/193] Use Cluster Mover in create and delete workflows (#7909) * Use Cluster Mover in create and delete workflows * update tests and mocks * additional test coverage --- Makefile | 2 +- cmd/eksctl-anywhere/cmd/createcluster.go | 4 +- cmd/eksctl-anywhere/cmd/deletecluster.go | 3 +- pkg/clustermanager/cluster_manager.go | 82 +++++++++++++ pkg/clustermanager/cluster_manager_test.go | 97 ++++++++++++--- pkg/task/task.go | 1 + pkg/workflows/create_prep.go | 17 +-- pkg/workflows/create_prep_test.go | 45 +------ pkg/workflows/interfaces/interfaces.go | 7 ++ pkg/workflows/interfaces/mocks/clients.go | 67 +++++++++- pkg/workflows/management/create.go | 4 + .../management/create_install_eksa.go | 23 +++- pkg/workflows/management/create_test.go | 104 ++++++++++++---- pkg/workflows/management/create_workload.go | 8 +- pkg/workflows/management/delete.go | 4 + .../management/delete_install_eksa.go | 57 ++++----- pkg/workflows/management/delete_test.go | 114 +++++++++++++----- pkg/workflows/workload/createcluster.go | 8 +- 18 files changed, 473 insertions(+), 174 deletions(-) diff --git a/Makefile b/Makefile index 747f5ba5d233..a02bec980b18 100644 --- a/Makefile +++ b/Makefile @@ -568,7 +568,7 @@ mocks: ## Generate mocks ${MOCKGEN} -destination=pkg/bootstrapper/mocks/bootstrapper.go -package=mocks "github.com/aws/eks-anywhere/pkg/bootstrapper" ClusterClient ${MOCKGEN} -destination=pkg/git/providers/github/mocks/github.go -package=mocks "github.com/aws/eks-anywhere/pkg/git/providers/github" GithubClient ${MOCKGEN} -destination=pkg/git/mocks/git.go -package=mocks "github.com/aws/eks-anywhere/pkg/git" Client,ProviderClient - ${MOCKGEN} -destination=pkg/workflows/interfaces/mocks/clients.go -package=mocks "github.com/aws/eks-anywhere/pkg/workflows/interfaces" 
Bootstrapper,ClusterManager,GitOpsManager,Validator,CAPIManager,EksdInstaller,EksdUpgrader,PackageInstaller,ClusterUpgrader,ClusterCreator,ClientFactory,EksaInstaller,ClusterDeleter + ${MOCKGEN} -destination=pkg/workflows/interfaces/mocks/clients.go -package=mocks "github.com/aws/eks-anywhere/pkg/workflows/interfaces" Bootstrapper,ClusterManager,GitOpsManager,Validator,CAPIManager,EksdInstaller,EksdUpgrader,PackageInstaller,ClusterUpgrader,ClusterCreator,ClientFactory,EksaInstaller,ClusterDeleter,ClusterMover ${MOCKGEN} -destination=pkg/git/gogithub/mocks/client.go -package=mocks "github.com/aws/eks-anywhere/pkg/git/gogithub" Client ${MOCKGEN} -destination=pkg/git/gitclient/mocks/client.go -package=mocks "github.com/aws/eks-anywhere/pkg/git/gitclient" GoGit ${MOCKGEN} -destination=pkg/validations/mocks/docker.go -package=mocks "github.com/aws/eks-anywhere/pkg/validations" DockerExecutable diff --git a/cmd/eksctl-anywhere/cmd/createcluster.go b/cmd/eksctl-anywhere/cmd/createcluster.go index 57d3e4e76b57..6452dafc6892 100644 --- a/cmd/eksctl-anywhere/cmd/createcluster.go +++ b/cmd/eksctl-anywhere/cmd/createcluster.go @@ -190,7 +190,8 @@ func (cc *createClusterOptions) createCluster(cmd *cobra.Command, _ []string) er WithCreateClusterDefaulter(createCLIConfig). WithClusterApplier(). WithKubeconfigWriter(clusterSpec.Cluster). - WithClusterCreator(clusterSpec.Cluster) + WithClusterCreator(clusterSpec.Cluster). + WithClusterMover() if cc.timeoutOptions.noTimeouts { factory.WithNoTimeouts() @@ -274,6 +275,7 @@ func (cc *createClusterOptions) createCluster(cmd *cobra.Command, _ []string) er deps.PackageInstaller, deps.ClusterCreator, deps.EksaInstaller, + deps.ClusterMover, ) err = createMgmtCluster.Run(ctx, clusterSpec, createValidations) diff --git a/cmd/eksctl-anywhere/cmd/deletecluster.go b/cmd/eksctl-anywhere/cmd/deletecluster.go index 00e0465b3225..0893eb995d14 100644 --- a/cmd/eksctl-anywhere/cmd/deletecluster.go +++ b/cmd/eksctl-anywhere/cmd/deletecluster.go @@ -126,6 +126,7 @@ func (dc *deleteClusterOptions) deleteCluster(ctx context.Context) error { WithEksdInstaller(). WithEKSAInstaller(). WithUnAuthKubeClient(). + WithClusterMover(). 
Build(ctx) if err != nil { return err @@ -154,7 +155,7 @@ func (dc *deleteClusterOptions) deleteCluster(ctx context.Context) error { deleteWorkload := workload.NewDelete(deps.Provider, deps.Writer, deps.ClusterManager, deps.ClusterDeleter, deps.GitOpsFlux) err = deleteWorkload.Run(ctx, cluster, clusterSpec) } else { - deleteManagement := management.NewDelete(deps.Bootstrapper, deps.Provider, deps.Writer, deps.ClusterManager, deps.GitOpsFlux, deps.ClusterDeleter, deps.EksdInstaller, deps.EksaInstaller, deps.UnAuthKubeClient) + deleteManagement := management.NewDelete(deps.Bootstrapper, deps.Provider, deps.Writer, deps.ClusterManager, deps.GitOpsFlux, deps.ClusterDeleter, deps.EksdInstaller, deps.EksaInstaller, deps.UnAuthKubeClient, deps.ClusterMover) err = deleteManagement.Run(ctx, cluster, clusterSpec) } cleanup(deps, &err) diff --git a/pkg/clustermanager/cluster_manager.go b/pkg/clustermanager/cluster_manager.go index d14dbe3aea7d..a0ec614c7b5d 100644 --- a/pkg/clustermanager/cluster_manager.go +++ b/pkg/clustermanager/cluster_manager.go @@ -674,6 +674,73 @@ func (c *ClusterManager) PauseCAPIWorkloadClusters(ctx context.Context, manageme return nil } +func (c *ClusterManager) resumeEksaReconcileForManagementAndWorkloadClusters(ctx context.Context, managementCluster *types.Cluster, clusterSpec *cluster.Spec, provider providers.Provider) error { + clusters := &v1alpha1.ClusterList{} + err := c.clusterClient.ListObjects(ctx, eksaClusterResourceType, clusterSpec.Cluster.Namespace, managementCluster.KubeconfigFile, clusters) + if err != nil { + return err + } + + for _, w := range clusters.Items { + if w.ManagedBy() != clusterSpec.Cluster.Name { + continue + } + + if err := c.resumeReconcileForCluster(ctx, managementCluster, &w, provider); err != nil { + return err + } + } + + return nil +} + +// ResumeEKSAControllerReconcile resumes a paused EKS-Anywhere cluster. 
+func (c *ClusterManager) ResumeEKSAControllerReconcile(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec, provider providers.Provider) error { + // clear pause annotation + clusterSpec.Cluster.ClearPauseAnnotation() + provider.DatacenterConfig(clusterSpec).ClearPauseAnnotation() + + if clusterSpec.Cluster.IsSelfManaged() { + return c.resumeEksaReconcileForManagementAndWorkloadClusters(ctx, cluster, clusterSpec, provider) + } + + return c.resumeReconcileForCluster(ctx, cluster, clusterSpec.Cluster, provider) +} + +func (c *ClusterManager) resumeReconcileForCluster(ctx context.Context, clusterCreds *types.Cluster, cluster *v1alpha1.Cluster, provider providers.Provider) error { + pausedAnnotation := cluster.PausedAnnotation() + err := c.clusterClient.RemoveAnnotationInNamespace(ctx, provider.DatacenterResourceType(), cluster.Spec.DatacenterRef.Name, pausedAnnotation, clusterCreds, cluster.Namespace) + if err != nil { + return fmt.Errorf("removing paused annotation when resuming datacenterconfig reconciliation: %v", err) + } + + if provider.MachineResourceType() != "" { + for _, machineConfigRef := range cluster.MachineConfigRefs() { + err = c.clusterClient.RemoveAnnotationInNamespace(ctx, provider.MachineResourceType(), machineConfigRef.Name, pausedAnnotation, clusterCreds, cluster.Namespace) + if err != nil { + return fmt.Errorf("removing paused annotation when resuming reconciliation for machine config %s: %v", machineConfigRef.Name, err) + } + } + } + + err = c.clusterClient.RemoveAnnotationInNamespace(ctx, cluster.ResourceType(), cluster.Name, pausedAnnotation, clusterCreds, cluster.Namespace) + if err != nil { + return fmt.Errorf("removing paused annotation when resuming cluster reconciliation: %v", err) + } + + if err = c.clusterClient.RemoveAnnotationInNamespace(ctx, + cluster.ResourceType(), + cluster.Name, + v1alpha1.ManagedByCLIAnnotation, + clusterCreds, + cluster.Namespace, + ); err != nil { + return fmt.Errorf("removing managed by CLI annotation when resuming cluster reconciliation: %v", err) + } + + return nil +} + // ResumeCAPIWorkloadClusters resumes all workload CAPI clusters except the management cluster. func (c *ClusterManager) ResumeCAPIWorkloadClusters(ctx context.Context, managementCluster *types.Cluster) error { clusters, err := c.clusterClient.GetClusters(ctx, managementCluster) @@ -693,6 +760,21 @@ func (c *ClusterManager) ResumeCAPIWorkloadClusters(ctx context.Context, managem return nil } +// AllowDeleteWhilePaused allows the deletion of paused clusters. 
+func (c *ClusterManager) AllowDeleteWhilePaused(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error { + return c.allowDeleteWhilePaused(ctx, cluster, clusterSpec.Cluster) +} + +func (c *ClusterManager) allowDeleteWhilePaused(ctx context.Context, clusterCreds *types.Cluster, cluster *v1alpha1.Cluster) error { + allowDelete := map[string]string{v1alpha1.AllowDeleteWhenPausedAnnotation: "true"} + + if err := c.clusterClient.UpdateAnnotationInNamespace(ctx, cluster.ResourceType(), cluster.Name, allowDelete, clusterCreds, cluster.Namespace); err != nil { + return fmt.Errorf("updating paused annotation in cluster reconciliation: %v", err) + } + + return nil +} + func (c *ClusterManager) PauseEKSAControllerReconcile(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec, provider providers.Provider) error { if clusterSpec.Cluster.IsSelfManaged() { return c.pauseEksaReconcileForManagementAndWorkloadClusters(ctx, cluster, clusterSpec, provider) diff --git a/pkg/clustermanager/cluster_manager_test.go b/pkg/clustermanager/cluster_manager_test.go index 476cfca75239..f2c00e0863ef 100644 --- a/pkg/clustermanager/cluster_manager_test.go +++ b/pkg/clustermanager/cluster_manager_test.go @@ -771,7 +771,7 @@ func TestPauseEKSAControllerReconcileWorkloadCluster(t *testing.T) { tt.Expect(tt.clusterManager.PauseEKSAControllerReconcile(tt.ctx, tt.cluster, tt.clusterSpec, tt.mocks.provider)).To(Succeed()) } -func TestPauseEKSAControllerReconcileWorkloadClusterUpdateAnnotationError(t *testing.T) { +func TestResumeEKSAControllerReconcileWorkloadClusterUpdateAnnotationError(t *testing.T) { tt := newTest(t, clustermanager.WithRetrier(retrier.NewWithMaxRetries(1, 0))) tt.clusterSpec.Cluster = &v1alpha1.Cluster{ ObjectMeta: metav1.ObjectMeta{ @@ -788,15 +788,26 @@ func TestPauseEKSAControllerReconcileWorkloadClusterUpdateAnnotationError(t *tes }, } + datacenterConfig := &v1alpha1.VSphereDatacenterConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: tt.clusterName, + }, + Spec: v1alpha1.VSphereDatacenterConfigSpec{ + Insecure: true, + }, + } + pauseAnnotation := "anywhere.eks.amazonaws.com/paused" + tt.mocks.provider.EXPECT().DatacenterResourceType().Return(eksaVSphereDatacenterResourceType) tt.mocks.provider.EXPECT().MachineResourceType().Return("") - tt.mocks.client.EXPECT().UpdateAnnotationInNamespace(tt.ctx, eksaVSphereDatacenterResourceType, tt.clusterSpec.Cluster.Spec.DatacenterRef.Name, expectedPauseAnnotation, tt.cluster, "").Return(nil) - tt.mocks.client.EXPECT().UpdateAnnotationInNamespace(tt.ctx, eksaClusterResourceType, tt.clusterSpec.Cluster.Name, expectedPauseAnnotation, tt.cluster, "").Return(errors.New("pause eksa cluster error")) + tt.mocks.provider.EXPECT().DatacenterConfig(tt.clusterSpec).Return(datacenterConfig) + tt.mocks.client.EXPECT().RemoveAnnotationInNamespace(tt.ctx, eksaVSphereDatacenterResourceType, tt.clusterSpec.Cluster.Spec.DatacenterRef.Name, pauseAnnotation, tt.cluster, "").Return(nil) + tt.mocks.client.EXPECT().RemoveAnnotationInNamespace(tt.ctx, eksaClusterResourceType, tt.clusterSpec.Cluster.Name, pauseAnnotation, tt.cluster, "").Return(errors.New("pause eksa cluster error")) - tt.Expect(tt.clusterManager.PauseEKSAControllerReconcile(tt.ctx, tt.cluster, tt.clusterSpec, tt.mocks.provider)).NotTo(Succeed()) + tt.Expect(tt.clusterManager.ResumeEKSAControllerReconcile(tt.ctx, tt.cluster, tt.clusterSpec, tt.mocks.provider)).NotTo(Succeed()) } -func TestPauseEKSAControllerReconcileManagementCluster(t *testing.T) { +func 
TestResumeEKSAControllerReconcileManagementCluster(t *testing.T) { tt := newTest(t) tt.clusterSpec.Cluster = &v1alpha1.Cluster{ ObjectMeta: metav1.ObjectMeta{ @@ -813,6 +824,18 @@ func TestPauseEKSAControllerReconcileManagementCluster(t *testing.T) { }, } + tt.clusterSpec.Cluster.PauseReconcile() + + datacenterConfig := &v1alpha1.VSphereDatacenterConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: tt.clusterName, + }, + Spec: v1alpha1.VSphereDatacenterConfigSpec{ + Insecure: true, + }, + } + pauseAnnotation := "anywhere.eks.amazonaws.com/paused" + tt.mocks.client.EXPECT(). ListObjects(tt.ctx, eksaClusterResourceType, "", "", &v1alpha1.ClusterList{}). DoAndReturn(func(_ context.Context, _, _, _ string, obj *v1alpha1.ClusterList) error { @@ -851,34 +874,31 @@ func TestPauseEKSAControllerReconcileManagementCluster(t *testing.T) { }) tt.mocks.provider.EXPECT().DatacenterResourceType().Return(eksaVSphereDatacenterResourceType).Times(2) tt.mocks.provider.EXPECT().MachineResourceType().Return("").Times(2) - tt.mocks.client.EXPECT().UpdateAnnotationInNamespace(tt.ctx, eksaVSphereDatacenterResourceType, tt.clusterSpec.Cluster.Spec.DatacenterRef.Name, expectedPauseAnnotation, tt.cluster, "").Return(nil).Times(2) - tt.mocks.client.EXPECT().UpdateAnnotationInNamespace(tt.ctx, eksaClusterResourceType, tt.clusterSpec.Cluster.Name, expectedPauseAnnotation, tt.cluster, "").Return(nil) - tt.mocks.client.EXPECT().UpdateAnnotationInNamespace( + tt.mocks.provider.EXPECT().DatacenterConfig(tt.clusterSpec).Return(datacenterConfig) + tt.mocks.client.EXPECT().RemoveAnnotationInNamespace(tt.ctx, eksaVSphereDatacenterResourceType, tt.clusterSpec.Cluster.Spec.DatacenterRef.Name, pauseAnnotation, tt.cluster, "").Return(nil).Times(2) + tt.mocks.client.EXPECT().RemoveAnnotationInNamespace(tt.ctx, eksaClusterResourceType, tt.clusterSpec.Cluster.Name, pauseAnnotation, tt.cluster, "").Return(nil) + tt.mocks.client.EXPECT().RemoveAnnotationInNamespace( tt.ctx, eksaClusterResourceType, tt.clusterSpec.Cluster.Name, - map[string]string{ - v1alpha1.ManagedByCLIAnnotation: "true", - }, + v1alpha1.ManagedByCLIAnnotation, tt.cluster, "", ).Return(nil) - tt.mocks.client.EXPECT().UpdateAnnotationInNamespace(tt.ctx, eksaClusterResourceType, "workload-cluster-1", expectedPauseAnnotation, tt.cluster, "").Return(nil) - tt.mocks.client.EXPECT().UpdateAnnotationInNamespace( + tt.mocks.client.EXPECT().RemoveAnnotationInNamespace(tt.ctx, eksaClusterResourceType, "workload-cluster-1", pauseAnnotation, tt.cluster, "").Return(nil) + tt.mocks.client.EXPECT().RemoveAnnotationInNamespace( tt.ctx, eksaClusterResourceType, "workload-cluster-1", - map[string]string{ - v1alpha1.ManagedByCLIAnnotation: "true", - }, + v1alpha1.ManagedByCLIAnnotation, tt.cluster, "", ).Return(nil) - tt.Expect(tt.clusterManager.PauseEKSAControllerReconcile(tt.ctx, tt.cluster, tt.clusterSpec, tt.mocks.provider)).To(Succeed()) + tt.Expect(tt.clusterManager.ResumeEKSAControllerReconcile(tt.ctx, tt.cluster, tt.clusterSpec, tt.mocks.provider)).To(Succeed()) } -func TestPauseEKSAControllerReconcileManagementClusterListObjectsError(t *testing.T) { +func TestResumeEKSAControllerReconcileManagementClusterListObjectsError(t *testing.T) { tt := newTest(t, clustermanager.WithRetrier(retrier.NewWithMaxRetries(1, 0))) tt.clusterSpec.Cluster = &v1alpha1.Cluster{ ObjectMeta: metav1.ObjectMeta{ @@ -891,9 +911,20 @@ func TestPauseEKSAControllerReconcileManagementClusterListObjectsError(t *testin }, } + datacenterConfig := &v1alpha1.VSphereDatacenterConfig{ + ObjectMeta: metav1.ObjectMeta{ + 
Name: tt.clusterName, + }, + Spec: v1alpha1.VSphereDatacenterConfigSpec{ + Insecure: true, + }, + } + + tt.mocks.provider.EXPECT().DatacenterConfig(tt.clusterSpec).Return(datacenterConfig) + tt.mocks.client.EXPECT().ListObjects(tt.ctx, eksaClusterResourceType, "", "", &v1alpha1.ClusterList{}).Return(errors.New("list error")) - tt.Expect(tt.clusterManager.PauseEKSAControllerReconcile(tt.ctx, tt.cluster, tt.clusterSpec, tt.mocks.provider)).NotTo(Succeed()) + tt.Expect(tt.clusterManager.ResumeEKSAControllerReconcile(tt.ctx, tt.cluster, tt.clusterSpec, tt.mocks.provider)).NotTo(Succeed()) } func TestPauseEKSAControllerReconcileWorkloadClusterWithMachineConfig(t *testing.T) { @@ -1084,3 +1115,31 @@ func TestCreateRegistryCredSecretSuccess(t *testing.T) { err := tt.clusterManager.CreateRegistryCredSecret(tt.ctx, tt.cluster) tt.Expect(err).To(BeNil()) } + +func TestAllowDeleteWhilePaused(t *testing.T) { + tests := []struct { + name string + err error + }{ + { + name: "success allow delete while paused", + err: nil, + }, + { + name: "fail allow delete while paused", + err: fmt.Errorf("failure"), + }, + } + allowDelete := map[string]string{v1alpha1.AllowDeleteWhenPausedAnnotation: "true"} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + tt := newTest(t) + cluster := tt.clusterSpec.Cluster + tt.mocks.client.EXPECT().UpdateAnnotationInNamespace(tt.ctx, cluster.ResourceType(), cluster.Name, allowDelete, tt.cluster, cluster.Namespace).Return(test.err) + err := tt.clusterManager.AllowDeleteWhilePaused(tt.ctx, tt.cluster, tt.clusterSpec) + expectedErr := fmt.Errorf("updating paused annotation in cluster reconciliation: %v", test.err) + tt.Expect(err).To(Or(BeNil(), MatchError(expectedErr))) + }) + } +} diff --git a/pkg/task/task.go b/pkg/task/task.go index 6f2e46d9cc55..24ac57991786 100644 --- a/pkg/task/task.go +++ b/pkg/task/task.go @@ -52,6 +52,7 @@ type CommandContext struct { OriginalError error BackupClusterStateDir string ForceCleanup bool + ClusterMover interfaces.ClusterMover } func (c *CommandContext) SetError(err error) { diff --git a/pkg/workflows/create_prep.go b/pkg/workflows/create_prep.go index bba782c31513..8e3b76d8c21c 100644 --- a/pkg/workflows/create_prep.go +++ b/pkg/workflows/create_prep.go @@ -7,22 +7,11 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/aws/eks-anywhere/pkg/workflows/interfaces" + "github.com/aws/eks-anywhere/pkg/clients/kubernetes" ) // CreateNamespaceIfNotPresent creates the namespace on the cluster if it does not already exist. 
-func CreateNamespaceIfNotPresent(ctx context.Context, namespace, kubeconfig string, clientFactory interfaces.ClientFactory) error { - client, err := clientFactory.BuildClientFromKubeconfig(kubeconfig) - if err != nil { - return err - } - - if err := client.Get(ctx, namespace, "", &corev1.Namespace{}); err != nil && !errors.IsNotFound(err) { - return err - } else if err == nil { - return nil - } - +func CreateNamespaceIfNotPresent(ctx context.Context, namespace string, client kubernetes.Client) error { ns := &corev1.Namespace{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", @@ -33,7 +22,7 @@ func CreateNamespaceIfNotPresent(ctx context.Context, namespace, kubeconfig stri }, } - if err = client.Create(ctx, ns); err != nil { + if err := client.Create(ctx, ns); err != nil && !errors.IsAlreadyExists(err) { return err } diff --git a/pkg/workflows/create_prep_test.go b/pkg/workflows/create_prep_test.go index a5df25c1bc92..c3857f08a3df 100644 --- a/pkg/workflows/create_prep_test.go +++ b/pkg/workflows/create_prep_test.go @@ -50,14 +50,11 @@ func newNamespace(name string) *corev1.Namespace { func TestCreateNamespaceNotExistsSuccess(t *testing.T) { test := newCreatePrepTest(t) - kubeconfig := "testpath" namespace := "test-ns" - test.clientFactory.EXPECT().BuildClientFromKubeconfig(kubeconfig).Return(test.client, nil) - test.client.EXPECT().Get(test.ctx, namespace, "", &corev1.Namespace{}).Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, "")) test.client.EXPECT().Create(test.ctx, newNamespace(namespace)).Return(nil) - err := workflows.CreateNamespaceIfNotPresent(test.ctx, namespace, kubeconfig, test.clientFactory) + err := workflows.CreateNamespaceIfNotPresent(test.ctx, namespace, test.client) if err != nil { t.Fatalf("Expected nil, but got %v", err) } @@ -65,57 +62,23 @@ func TestCreateNamespaceNotExistsSuccess(t *testing.T) { func TestCreateNamespaceAlreadyExistsSuccess(t *testing.T) { test := newCreatePrepTest(t) - kubeconfig := "testpath" namespace := "default" - test.clientFactory.EXPECT().BuildClientFromKubeconfig(kubeconfig).Return(test.client, nil) - test.client.EXPECT().Get(test.ctx, namespace, "", &corev1.Namespace{}).Return(nil) + test.client.EXPECT().Create(test.ctx, newNamespace(namespace)).Return(apierrors.NewAlreadyExists(schema.GroupResource{Group: "", Resource: ""}, "")) - err := workflows.CreateNamespaceIfNotPresent(test.ctx, namespace, kubeconfig, test.clientFactory) + err := workflows.CreateNamespaceIfNotPresent(test.ctx, namespace, test.client) if err != nil { t.Fatalf("Expected nil, but got %v", err) } } -func TestCreateNamespaceBuildClientFail(t *testing.T) { - test := newCreatePrepTest(t) - kubeconfig := "testpath" - namespace := "test-ns" - - test.clientFactory.EXPECT().BuildClientFromKubeconfig(kubeconfig).Return(test.client, fmt.Errorf("")) - - err := workflows.CreateNamespaceIfNotPresent(test.ctx, namespace, kubeconfig, test.clientFactory) - - if err == nil { - t.Fatalf("Expected error, but got nil") - } -} - -func TestCreateNamespaceGetNamespaceFail(t *testing.T) { - test := newCreatePrepTest(t) - kubeconfig := "testpath" - namespace := "test-ns" - - test.clientFactory.EXPECT().BuildClientFromKubeconfig(kubeconfig).Return(test.client, nil) - test.client.EXPECT().Get(test.ctx, namespace, "", &corev1.Namespace{}).Return(fmt.Errorf("")) - - err := workflows.CreateNamespaceIfNotPresent(test.ctx, namespace, kubeconfig, test.clientFactory) - - if err == nil { - t.Fatalf("Expected error, but got nil") - } -} - func TestCreateNamespaceFail(t *testing.T) { 
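	// With the refactored helper above, namespace creation is a single Create
	// call that treats an AlreadyExists error as success, so these tests no
	// longer stub a client factory or a preliminary Get.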
test := newCreatePrepTest(t) - kubeconfig := "testpath" namespace := "test-ns" - test.clientFactory.EXPECT().BuildClientFromKubeconfig(kubeconfig).Return(test.client, nil) - test.client.EXPECT().Get(test.ctx, namespace, "", &corev1.Namespace{}).Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, "")) test.client.EXPECT().Create(test.ctx, newNamespace(namespace)).Return(fmt.Errorf("")) - err := workflows.CreateNamespaceIfNotPresent(test.ctx, namespace, kubeconfig, test.clientFactory) + err := workflows.CreateNamespaceIfNotPresent(test.ctx, namespace, test.client) if err == nil { t.Fatalf("Expected error, but got nil") diff --git a/pkg/workflows/interfaces/interfaces.go b/pkg/workflows/interfaces/interfaces.go index 60adf204d0e6..37b2d0e01c9a 100644 --- a/pkg/workflows/interfaces/interfaces.go +++ b/pkg/workflows/interfaces/interfaces.go @@ -42,6 +42,8 @@ type ClusterManager interface { Upgrade(ctx context.Context, cluster *types.Cluster, currentManagementComponents, newManagementComponents *cluster.ManagementComponents, newSpec *cluster.Spec) (*types.ChangeDiff, error) CreateRegistryCredSecret(ctx context.Context, mgmt *types.Cluster) error GenerateAWSIAMKubeconfig(ctx context.Context, cluster *types.Cluster) error + ResumeEKSAControllerReconcile(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec, provider providers.Provider) error + AllowDeleteWhilePaused(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error } type GitOpsManager interface { @@ -98,3 +100,8 @@ type EksaInstaller interface { type ClusterDeleter interface { Run(ctx context.Context, spec *cluster.Spec, managementCluster types.Cluster) error } + +// ClusterMover moves the EKS-A cluster. +type ClusterMover interface { + Move(ctx context.Context, spec *cluster.Spec, srcClient, dstClient kubernetes.Client) error +} diff --git a/pkg/workflows/interfaces/mocks/clients.go b/pkg/workflows/interfaces/mocks/clients.go index be9f7cb42185..1e534ea03926 100644 --- a/pkg/workflows/interfaces/mocks/clients.go +++ b/pkg/workflows/interfaces/mocks/clients.go @@ -1,5 +1,5 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/aws/eks-anywhere/pkg/workflows/interfaces (interfaces: Bootstrapper,ClusterManager,GitOpsManager,Validator,CAPIManager,EksdInstaller,EksdUpgrader,PackageInstaller,ClusterUpgrader,ClusterCreator,ClientFactory,EksaInstaller,ClusterDeleter) +// Source: github.com/aws/eks-anywhere/pkg/workflows/interfaces (interfaces: Bootstrapper,ClusterManager,GitOpsManager,Validator,CAPIManager,EksdInstaller,EksdUpgrader,PackageInstaller,ClusterUpgrader,ClusterCreator,ClientFactory,EksaInstaller,ClusterDeleter,ClusterMover) // Package mocks is a generated GoMock package. package mocks @@ -99,6 +99,20 @@ func (m *MockClusterManager) EXPECT() *MockClusterManagerMockRecorder { return m.recorder } +// AllowDeleteWhilePaused mocks base method. +func (m *MockClusterManager) AllowDeleteWhilePaused(arg0 context.Context, arg1 *types.Cluster, arg2 *cluster.Spec) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AllowDeleteWhilePaused", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// AllowDeleteWhilePaused indicates an expected call of AllowDeleteWhilePaused. 
+func (mr *MockClusterManagerMockRecorder) AllowDeleteWhilePaused(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllowDeleteWhilePaused", reflect.TypeOf((*MockClusterManager)(nil).AllowDeleteWhilePaused), arg0, arg1, arg2) +} + // ApplyBundles mocks base method. func (m *MockClusterManager) ApplyBundles(arg0 context.Context, arg1 *cluster.Spec, arg2 *types.Cluster) error { m.ctrl.T.Helper() @@ -287,6 +301,20 @@ func (mr *MockClusterManagerMockRecorder) ResumeCAPIWorkloadClusters(arg0, arg1 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResumeCAPIWorkloadClusters", reflect.TypeOf((*MockClusterManager)(nil).ResumeCAPIWorkloadClusters), arg0, arg1) } +// ResumeEKSAControllerReconcile mocks base method. +func (m *MockClusterManager) ResumeEKSAControllerReconcile(arg0 context.Context, arg1 *types.Cluster, arg2 *cluster.Spec, arg3 providers.Provider) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResumeEKSAControllerReconcile", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// ResumeEKSAControllerReconcile indicates an expected call of ResumeEKSAControllerReconcile. +func (mr *MockClusterManagerMockRecorder) ResumeEKSAControllerReconcile(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResumeEKSAControllerReconcile", reflect.TypeOf((*MockClusterManager)(nil).ResumeEKSAControllerReconcile), arg0, arg1, arg2, arg3) +} + // SaveLogsManagementCluster mocks base method. func (m *MockClusterManager) SaveLogsManagementCluster(arg0 context.Context, arg1 *cluster.Spec, arg2 *types.Cluster) error { m.ctrl.T.Helper() @@ -892,3 +920,40 @@ func (mr *MockClusterDeleterMockRecorder) Run(arg0, arg1, arg2 interface{}) *gom mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockClusterDeleter)(nil).Run), arg0, arg1, arg2) } + +// MockClusterMover is a mock of ClusterMover interface. +type MockClusterMover struct { + ctrl *gomock.Controller + recorder *MockClusterMoverMockRecorder +} + +// MockClusterMoverMockRecorder is the mock recorder for MockClusterMover. +type MockClusterMoverMockRecorder struct { + mock *MockClusterMover +} + +// NewMockClusterMover creates a new mock instance. +func NewMockClusterMover(ctrl *gomock.Controller) *MockClusterMover { + mock := &MockClusterMover{ctrl: ctrl} + mock.recorder = &MockClusterMoverMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockClusterMover) EXPECT() *MockClusterMoverMockRecorder { + return m.recorder +} + +// Move mocks base method. +func (m *MockClusterMover) Move(arg0 context.Context, arg1 *cluster.Spec, arg2, arg3 kubernetes.Client) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Move", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// Move indicates an expected call of Move. 
+func (mr *MockClusterMoverMockRecorder) Move(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Move", reflect.TypeOf((*MockClusterMover)(nil).Move), arg0, arg1, arg2, arg3) +} diff --git a/pkg/workflows/management/create.go b/pkg/workflows/management/create.go index 9b8e4ab60bb3..23b1425d9f81 100644 --- a/pkg/workflows/management/create.go +++ b/pkg/workflows/management/create.go @@ -22,6 +22,7 @@ type Create struct { packageInstaller interfaces.PackageInstaller clusterCreator interfaces.ClusterCreator eksaInstaller interfaces.EksaInstaller + clusterMover interfaces.ClusterMover } // NewCreate builds a new create construct. @@ -32,6 +33,7 @@ func NewCreate(bootstrapper interfaces.Bootstrapper, packageInstaller interfaces.PackageInstaller, clusterCreator interfaces.ClusterCreator, eksaInstaller interfaces.EksaInstaller, + mover interfaces.ClusterMover, ) *Create { return &Create{ bootstrapper: bootstrapper, @@ -44,6 +46,7 @@ func NewCreate(bootstrapper interfaces.Bootstrapper, packageInstaller: packageInstaller, clusterCreator: clusterCreator, eksaInstaller: eksaInstaller, + clusterMover: mover, } } @@ -62,6 +65,7 @@ func (c *Create) Run(ctx context.Context, clusterSpec *cluster.Spec, validator i PackageInstaller: c.packageInstaller, ClusterCreator: c.clusterCreator, EksaInstaller: c.eksaInstaller, + ClusterMover: c.clusterMover, } return task.NewTaskRunner(&setupAndValidateCreate{}, c.writer).RunTask(ctx, commandContext) diff --git a/pkg/workflows/management/create_install_eksa.go b/pkg/workflows/management/create_install_eksa.go index 93a91627bd5e..b24674f6b44d 100644 --- a/pkg/workflows/management/create_install_eksa.go +++ b/pkg/workflows/management/create_install_eksa.go @@ -58,15 +58,32 @@ func (s *installEksaComponentsOnWorkloadTask) Run(ctx context.Context, commandCo commandContext.ClusterSpec.Cluster.AddManagedByCLIAnnotation() commandContext.ClusterSpec.Cluster.SetManagementComponentsVersion(commandContext.ClusterSpec.EKSARelease.Spec.Version) + srcClient, err := commandContext.ClientFactory.BuildClientFromKubeconfig(commandContext.BootstrapCluster.KubeconfigFile) + if err != nil { + commandContext.SetError(err) + return &workflows.CollectMgmtClusterDiagnosticsTask{} + } + + dstClient, err := commandContext.ClientFactory.BuildClientFromKubeconfig(commandContext.WorkloadCluster.KubeconfigFile) + if err != nil { + commandContext.SetError(err) + return &workflows.CollectMgmtClusterDiagnosticsTask{} + } + if commandContext.ClusterSpec.Cluster.Namespace != "" { - if err := workflows.CreateNamespaceIfNotPresent(ctx, commandContext.ClusterSpec.Cluster.Namespace, commandContext.WorkloadCluster.KubeconfigFile, commandContext.ClientFactory); err != nil { + if err := workflows.CreateNamespaceIfNotPresent(ctx, commandContext.ClusterSpec.Cluster.Namespace, dstClient); err != nil { commandContext.SetError(err) return &workflows.CollectMgmtClusterDiagnosticsTask{} } } - logger.Info("Applying cluster spec to workload cluster") - if err = commandContext.ClusterCreator.Run(ctx, commandContext.ClusterSpec, *commandContext.WorkloadCluster); err != nil { + logger.Info("Moving cluster spec to workload cluster") + if err = commandContext.ClusterMover.Move(ctx, commandContext.ClusterSpec, srcClient, dstClient); err != nil { + commandContext.SetError(err) + return &workflows.CollectMgmtClusterDiagnosticsTask{} + } + + if err = commandContext.ClusterManager.ResumeEKSAControllerReconcile(ctx, commandContext.WorkloadCluster, 
commandContext.ClusterSpec, commandContext.Provider); err != nil { commandContext.SetError(err) return &workflows.CollectMgmtClusterDiagnosticsTask{} } diff --git a/pkg/workflows/management/create_test.go b/pkg/workflows/management/create_test.go index 4884b57be875..3ec27e74b5a4 100644 --- a/pkg/workflows/management/create_test.go +++ b/pkg/workflows/management/create_test.go @@ -8,9 +8,7 @@ import ( "github.com/golang/mock/gomock" corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" "github.com/aws/eks-anywhere/internal/test" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" @@ -49,6 +47,7 @@ type createTestSetup struct { workflow *management.Create client *clientmocks.MockClient clientFactory *mocks.MockClientFactory + mover *mocks.MockClusterMover } func newCreateTest(t *testing.T) *createTestSetup { @@ -71,6 +70,7 @@ func newCreateTest(t *testing.T) *createTestSetup { validator := mocks.NewMockValidator(mockCtrl) client := clientmocks.NewMockClient(mockCtrl) clientFactory := mocks.NewMockClientFactory(mockCtrl) + mover := mocks.NewMockClusterMover(mockCtrl) workflow := management.NewCreate( bootstrapper, @@ -83,6 +83,7 @@ func newCreateTest(t *testing.T) *createTestSetup { packageInstaller, clusterCreator, eksaInstaller, + mover, ) for _, e := range featureEnvVars { @@ -119,6 +120,7 @@ func newCreateTest(t *testing.T) *createTestSetup { managementComponents: managementComponents, clusterSpec: clusterSpec, client: client, + mover: mover, } } @@ -217,7 +219,7 @@ func (c *createTestSetup) expectMoveManagement(err error) { c.ctx, c.bootstrapCluster, c.workloadCluster, c.workloadCluster.Name, c.clusterSpec, gomock.Any()).Return(err) } -func (c *createTestSetup) expectInstallEksaComponentsWorkload(err1, err2, err3 error) { +func (c *createTestSetup) expectInstallEksaComponentsWorkload(err1, err2, err3, err4, err5 error) { gomock.InOrder( c.eksdInstaller.EXPECT().InstallEksdCRDs(c.ctx, c.clusterSpec, c.workloadCluster).Return(err1), @@ -231,9 +233,13 @@ func (c *createTestSetup) expectInstallEksaComponentsWorkload(err1, err2, err3 e c.eksdInstaller.EXPECT().InstallEksdManifest( c.ctx, c.clusterSpec, c.workloadCluster), - c.clientFactory.EXPECT().BuildClientFromKubeconfig(c.workloadCluster.KubeconfigFile).Return(c.client, err3), + c.clientFactory.EXPECT().BuildClientFromKubeconfig(c.bootstrapCluster.KubeconfigFile).Return(c.client, err2), - c.clusterCreator.EXPECT().Run(c.ctx, c.clusterSpec, *c.workloadCluster).Return(err2), + c.clientFactory.EXPECT().BuildClientFromKubeconfig(c.workloadCluster.KubeconfigFile).Return(c.client, err3).MaxTimes(1), + + c.mover.EXPECT().Move(c.ctx, c.clusterSpec, c.client, c.client).Return(err4).MaxTimes(1), + + c.clusterManager.EXPECT().ResumeEKSAControllerReconcile(c.ctx, c.workloadCluster, c.clusterSpec, c.provider).Return(err5).MaxTimes(1), ) } @@ -256,7 +262,6 @@ func (c *createTestSetup) expectCreateNamespace() { }, ObjectMeta: v1.ObjectMeta{Name: n}, } - c.client.EXPECT().Get(c.ctx, n, "", &corev1.Namespace{}).Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, "")).MaxTimes(2) c.client.EXPECT().Create(c.ctx, ns).MaxTimes(2) } @@ -291,7 +296,7 @@ func TestCreateRunSuccess(t *testing.T) { test.expectInstallResourcesOnManagementTask(nil) test.expectPauseReconcile(nil) test.expectMoveManagement(nil) - test.expectInstallEksaComponentsWorkload(nil, nil, nil) + test.expectInstallEksaComponentsWorkload(nil, nil, nil, nil, nil) 
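	// The five arguments correspond, in order, to err1..err5 in the helper
	// above: EKS-D CRD install, bootstrap-client build, workload-client
	// build, cluster move, and reconcile resume.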
test.expectInstallGitOpsManager() test.expectWriteClusterConfig() test.expectDeleteBootstrap(nil) @@ -734,7 +739,7 @@ func TestCreateEKSAWorkloadFailure(t *testing.T) { test.expectInstallResourcesOnManagementTask(nil) test.expectPauseReconcile(nil) test.expectMoveManagement(nil) - test.expectInstallEksaComponentsWorkload(nil, fmt.Errorf("test"), nil) + test.expectInstallEksaComponentsWorkload(nil, nil, nil, fmt.Errorf("test"), nil) test.expectCreateNamespace() test.clusterManager.EXPECT().SaveLogsManagementCluster(test.ctx, test.clusterSpec, test.bootstrapCluster) @@ -747,7 +752,7 @@ func TestCreateEKSAWorkloadFailure(t *testing.T) { } } -func TestCreateEKSAWorkloadNamespaceFailure(t *testing.T) { +func TestCreateSrcClientFailure(t *testing.T) { test := newCreateTest(t) test.expectSetup() test.expectPreflightValidationsToPass() @@ -755,25 +760,59 @@ func TestCreateEKSAWorkloadNamespaceFailure(t *testing.T) { test.expectCAPIInstall(nil, nil, nil) test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil) test.expectCreateWorkload(nil, nil, nil, nil, nil, nil) - test.expectCreateNamespace() test.expectInstallResourcesOnManagementTask(nil) test.expectPauseReconcile(nil) test.expectMoveManagement(nil) - gomock.InOrder( + test.expectInstallEksaComponentsWorkload(nil, fmt.Errorf(""), nil, nil, nil) + test.expectCreateNamespace() + + test.clusterManager.EXPECT().SaveLogsManagementCluster(test.ctx, test.clusterSpec, test.bootstrapCluster) - test.eksdInstaller.EXPECT().InstallEksdCRDs(test.ctx, test.clusterSpec, test.workloadCluster), + test.writer.EXPECT().Write(fmt.Sprintf("%s-checkpoint.yaml", test.clusterSpec.Cluster.Name), gomock.Any()) - test.eksaInstaller.EXPECT().Install( - test.ctx, logger.Get(), test.workloadCluster, test.managementComponents, test.clusterSpec), + err := test.run() + if err == nil { + t.Fatalf("Create.Run() expected to return an error %v", err) + } +} + +func TestCreateDstClientFailure(t *testing.T) { + test := newCreateTest(t) + test.expectSetup() + test.expectPreflightValidationsToPass() + test.expectCreateBootstrap() + test.expectCAPIInstall(nil, nil, nil) + test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil) + test.expectCreateWorkload(nil, nil, nil, nil, nil, nil) + test.expectInstallResourcesOnManagementTask(nil) + test.expectPauseReconcile(nil) + test.expectMoveManagement(nil) + test.expectInstallEksaComponentsWorkload(nil, nil, fmt.Errorf(""), nil, nil) + test.expectCreateNamespace() - test.provider.EXPECT().InstallCustomProviderComponents( - test.ctx, test.workloadCluster.KubeconfigFile), + test.clusterManager.EXPECT().SaveLogsManagementCluster(test.ctx, test.clusterSpec, test.bootstrapCluster) - test.eksdInstaller.EXPECT().InstallEksdManifest( - test.ctx, test.clusterSpec, test.workloadCluster), + test.writer.EXPECT().Write(fmt.Sprintf("%s-checkpoint.yaml", test.clusterSpec.Cluster.Name), gomock.Any()) - test.clientFactory.EXPECT().BuildClientFromKubeconfig(test.workloadCluster.KubeconfigFile).Return(test.client, fmt.Errorf("")), - ) + err := test.run() + if err == nil { + t.Fatalf("Create.Run() expected to return an error %v", err) + } +} + +func TestCreateEKSAResumeWorkloadFailure(t *testing.T) { + test := newCreateTest(t) + test.expectSetup() + test.expectPreflightValidationsToPass() + test.expectCreateBootstrap() + test.expectCAPIInstall(nil, nil, nil) + test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil) + test.expectCreateWorkload(nil, nil, nil, nil, nil, nil) + test.expectInstallResourcesOnManagementTask(nil) + 
test.expectPauseReconcile(nil) + test.expectMoveManagement(nil) + test.expectInstallEksaComponentsWorkload(nil, nil, nil, nil, fmt.Errorf("test")) + test.expectCreateNamespace() test.clusterManager.EXPECT().SaveLogsManagementCluster(test.ctx, test.clusterSpec, test.bootstrapCluster) @@ -796,7 +835,7 @@ func TestCreateGitOPsFailure(t *testing.T) { test.expectInstallResourcesOnManagementTask(nil) test.expectPauseReconcile(nil) test.expectMoveManagement(nil) - test.expectInstallEksaComponentsWorkload(nil, nil, nil) + test.expectInstallEksaComponentsWorkload(nil, nil, nil, nil, nil) test.expectCreateNamespace() test.expectDatacenterConfig() test.expectMachineConfigs() @@ -825,7 +864,7 @@ func TestCreateWriteConfigFailure(t *testing.T) { test.expectInstallResourcesOnManagementTask(nil) test.expectPauseReconcile(nil) test.expectMoveManagement(nil) - test.expectInstallEksaComponentsWorkload(nil, nil, nil) + test.expectInstallEksaComponentsWorkload(nil, nil, nil, nil, nil) test.expectInstallGitOpsManager() test.expectPreflightValidationsToPass() test.expectCreateNamespace() @@ -860,7 +899,7 @@ func TestCreateWriteConfigAWSIAMFailure(t *testing.T) { test.expectInstallResourcesOnManagementTask(nil) test.expectPauseReconcile(nil) test.expectMoveManagement(nil) - test.expectInstallEksaComponentsWorkload(nil, nil, nil) + test.expectInstallEksaComponentsWorkload(nil, nil, nil, nil, nil) test.expectInstallGitOpsManager() test.expectPreflightValidationsToPass() test.clusterSpec.AWSIamConfig = &v1alpha1.AWSIamConfig{} @@ -896,7 +935,7 @@ func TestCreateRunDeleteBootstrapFailure(t *testing.T) { test.expectInstallResourcesOnManagementTask(nil) test.expectPauseReconcile(nil) test.expectMoveManagement(nil) - test.expectInstallEksaComponentsWorkload(nil, nil, nil) + test.expectInstallEksaComponentsWorkload(nil, nil, nil, nil, nil) test.expectInstallGitOpsManager() test.expectWriteClusterConfig() test.expectDeleteBootstrap(fmt.Errorf("test")) @@ -912,3 +951,20 @@ func TestCreateRunDeleteBootstrapFailure(t *testing.T) { t.Fatalf("Create.Run() err = %v, want err = nil", err) } } + +func TestCreateNamespaceClientFailure(t *testing.T) { + test := newCreateTest(t) + test.expectSetup() + test.expectCreateBootstrap() + test.expectPreflightValidationsToPass() + test.expectCAPIInstall(nil, nil, nil) + test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil) + test.clientFactory.EXPECT().BuildClientFromKubeconfig(test.bootstrapCluster.KubeconfigFile).Return(test.client, fmt.Errorf("")) + test.clusterManager.EXPECT().SaveLogsManagementCluster(test.ctx, test.clusterSpec, test.bootstrapCluster) + test.writer.EXPECT().Write("test-cluster-checkpoint.yaml", gomock.Any(), gomock.Any()) + + err := test.run() + if err == nil { + t.Fatalf("Create.Run() err = %v, want err = nil", err) + } +} diff --git a/pkg/workflows/management/create_workload.go b/pkg/workflows/management/create_workload.go index e3289a297816..bc8f5fc946b3 100644 --- a/pkg/workflows/management/create_workload.go +++ b/pkg/workflows/management/create_workload.go @@ -18,8 +18,14 @@ func (s *createWorkloadClusterTask) Run(ctx context.Context, commandContext *tas commandContext.ClusterSpec.Cluster.AddManagedByCLIAnnotation() commandContext.ClusterSpec.Cluster.SetManagementComponentsVersion(commandContext.ClusterSpec.EKSARelease.Spec.Version) + client, err := commandContext.ClientFactory.BuildClientFromKubeconfig(commandContext.BootstrapCluster.KubeconfigFile) + if err != nil { + commandContext.SetError(err) + return &workflows.CollectMgmtClusterDiagnosticsTask{} 
+ } + if commandContext.ClusterSpec.Cluster.Namespace != "" { - if err := workflows.CreateNamespaceIfNotPresent(ctx, commandContext.ClusterSpec.Cluster.Namespace, commandContext.BootstrapCluster.KubeconfigFile, commandContext.ClientFactory); err != nil { + if err := workflows.CreateNamespaceIfNotPresent(ctx, commandContext.ClusterSpec.Cluster.Namespace, client); err != nil { commandContext.SetError(err) return &workflows.CollectMgmtClusterDiagnosticsTask{} } diff --git a/pkg/workflows/management/delete.go b/pkg/workflows/management/delete.go index a8af5e4b38aa..9ed7adf86484 100644 --- a/pkg/workflows/management/delete.go +++ b/pkg/workflows/management/delete.go @@ -22,6 +22,7 @@ type Delete struct { eksdInstaller interfaces.EksdInstaller eksaInstaller interfaces.EksaInstaller clientFactory interfaces.ClientFactory + clusterMover interfaces.ClusterMover } // NewDelete builds a new delete construct. @@ -34,6 +35,7 @@ func NewDelete(bootstrapper interfaces.Bootstrapper, eksdInstaller interfaces.EksdInstaller, eksaInstaller interfaces.EksaInstaller, clientFactory interfaces.ClientFactory, + mover interfaces.ClusterMover, ) *Delete { return &Delete{ bootstrapper: bootstrapper, @@ -45,6 +47,7 @@ func NewDelete(bootstrapper interfaces.Bootstrapper, eksdInstaller: eksdInstaller, eksaInstaller: eksaInstaller, clientFactory: clientFactory, + clusterMover: mover, } } @@ -62,6 +65,7 @@ func (c *Delete) Run(ctx context.Context, workload *types.Cluster, clusterSpec * EksdInstaller: c.eksdInstaller, EksaInstaller: c.eksaInstaller, ClientFactory: c.clientFactory, + ClusterMover: c.clusterMover, } return task.NewTaskRunner(&setupAndValidateDelete{}, c.writer).RunTask(ctx, commandContext) diff --git a/pkg/workflows/management/delete_install_eksa.go b/pkg/workflows/management/delete_install_eksa.go index b7dd96e6c93c..30f447113985 100644 --- a/pkg/workflows/management/delete_install_eksa.go +++ b/pkg/workflows/management/delete_install_eksa.go @@ -3,15 +3,9 @@ package management import ( "context" - "github.com/pkg/errors" - - "github.com/aws/eks-anywhere/pkg/clients/kubernetes" - "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/logger" "github.com/aws/eks-anywhere/pkg/task" - "github.com/aws/eks-anywhere/pkg/types" "github.com/aws/eks-anywhere/pkg/workflows" - "github.com/aws/eks-anywhere/pkg/workflows/interfaces" ) type installEksaComponentsOnBootstrapForDeleteTask struct{} @@ -24,16 +18,34 @@ func (s *installEksaComponentsOnBootstrapForDeleteTask) Run(ctx context.Context, return &workflows.CollectDiagnosticsTask{} } - commandContext.ClusterSpec.Cluster.PauseReconcile() - commandContext.ClusterSpec.Cluster.AllowDeleteWhilePaused() - commandContext.ClusterSpec.Cluster.SetFinalizers([]string{"clusters.anywhere.eks.amazonaws.com/finalizer"}) - commandContext.ClusterSpec.Cluster.AddManagedByCLIAnnotation() - err = applyClusterSpecOnBootstrapForDeleteTask(ctx, commandContext.ClusterSpec, commandContext.BootstrapCluster, commandContext.ClientFactory) + srcClient, err := commandContext.ClientFactory.BuildClientFromKubeconfig(commandContext.WorkloadCluster.KubeconfigFile) + if err != nil { + commandContext.SetError(err) + return &workflows.CollectMgmtClusterDiagnosticsTask{} + } + + dstClient, err := commandContext.ClientFactory.BuildClientFromKubeconfig(commandContext.BootstrapCluster.KubeconfigFile) + if err != nil { + commandContext.SetError(err) + return &workflows.CollectMgmtClusterDiagnosticsTask{} + } + + if err := workflows.CreateNamespaceIfNotPresent(ctx, 
commandContext.ClusterSpec.Cluster.Namespace, dstClient); err != nil { + commandContext.SetError(err) + return &workflows.CollectMgmtClusterDiagnosticsTask{} + } + + err = commandContext.ClusterMover.Move(ctx, commandContext.ClusterSpec, srcClient, dstClient) if err != nil { commandContext.SetError(err) return &workflows.CollectDiagnosticsTask{} } + if err = commandContext.ClusterManager.AllowDeleteWhilePaused(ctx, commandContext.BootstrapCluster, commandContext.ClusterSpec); err != nil { + commandContext.SetError(err) + return &workflows.CollectMgmtClusterDiagnosticsTask{} + } + return &deleteManagementCluster{} } @@ -48,26 +60,3 @@ func (s *installEksaComponentsOnBootstrapForDeleteTask) Restore(ctx context.Cont func (s *installEksaComponentsOnBootstrapForDeleteTask) Checkpoint() *task.CompletedTask { return nil } - -func applyClusterSpecOnBootstrapForDeleteTask(ctx context.Context, spec *cluster.Spec, cluster *types.Cluster, clientFactory interfaces.ClientFactory) error { - if err := workflows.CreateNamespaceIfNotPresent(ctx, spec.Cluster.Namespace, cluster.KubeconfigFile, clientFactory); err != nil { - return errors.Wrapf(err, "creating namespace on bootstrap") - } - - client, err := clientFactory.BuildClientFromKubeconfig(cluster.KubeconfigFile) - if err != nil { - return errors.Wrap(err, "building client to apply cluster spec changes") - } - - for _, obj := range spec.ClusterAndChildren() { - if err := client.ApplyServerSide(ctx, - "eks-a-cli", - obj, - kubernetes.ApplyServerSideOptions{ForceOwnership: true}, - ); err != nil { - return errors.Wrapf(err, "applying cluster spec") - } - } - - return nil -} diff --git a/pkg/workflows/management/delete_test.go b/pkg/workflows/management/delete_test.go index 32c2cac65325..05162cf57995 100644 --- a/pkg/workflows/management/delete_test.go +++ b/pkg/workflows/management/delete_test.go @@ -8,9 +8,6 @@ import ( "github.com/golang/mock/gomock" corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" "github.com/aws/eks-anywhere/internal/test" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" @@ -48,6 +45,7 @@ type deleteTestSetup struct { clientFactory *mocks.MockClientFactory managementComponents *cluster.ManagementComponents client *clientmocks.MockClient + mover *mocks.MockClusterMover } func newDeleteTest(t *testing.T) *deleteTestSetup { @@ -73,6 +71,7 @@ func newDeleteTest(t *testing.T) *deleteTestSetup { s.GitOpsConfig = &v1alpha1.GitOpsConfig{} }) managementComponents := cluster.ManagementComponentsFromBundles(clusterSpec.Bundles) + mover := mocks.NewMockClusterMover(mockCtrl) workload := management.NewDelete( bootstrapper, @@ -84,6 +83,7 @@ func newDeleteTest(t *testing.T) *deleteTestSetup { eksdInstaller, eksaInstaller, clientFactory, + mover, ) for _, e := range featureEnvVars { @@ -110,6 +110,7 @@ func newDeleteTest(t *testing.T) *deleteTestSetup { clientFactory: clientFactory, managementComponents: managementComponents, client: client, + mover: mover, } } @@ -166,7 +167,7 @@ func (c *deleteTestSetup) expectMoveCAPI(err1, err2 error) { c.clusterManager.EXPECT().MoveCAPI(c.ctx, c.workloadCluster, c.bootstrapCluster, c.workloadCluster.Name, c.clusterSpec, gomock.Any()).Return(err2) } -func (c *deleteTestSetup) expectInstallEksaComponentsBootstrap(err1, err2, err3, err4, err5 error) { +func (c *deleteTestSetup) expectInstallEksaComponentsBootstrap(err1, err2, err3, err4, err5, err6, err7, err8, err9 error) { gomock.InOrder( 
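		// Nine error slots, in order: EKS-D CRDs (err1), EKS-A install and
		// provider components (err2, err3, wired in context elided by this
		// hunk), EKS-D manifest (err4), workload client (err5), bootstrap
		// client (err6), namespace create (err7), cluster move (err8), and
		// the allow-delete annotation (err9).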
c.eksdInstaller.EXPECT().InstallEksdCRDs(c.ctx, c.clusterSpec, c.bootstrapCluster).Return(err1).AnyTimes(), @@ -179,7 +180,15 @@ func (c *deleteTestSetup) expectInstallEksaComponentsBootstrap(err1, err2, err3, c.eksdInstaller.EXPECT().InstallEksdManifest( c.ctx, c.clusterSpec, c.bootstrapCluster).Return(err4).AnyTimes(), - c.clientFactory.EXPECT().BuildClientFromKubeconfig(c.bootstrapCluster.KubeconfigFile).Return(c.client, err5).AnyTimes(), + c.clientFactory.EXPECT().BuildClientFromKubeconfig(c.workloadCluster.KubeconfigFile).Return(c.client, err5).MaxTimes(1), + + c.clientFactory.EXPECT().BuildClientFromKubeconfig(c.bootstrapCluster.KubeconfigFile).Return(c.client, err6).MaxTimes(1), + + c.client.EXPECT().Create(c.ctx, gomock.AssignableToTypeOf(&corev1.Namespace{})).Return(err7).AnyTimes(), + + c.mover.EXPECT().Move(c.ctx, c.clusterSpec, c.client, c.client).Return(err8).AnyTimes(), + + c.clusterManager.EXPECT().AllowDeleteWhilePaused(c.ctx, c.bootstrapCluster, c.clusterSpec).Return(err9).AnyTimes(), ) } @@ -195,19 +204,6 @@ func (c *deleteTestSetup) expectApplyOnBootstrap(err error) { c.client.EXPECT().ApplyServerSide(c.ctx, "eks-a-cli", gomock.Any(), gomock.Any()).Return(err).AnyTimes() } -func (c *deleteTestSetup) expectCreateNamespace() { - n := c.clusterSpec.Cluster.Namespace - ns := &corev1.Namespace{ - TypeMeta: v1.TypeMeta{ - APIVersion: "v1", - Kind: "Namespace", - }, - ObjectMeta: v1.ObjectMeta{Name: n}, - } - c.client.EXPECT().Get(c.ctx, n, "", &corev1.Namespace{}).Return(apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: ""}, "")) - c.client.EXPECT().Create(c.ctx, ns) -} - func TestDeleteRunSuccess(t *testing.T) { features.ClearCache() os.Setenv(features.UseControllerForCli, "true") @@ -218,12 +214,11 @@ func TestDeleteRunSuccess(t *testing.T) { test.expectPreCAPI(nil) test.expectInstallCAPI(nil) test.expectMoveCAPI(nil, nil) - test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil, nil) + test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil, nil, nil, nil, nil, nil) test.expectApplyOnBootstrap(nil) test.expectDeleteCluster(nil, nil) test.expectCleanupGitRepo(nil) test.expectDeleteBootstrap(nil) - test.expectCreateNamespace() err := test.run() if err != nil { @@ -355,7 +350,7 @@ func TestDeleteRunFailResumeReconcile(t *testing.T) { test.expectPreCAPI(nil) test.expectInstallCAPI(nil) test.expectMoveCAPI(nil, nil) - test.expectInstallEksaComponentsBootstrap(fmt.Errorf(""), nil, nil, nil, nil) + test.expectInstallEksaComponentsBootstrap(fmt.Errorf(""), nil, nil, nil, nil, nil, nil, nil, nil) test.expectSaveLogsManagement() test.expectSaveLogsWorkload() err := test.run() @@ -374,7 +369,7 @@ func TestDeleteRunFailAddAnnotation(t *testing.T) { test.expectPreCAPI(nil) test.expectInstallCAPI(nil) test.expectMoveCAPI(nil, nil) - test.expectInstallEksaComponentsBootstrap(nil, fmt.Errorf(""), nil, nil, nil) + test.expectInstallEksaComponentsBootstrap(nil, fmt.Errorf(""), nil, nil, nil, nil, nil, nil, nil) test.expectSaveLogsManagement() test.expectSaveLogsWorkload() @@ -394,7 +389,7 @@ func TestDeleteRunFailProviderInstall(t *testing.T) { test.expectPreCAPI(nil) test.expectInstallCAPI(nil) test.expectMoveCAPI(nil, nil) - test.expectInstallEksaComponentsBootstrap(nil, nil, fmt.Errorf(""), nil, nil) + test.expectInstallEksaComponentsBootstrap(nil, nil, fmt.Errorf(""), nil, nil, nil, nil, nil, nil) test.expectSaveLogsManagement() test.expectSaveLogsWorkload() @@ -414,7 +409,7 @@ func TestDeleteRunFailEksdInstall(t *testing.T) { test.expectPreCAPI(nil) 
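	// The fmt.Errorf below lands in the fourth slot, the EKS-D manifest
	// install, matching this test's name.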
test.expectInstallCAPI(nil) test.expectMoveCAPI(nil, nil) - test.expectInstallEksaComponentsBootstrap(nil, nil, nil, fmt.Errorf(""), nil) + test.expectInstallEksaComponentsBootstrap(nil, nil, nil, fmt.Errorf(""), nil, nil, nil, nil, nil) test.expectSaveLogsManagement() test.expectSaveLogsWorkload() @@ -424,7 +419,7 @@ func TestDeleteRunFailEksdInstall(t *testing.T) { } } -func TestDeleteRunFailBuildClient(t *testing.T) { +func TestDeleteRunFailBuildSrcClient(t *testing.T) { features.ClearCache() os.Setenv(features.UseControllerForCli, "true") test := newDeleteTest(t) @@ -434,9 +429,65 @@ func TestDeleteRunFailBuildClient(t *testing.T) { test.expectPreCAPI(nil) test.expectInstallCAPI(nil) test.expectMoveCAPI(nil, nil) - test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil, fmt.Errorf("")) + test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil, fmt.Errorf(""), nil, nil, nil, nil) + test.expectSaveLogsManagement() + + err := test.run() + if err == nil { + t.Fatalf("Delete.Run() err = %v, want err = nil", err) + } +} + +func TestDeleteRunFailBuildDstClient(t *testing.T) { + features.ClearCache() + os.Setenv(features.UseControllerForCli, "true") + test := newDeleteTest(t) + test.expectSetup(nil) + test.expectBootstrapOpts(nil) + test.expectCreateBootstrap(nil) + test.expectPreCAPI(nil) + test.expectInstallCAPI(nil) + test.expectMoveCAPI(nil, nil) + test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil, nil, fmt.Errorf(""), nil, nil, nil) + test.expectSaveLogsManagement() + + err := test.run() + if err == nil { + t.Fatalf("Delete.Run() err = %v, want err = nil", err) + } +} + +func TestDeleteRunFailCreateNamespace(t *testing.T) { + features.ClearCache() + os.Setenv(features.UseControllerForCli, "true") + test := newDeleteTest(t) + test.expectSetup(nil) + test.expectBootstrapOpts(nil) + test.expectCreateBootstrap(nil) + test.expectPreCAPI(nil) + test.expectInstallCAPI(nil) + test.expectMoveCAPI(nil, nil) + test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil, nil, nil, fmt.Errorf(""), nil, nil) + test.expectSaveLogsManagement() + + err := test.run() + if err == nil { + t.Fatalf("Delete.Run() err = %v, want err = nil", err) + } +} + +func TestDeleteRunFailAllowDeleteWhilePaused(t *testing.T) { + features.ClearCache() + os.Setenv(features.UseControllerForCli, "true") + test := newDeleteTest(t) + test.expectSetup(nil) + test.expectBootstrapOpts(nil) + test.expectCreateBootstrap(nil) + test.expectPreCAPI(nil) + test.expectInstallCAPI(nil) + test.expectMoveCAPI(nil, nil) + test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil, nil, nil, nil, nil, fmt.Errorf("")) test.expectSaveLogsManagement() - test.expectSaveLogsWorkload() err := test.run() if err == nil { @@ -454,8 +505,7 @@ func TestDeleteRunFailPostDelete(t *testing.T) { test.expectPreCAPI(nil) test.expectInstallCAPI(nil) test.expectMoveCAPI(nil, nil) - test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil, nil) - test.expectCreateNamespace() + test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil, nil, nil, nil, nil, nil) test.expectApplyOnBootstrap(nil) test.expectDeleteCluster(nil, fmt.Errorf("")) test.expectSaveLogsManagement() @@ -476,8 +526,7 @@ func TestDeleteRunFailCleanupGit(t *testing.T) { test.expectPreCAPI(nil) test.expectInstallCAPI(nil) test.expectMoveCAPI(nil, nil) - test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil, nil) - test.expectCreateNamespace() + test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil, nil, nil, nil, nil, nil) 
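	// All nine bootstrap-install steps succeed here; the injected failure
	// comes from the git repo cleanup expectation below.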
test.expectApplyOnBootstrap(nil) test.expectDeleteCluster(nil, nil) test.expectCleanupGitRepo(fmt.Errorf("")) @@ -500,9 +549,8 @@ func TestDeleteRunFailDeleteBootstrap(t *testing.T) { test.expectPreCAPI(nil) test.expectInstallCAPI(nil) test.expectMoveCAPI(nil, nil) - test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil, nil) + test.expectInstallEksaComponentsBootstrap(nil, nil, nil, nil, nil, nil, nil, nil, nil) test.expectApplyOnBootstrap(nil) - test.expectCreateNamespace() test.expectDeleteCluster(nil, nil) test.expectCleanupGitRepo(nil) test.expectDeleteBootstrap(fmt.Errorf("")) diff --git a/pkg/workflows/workload/createcluster.go b/pkg/workflows/workload/createcluster.go index b6bca3394383..0c93e91b4dec 100644 --- a/pkg/workflows/workload/createcluster.go +++ b/pkg/workflows/workload/createcluster.go @@ -15,8 +15,14 @@ type createCluster struct{} func (c *createCluster) Run(ctx context.Context, commandContext *task.CommandContext) task.Task { logger.Info("Creating workload cluster") + client, err := commandContext.ClientFactory.BuildClientFromKubeconfig(commandContext.ManagementCluster.KubeconfigFile) + if err != nil { + commandContext.SetError(err) + return &workflows.CollectMgmtClusterDiagnosticsTask{} + } + if commandContext.ClusterSpec.Cluster.Namespace != "" { - if err := workflows.CreateNamespaceIfNotPresent(ctx, commandContext.ClusterSpec.Cluster.Namespace, commandContext.ManagementCluster.KubeconfigFile, commandContext.ClientFactory); err != nil { + if err := workflows.CreateNamespaceIfNotPresent(ctx, commandContext.ClusterSpec.Cluster.Namespace, client); err != nil { commandContext.SetError(err) return &workflows.CollectMgmtClusterDiagnosticsTask{} } From 471504b8ba389ae42c4bf09efaf668eb61807317 Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Fri, 5 Apr 2024 16:31:48 -0700 Subject: [PATCH 031/193] [PR BOT] Generate release testdata files (#7936) --- release/cli/pkg/test/testdata/main-bundle-release.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/release/cli/pkg/test/testdata/main-bundle-release.yaml b/release/cli/pkg/test/testdata/main-bundle-release.yaml index 36156b6e03d2..ae4d2148612c 100644 --- a/release/cli/pkg/test/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/test/testdata/main-bundle-release.yaml @@ -412,7 +412,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: source-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.2.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.2.5-eks-a-v0.0.0-dev-build.1 version: v2.2.3+abcdef1 haproxy: image: @@ -1190,7 +1190,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: source-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.2.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.2.5-eks-a-v0.0.0-dev-build.1 version: v2.2.3+abcdef1 haproxy: image: @@ -1968,7 +1968,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: source-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.2.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.2.5-eks-a-v0.0.0-dev-build.1 version: v2.2.3+abcdef1 
haproxy: image: @@ -2746,7 +2746,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: source-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.2.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.2.5-eks-a-v0.0.0-dev-build.1 version: v2.2.3+abcdef1 haproxy: image: @@ -3524,7 +3524,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: source-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.2.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.2.5-eks-a-v0.0.0-dev-build.1 version: v2.2.3+abcdef1 haproxy: image: From add22721ff384e3b53acd9d779c7f750e0f788f0 Mon Sep 17 00:00:00 2001 From: Abhinav Pandey Date: Mon, 8 Apr 2024 15:11:57 -0700 Subject: [PATCH 032/193] Properly generate support bundle for packages test failures (#7941) --- test/e2e/curatedpackages.go | 1 - test/framework/cluster.go | 25 +++++++++++++------------ 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/test/e2e/curatedpackages.go b/test/e2e/curatedpackages.go index cd7f703f2f54..b12718fde132 100644 --- a/test/e2e/curatedpackages.go +++ b/test/e2e/curatedpackages.go @@ -21,7 +21,6 @@ import ( func runCuratedPackageInstall(test *framework.ClusterE2ETest) { test.SetPackageBundleActive() - test.GenerateSupportBundleOnCleanupIfTestFailed() err := WaitForPackageToBeInstalled(test, context.Background(), "eks-anywhere-packages", 3*time.Minute) if err != nil { test.T.Fatalf("packages controller not in installed state: %s", err) diff --git a/test/framework/cluster.go b/test/framework/cluster.go index fb7126e017f1..a0296accc614 100644 --- a/test/framework/cluster.go +++ b/test/framework/cluster.go @@ -954,6 +954,15 @@ func (e *ClusterE2ETest) GenerateSupportBundleOnCleanupIfTestFailed(opts ...Comm }) } +// GenerateSupportBundleIfTestFailed runs generates a support bundle if the test failed. +func (e *ClusterE2ETest) GenerateSupportBundleIfTestFailed(opts ...CommandOpt) { + if e.T.Failed() { + e.T.Log("Generating support bundle for failed test") + generateSupportBundleArgs := []string{"generate", "support-bundle", "-f", e.ClusterConfigLocation} + e.RunEKSA(generateSupportBundleArgs, opts...) + } +} + func (e *ClusterE2ETest) Run(name string, args ...string) { cmd, err := prepareCommand(name, args...) if err != nil { @@ -1010,14 +1019,6 @@ func (e *ClusterE2ETest) StopIfFailed() { } } -func (e *ClusterE2ETest) cleanup(f func()) { - e.T.Cleanup(func() { - if !e.T.Failed() { - f() - } - }) -} - // Cluster builds a cluster obj using the ClusterE2ETest name and kubeconfig. 
func (e *ClusterE2ETest) Cluster() *types.Cluster { return &types.Cluster{ @@ -1286,7 +1287,10 @@ func (e *ClusterE2ETest) InstallLocalStorageProvisioner() { func (e *ClusterE2ETest) WithCluster(f func(e *ClusterE2ETest)) { e.GenerateClusterConfig() e.CreateCluster() - defer e.DeleteCluster() + defer func() { + e.GenerateSupportBundleIfTestFailed() + e.DeleteCluster() + }() f(e) } @@ -1374,7 +1378,6 @@ func (e *ClusterE2ETest) printDeploymentSpec(ctx context.Context, ns string) { func (e *ClusterE2ETest) VerifyHelloPackageInstalled(packageName string, mgmtCluster *types.Cluster) { ctx := context.Background() packageMetadatNamespace := fmt.Sprintf("%s-%s", constants.EksaPackagesName, e.ClusterName) - e.GenerateSupportBundleOnCleanupIfTestFailed() // Log Package/Deployment outputs defer func() { @@ -1407,7 +1410,6 @@ func (e *ClusterE2ETest) VerifyHelloPackageInstalled(packageName string, mgmtClu func (e *ClusterE2ETest) VerifyAdotPackageInstalled(packageName, targetNamespace string) { ctx := context.Background() packageMetadatNamespace := fmt.Sprintf("%s-%s", constants.EksaPackagesName, e.ClusterName) - e.GenerateSupportBundleOnCleanupIfTestFailed() e.T.Log("Waiting for package", packageName, "to be installed") err := e.KubectlClient.WaitForPackagesInstalled(ctx, @@ -2111,7 +2113,6 @@ func (e *ClusterE2ETest) MatchLogs(targetNamespace, targetPodName string, ) { e.T.Logf("Match logs for pod %s, container %s in namespace %s", targetPodName, targetContainerName, targetNamespace) - e.GenerateSupportBundleOnCleanupIfTestFailed() err := retrier.New(timeout).Retry(func() error { logs, err := e.KubectlClient.GetPodLogs(context.TODO(), targetNamespace, From ecd6c6085511f8cb0daea8fd8d7218a62e238f40 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Apr 2024 15:41:49 -0700 Subject: [PATCH 033/193] Bump codecov/codecov-action from 4.1.1 to 4.2.0 (#7939) Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 4.1.1 to 4.2.0. - [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v4.1.1...v4.2.0) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/go-coverage.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/go-coverage.yml b/.github/workflows/go-coverage.yml index 71e5788fd82a..2869683e6691 100644 --- a/.github/workflows/go-coverage.yml +++ b/.github/workflows/go-coverage.yml @@ -22,7 +22,7 @@ jobs: - name: Run go test with coverage run: COVER_PROFILE=coverage.txt make coverage-unit-test - name: Codecov upload - uses: codecov/codecov-action@v4.1.1 + uses: codecov/codecov-action@v4.2.0 with: files: ./coverage.txt token: ${{ secrets.CODECOV_TOKEN }} From 89ed3dd323e2d54de2ec8610810c4c7c0fed0dd1 Mon Sep 17 00:00:00 2001 From: Xu Deng Date: Tue, 9 Apr 2024 13:05:51 -0400 Subject: [PATCH 034/193] Bump x/net (#7945) --- go.mod | 2 +- go.sum | 4 ++-- release/cli/go.mod | 2 +- release/cli/go.sum | 4 ++-- .../cli/pkg/test/testdata/main-bundle-release.yaml | 14 +++++++------- 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/go.mod b/go.mod index 79b8578c5cd2..ec279d55d694 100644 --- a/go.mod +++ b/go.mod @@ -44,7 +44,7 @@ require ( go.uber.org/zap v1.26.0 golang.org/x/crypto v0.21.0 golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa - golang.org/x/net v0.21.0 + golang.org/x/net v0.23.0 golang.org/x/oauth2 v0.15.0 golang.org/x/sys v0.18.0 golang.org/x/text v0.14.0 diff --git a/go.sum b/go.sum index 580154b3d8bb..602b3d6c6c84 100644 --- a/go.sum +++ b/go.sum @@ -1077,8 +1077,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= diff --git a/release/cli/go.mod b/release/cli/go.mod index 54e5498318ae..1294042dd1e6 100644 --- a/release/cli/go.mod +++ b/release/cli/go.mod @@ -148,7 +148,7 @@ require ( golang.org/x/crypto v0.21.0 // indirect golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect golang.org/x/mod v0.14.0 // indirect - golang.org/x/net v0.21.0 // indirect + golang.org/x/net v0.23.0 // indirect golang.org/x/oauth2 v0.15.0 // indirect golang.org/x/sys v0.18.0 // indirect golang.org/x/term v0.18.0 // indirect diff --git a/release/cli/go.sum b/release/cli/go.sum index 727635817693..724954afe1c6 100644 --- a/release/cli/go.sum +++ b/release/cli/go.sum @@ -711,8 +711,8 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= 
-golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= diff --git a/release/cli/pkg/test/testdata/main-bundle-release.yaml b/release/cli/pkg/test/testdata/main-bundle-release.yaml index ae4d2148612c..483fef030604 100644 --- a/release/cli/pkg/test/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/test/testdata/main-bundle-release.yaml @@ -252,7 +252,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.14/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.15/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz crictl: arch: - amd64 @@ -782,7 +782,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cloud-provider-vsphere os: linux - uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.25.1-eks-d-1-25-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.25.3-eks-d-1-25-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/metadata.yaml version: v1.8.5+abcdef1 @@ -1030,7 +1030,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.14/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.15/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz crictl: arch: - amd64 @@ -1560,7 +1560,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cloud-provider-vsphere os: linux - uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.26.0-eks-d-1-26-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.26.2-eks-d-1-26-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/metadata.yaml version: v1.8.5+abcdef1 @@ -1808,7 +1808,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.14/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz 
+ uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.15/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz crictl: arch: - amd64 @@ -2586,7 +2586,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.14/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.15/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz crictl: arch: - amd64 @@ -3364,7 +3364,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.14/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.15/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz crictl: arch: - amd64 From 44e0b88cd83800a6691c58769d7f9053bf0de96b Mon Sep 17 00:00:00 2001 From: Abhinav Pandey Date: Tue, 9 Apr 2024 11:04:46 -0700 Subject: [PATCH 035/193] pull images from public ecr for e2e (#7946) --- test/framework/cluster.go | 7 +- test/framework/testdata/hpa_busybox.yaml | 2 +- .../testdata/local-path-storage.yaml | 129 ++++++++++++++++++ 3 files changed, 134 insertions(+), 4 deletions(-) create mode 100644 test/framework/testdata/local-path-storage.yaml diff --git a/test/framework/cluster.go b/test/framework/cluster.go index a0296accc614..60891c3a03c4 100644 --- a/test/framework/cluster.go +++ b/test/framework/cluster.go @@ -71,6 +71,9 @@ var oidcRoles []byte //go:embed testdata/hpa_busybox.yaml var hpaBusybox []byte +//go:embed testdata/local-path-storage.yaml +var localPathProvisioner []byte + type ClusterE2ETest struct { T T ClusterConfigLocation string @@ -1275,9 +1278,7 @@ func (e *ClusterE2ETest) UninstallCuratedPackage(packagePrefix string, opts ...s func (e *ClusterE2ETest) InstallLocalStorageProvisioner() { ctx := context.Background() - _, err := e.KubectlClient.ExecuteCommand(ctx, "apply", "-f", - "https://raw.githubusercontent.com/rancher/local-path-provisioner/v0.0.22/deploy/local-path-storage.yaml", - "--kubeconfig", e.KubeconfigFilePath()) + err := e.KubectlClient.ApplyKubeSpecFromBytes(ctx, e.Cluster(), localPathProvisioner) if err != nil { e.T.Fatalf("Error installing local-path-provisioner: %v", err) } diff --git a/test/framework/testdata/hpa_busybox.yaml b/test/framework/testdata/hpa_busybox.yaml index 55d690289be1..38459830d4c6 100644 --- a/test/framework/testdata/hpa_busybox.yaml +++ b/test/framework/testdata/hpa_busybox.yaml @@ -14,7 +14,7 @@ spec: spec: containers: - name: busybox - image: busybox:1.34 + image: public.ecr.aws/docker/library/busybox:1.36 resources: limits: cpu: 50m diff --git a/test/framework/testdata/local-path-storage.yaml b/test/framework/testdata/local-path-storage.yaml new file mode 100644 index 000000000000..cad22c1e2150 --- /dev/null +++ b/test/framework/testdata/local-path-storage.yaml @@ -0,0 +1,129 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: local-path-storage + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: local-path-provisioner-service-account + namespace: local-path-storage + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: 
ClusterRole +metadata: + name: local-path-provisioner-role +rules: + - apiGroups: [ "" ] + resources: [ "nodes", "persistentvolumeclaims", "configmaps" ] + verbs: [ "get", "list", "watch" ] + - apiGroups: [ "" ] + resources: [ "endpoints", "persistentvolumes", "pods" ] + verbs: [ "*" ] + - apiGroups: [ "" ] + resources: [ "events" ] + verbs: [ "create", "patch" ] + - apiGroups: [ "storage.k8s.io" ] + resources: [ "storageclasses" ] + verbs: [ "get", "list", "watch" ] + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: local-path-provisioner-bind +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: local-path-provisioner-role +subjects: + - kind: ServiceAccount + name: local-path-provisioner-service-account + namespace: local-path-storage + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: local-path-provisioner + namespace: local-path-storage +spec: + replicas: 1 + selector: + matchLabels: + app: local-path-provisioner + template: + metadata: + labels: + app: local-path-provisioner + spec: + serviceAccountName: local-path-provisioner-service-account + containers: + - name: local-path-provisioner + image: public.ecr.aws/eks-anywhere/rancher/local-path-provisioner:v0.0.26-eks-a-62 + imagePullPolicy: IfNotPresent + command: + - local-path-provisioner + - --debug + - start + - --config + - /etc/config/config.json + volumeMounts: + - name: config-volume + mountPath: /etc/config/ + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumes: + - name: config-volume + configMap: + name: local-path-config + +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: local-path +provisioner: rancher.io/local-path +volumeBindingMode: WaitForFirstConsumer +reclaimPolicy: Delete + +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: local-path-config + namespace: local-path-storage +data: + config.json: |- + { + "nodePathMap":[ + { + "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES", + "paths":["/opt/local-path-provisioner"] + } + ] + } + setup: |- + #!/bin/sh + set -eu + mkdir -m 0777 -p "$VOL_DIR" + teardown: |- + #!/bin/sh + set -eu + rm -rf "$VOL_DIR" + helperPod.yaml: |- + apiVersion: v1 + kind: Pod + metadata: + name: helper-pod + spec: + containers: + - name: helper-pod + image: public.ecr.aws/docker/library/busybox:1.36 + imagePullPolicy: IfNotPresent + From 5a53dd6bb0ffc92346dc3e7a9b03057521ad252e Mon Sep 17 00:00:00 2001 From: Abhinav Pandey Date: Tue, 9 Apr 2024 12:39:37 -0700 Subject: [PATCH 036/193] ensure no duplicate networks for cloudstack (#7943) --- cmd/eks-a-tool/cmd/cloudstackrmvms.go | 2 +- .../buildspecs/cloudstack-test-eks-a-cli.yml | 3 +- .../build/buildspecs/quick-test-eks-a-cli.yml | 3 +- cmd/integration_test/cmd/cleanupcloudstack.go | 6 +- internal/test/cleanup/cleanup.go | 29 +++++++- pkg/executables/cmk.go | 39 +++++++++- pkg/executables/cmk_test.go | 73 +++++++++++++++++++ .../testdata/cmk_list_network_duplicates.json | 20 +++++ test/framework/cloudstack.go | 2 +- 9 files changed, 165 insertions(+), 12 deletions(-) create mode 100644 pkg/executables/testdata/cmk_list_network_duplicates.json diff --git a/cmd/eks-a-tool/cmd/cloudstackrmvms.go b/cmd/eks-a-tool/cmd/cloudstackrmvms.go index ea2ee6d8a9d7..212f9b34837c 100644 --- a/cmd/eks-a-tool/cmd/cloudstackrmvms.go +++ b/cmd/eks-a-tool/cmd/cloudstackrmvms.go @@ -25,7 +25,7 @@ var cloudstackRmVmsCmd = &cobra.Command{ if err != nil { return err } - err = 
cleanup.CleanUpCloudstackTestResources(cmd.Context(), clusterName, viper.GetBool(dryRunFlag)) + err = cleanup.CloudstackTestResources(cmd.Context(), clusterName, viper.GetBool(dryRunFlag), false) if err != nil { log.Fatalf("Error removing vms: %v", err) } diff --git a/cmd/integration_test/build/buildspecs/cloudstack-test-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/cloudstack-test-eks-a-cli.yml index 399c743881e3..5787e73e8e36 100644 --- a/cmd/integration_test/build/buildspecs/cloudstack-test-eks-a-cli.yml +++ b/cmd/integration_test/build/buildspecs/cloudstack-test-eks-a-cli.yml @@ -82,7 +82,8 @@ phases: - > ./bin/test e2e cleanup cloudstack -n ${CLUSTER_NAME_PREFIX} - -v 4 + --delete-duplicate-networks + -v 6 build: commands: - export JOB_ID=$CODEBUILD_BUILD_ID diff --git a/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml index 8cc154209292..8afa3db618a1 100644 --- a/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml +++ b/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml @@ -177,7 +177,8 @@ phases: - > ./bin/test e2e cleanup cloudstack -n ${CLUSTER_NAME_PREFIX} - -v 4 + --delete-duplicate-networks + -v 6 build: commands: - export JOB_ID=$CODEBUILD_BUILD_ID diff --git a/cmd/integration_test/cmd/cleanupcloudstack.go b/cmd/integration_test/cmd/cleanupcloudstack.go index ba5b83344fd2..422df1394494 100644 --- a/cmd/integration_test/cmd/cleanupcloudstack.go +++ b/cmd/integration_test/cmd/cleanupcloudstack.go @@ -37,11 +37,14 @@ func preRunCleanUpCloudstackSetup(cmd *cobra.Command, args []string) { }) } +const deleteDuplicateNetworksFlag = "delete-duplicate-networks" + var requiredCloudstackCleanUpFlags = []string{clusterNameFlagName} func init() { cleanUpInstancesCmd.AddCommand(cleanUpCloudstackCmd) cleanUpCloudstackCmd.Flags().StringP(clusterNameFlagName, "n", "", "Cluster name for associated vms") + cleanUpCloudstackCmd.Flags().Bool(deleteDuplicateNetworksFlag, false, "Delete duplicate isolated networks") for _, flag := range requiredCloudstackCleanUpFlags { if err := cleanUpCloudstackCmd.MarkFlagRequired(flag); err != nil { @@ -52,7 +55,8 @@ func init() { func cleanUpCloudstackTestResources(ctx context.Context) error { clusterName := viper.GetString(clusterNameFlagName) - err := cleanup.CleanUpCloudstackTestResources(ctx, clusterName, false) + deleteDuplicateNetworks := viper.IsSet(deleteDuplicateNetworksFlag) + err := cleanup.CloudstackTestResources(ctx, clusterName, false, deleteDuplicateNetworks) if err != nil { return fmt.Errorf("running cleanup for cloudstack vms: %v", err) } diff --git a/internal/test/cleanup/cleanup.go b/internal/test/cleanup/cleanup.go index fb2a42db833d..0e3271dcfb53 100644 --- a/internal/test/cleanup/cleanup.go +++ b/internal/test/cleanup/cleanup.go @@ -3,6 +3,7 @@ package cleanup import ( "context" "fmt" + "os" "strconv" "time" @@ -22,8 +23,9 @@ import ( ) const ( - cleanupRetries = 5 - retryBackoff = 10 * time.Second + cleanupRetries = 5 + retryBackoff = 10 * time.Second + cloudstackNetworkVar = "T_CLOUDSTACK_NETWORK" ) func CleanUpAwsTestResources(storageBucket string, maxAge string, tag string) error { @@ -95,7 +97,9 @@ func VsphereRmVms(ctx context.Context, clusterName string, opts ...executables.G return govc.CleanupVms(ctx, clusterName, false) } -func CleanUpCloudstackTestResources(ctx context.Context, clusterName string, dryRun bool) error { +// CloudstackTestResources cleans up resources on the CloudStack environment. 
+// This can include VMs as well as duplicate networks. +func CloudstackTestResources(ctx context.Context, clusterName string, dryRun bool, deleteDuplicateNetworks bool) error { executableBuilder, close, err := executables.InitInDockerExecutablesBuilder(ctx, executables.DefaultEksaImage()) if err != nil { return fmt.Errorf("unable to initialize executables: %v", err) @@ -128,6 +132,25 @@ func CleanUpCloudstackTestResources(ctx context.Context, clusterName string, dry if len(errorsMap) > 0 { return fmt.Errorf("cleaning up VMs: %+v", errorsMap) } + + return cleanupCloudstackDuplicateNetworks(ctx, cmk, execConfig, deleteDuplicateNetworks) +} + +func cleanupCloudstackDuplicateNetworks(ctx context.Context, cmk *executables.Cmk, execConfig *decoder.CloudStackExecConfig, deleteDuplicateNetworks bool) error { + if !deleteDuplicateNetworks { + return nil + } + + networkName, set := os.LookupEnv(cloudstackNetworkVar) + if !set { + return fmt.Errorf("ensuring no duplicate networks, %s is not set", cloudstackNetworkVar) + } + + for _, profile := range execConfig.Profiles { + if err := cmk.EnsureNoDuplicateNetwork(ctx, profile.Name, networkName); err != nil { + return err + } + } return nil } diff --git a/pkg/executables/cmk.go b/pkg/executables/cmk.go index 1cb4cc67bf83..64551202c172 100644 --- a/pkg/executables/cmk.go +++ b/pkg/executables/cmk.go @@ -291,6 +291,35 @@ func (c *Cmk) ValidateDomainAndGetId(ctx context.Context, profile string, domain return domainId, nil } +// EnsureNoDuplicateNetwork ensures that there are no duplicate networks with the name networkName. +// If it finds duplicates that are not shared networks, it deletes them. +func (c *Cmk) EnsureNoDuplicateNetwork(ctx context.Context, profile string, networkName string) error { + command := newCmkCommand(fmt.Sprintf("list networks filter=name,id,type keyword=%s", networkName)) + result, err := c.exec(ctx, profile, command...) + if err != nil { + return fmt.Errorf("getting network info - %s: %v", result.String(), err) + } + + response := struct { + CmkNetworks []cmkNetwork `json:"network"` + }{} + if err = json.Unmarshal(result.Bytes(), &response); err != nil { + return fmt.Errorf("parsing response into json: %v", err) + } + + for _, network := range response.CmkNetworks { + if !strings.EqualFold(network.Type, "Shared") { + command := newCmkCommand(fmt.Sprintf("delete network id=%s force=true", network.ID)) + result, err := c.exec(ctx, profile, command...) 
+			if err != nil {
+				return fmt.Errorf("deleting duplicate network with ID %s - %s: %v", network.ID, result.String(), err)
+			}
+		}
+	}
+
+	return nil
+}
+
 func (c *Cmk) ValidateNetworkPresent(ctx context.Context, profile string, domainId string, network v1alpha1.CloudStackResourceIdentifier, zoneId string, account string) error {
 	command := newCmkCommand("list networks")
 	// account must be specified within a domainId
@@ -436,10 +465,6 @@ func (c *Cmk) CleanupVms(ctx context.Context, profile string, clusterName string
 }

 func (c *Cmk) exec(ctx context.Context, profile string, args ...string) (stdout bytes.Buffer, err error) {
-	if err != nil {
-		return bytes.Buffer{}, fmt.Errorf("failed get environment map: %v", err)
-	}
-
 	configFile, err := c.buildCmkConfigFile(profile)
 	if err != nil {
 		return bytes.Buffer{}, fmt.Errorf("failed cmk validations: %v", err)
@@ -490,6 +515,12 @@ type cmkServiceOffering struct {
 	Name string `json:"name"`
 }

+type cmkNetwork struct {
+	ID   string `json:"id"`
+	Name string `json:"name"`
+	Type string `json:"type"`
+}
+
 type cmkResourceIdentifier struct {
 	Id   string `json:"id"`
 	Name string `json:"name"`
diff --git a/pkg/executables/cmk_test.go b/pkg/executables/cmk_test.go
index 01c9843b4004..7a265f02cd9d 100644
--- a/pkg/executables/cmk_test.go
+++ b/pkg/executables/cmk_test.go
@@ -215,6 +215,79 @@ func TestCmkCleanupVms(t *testing.T) {
 	}
 }

+func TestCmkEnsureNoDuplicateNetwork(t *testing.T) {
+	_, writer := test.NewWriter(t)
+	configFilePath, _ := filepath.Abs(filepath.Join(writer.Dir(), "generated", cmkConfigFileName))
+	tests := []struct {
+		testName           string
+		argumentsExecCalls [][]string
+		jsonResponseFile   string
+		cmkFunc            func(cmk executables.Cmk, ctx context.Context) error
+		cmkResponseError   error
+		wantErr            bool
+	}{
+		{
+			testName:         "EnsureNoDuplicateNetwork success on no duplicate networks",
+			jsonResponseFile: "testdata/cmk_list_network_multiple.json",
+			argumentsExecCalls: [][]string{
+				{
+					"-c", configFilePath,
+					"list", "networks", "filter=name,id,type", "keyword=eksa-cloudstack-ci-net",
+				},
+			},
+			cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
+				return cmk.EnsureNoDuplicateNetwork(ctx, execConfig.Profiles[0].Name, "eksa-cloudstack-ci-net")
+			},
+			cmkResponseError: nil,
+			wantErr:          false,
+		},
+		{
+			testName:         "EnsureNoDuplicateNetwork success on deleting duplicate networks",
+			jsonResponseFile: "testdata/cmk_list_network_duplicates.json",
+			argumentsExecCalls: [][]string{
+				{
+					"-c", configFilePath,
+					"list", "networks", "filter=name,id,type", "keyword=eksa-cloudstack-ci-net",
+				},
+				{
+					"-c", configFilePath,
+					"delete", "network", "id=fe1a7310-51d4-4299-b3d0-a627a57bb4b0", "force=true",
+				},
+				{
+					"-c", configFilePath,
+					"delete", "network", "id=24fd6849-3016-4afe-948d-4ce2bb396cf5", "force=true",
+				},
+			},
+			cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
+				return cmk.EnsureNoDuplicateNetwork(ctx, execConfig.Profiles[0].Name, "eksa-cloudstack-ci-net")
+			},
+			cmkResponseError: nil,
+			wantErr:          false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.testName, func(t *testing.T) {
+			fileContent := test.ReadFile(t, tt.jsonResponseFile)
+
+			ctx := context.Background()
+			mockCtrl := gomock.NewController(t)
+
+			executable := mockexecutables.NewMockExecutable(mockCtrl)
+			for _, argsList := range tt.argumentsExecCalls {
+				executable.EXPECT().Execute(ctx, argsList).
+ Return(*bytes.NewBufferString(fileContent), tt.cmkResponseError) + } + cmk, _ := executables.NewCmk(executable, writer, execConfig) + err := tt.cmkFunc(*cmk, ctx) + if tt.wantErr && err != nil || !tt.wantErr && err == nil { + return + } + t.Fatalf("Cmk error: %v", err) + }) + } +} + func TestNewCmkNilConfig(t *testing.T) { _, err := executables.NewCmk(nil, nil, nil) if err == nil { diff --git a/pkg/executables/testdata/cmk_list_network_duplicates.json b/pkg/executables/testdata/cmk_list_network_duplicates.json new file mode 100644 index 000000000000..cd8f37087fd7 --- /dev/null +++ b/pkg/executables/testdata/cmk_list_network_duplicates.json @@ -0,0 +1,20 @@ +{ + "count": 3, + "network": [ + { + "id": "fe1a7310-51d4-4299-b3d0-a627a57bb4b0", + "name": "eksa-cloudstack-ci-net", + "type": "Isolated" + }, + { + "id": "24fd6849-3016-4afe-948d-4ce2bb396cf5", + "name": "eksa-cloudstack-ci-net", + "type": "Isolated" + }, + { + "id": "13b501c1-5629-40e1-ba1e-a31caa9aead4", + "name": "eksa-cloudstack-ci-net", + "type": "Shared" + } + ] +} diff --git a/test/framework/cloudstack.go b/test/framework/cloudstack.go index 5f644a7df5a9..4ca93ebefdee 100644 --- a/test/framework/cloudstack.go +++ b/test/framework/cloudstack.go @@ -255,7 +255,7 @@ func (c *CloudStack) ClusterConfigUpdates() []api.ClusterConfigFiller { } func (c *CloudStack) CleanupVMs(clusterName string) error { - return cleanup.CleanUpCloudstackTestResources(context.Background(), clusterName, false) + return cleanup.CloudstackTestResources(context.Background(), clusterName, false, false) } func (c *CloudStack) WithProviderUpgrade(fillers ...api.CloudStackFiller) ClusterE2ETestOpt { From e1d9169831b258084efb7df5a6be62eb5ae1386e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 9 Apr 2024 12:53:00 -0700 Subject: [PATCH 037/193] Bump golang.org/x/sync from 0.6.0 to 0.7.0 in /release/cli (#7948) Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.6.0 to 0.7.0. - [Commits](https://github.com/golang/sync/compare/v0.6.0...v0.7.0) --- updated-dependencies: - dependency-name: golang.org/x/sync dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- release/cli/go.mod | 2 +- release/cli/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/release/cli/go.mod b/release/cli/go.mod index 1294042dd1e6..0fcdf520a6e1 100644 --- a/release/cli/go.mod +++ b/release/cli/go.mod @@ -15,7 +15,7 @@ require ( github.com/pkg/errors v0.9.1 github.com/spf13/cobra v1.8.0 github.com/spf13/viper v1.18.2 - golang.org/x/sync v0.6.0 + golang.org/x/sync v0.7.0 helm.sh/helm/v3 v3.14.3 k8s.io/apimachinery v0.29.3 k8s.io/helm v2.17.0+incompatible diff --git a/release/cli/go.sum b/release/cli/go.sum index 724954afe1c6..872c69500966 100644 --- a/release/cli/go.sum +++ b/release/cli/go.sum @@ -726,8 +726,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= From 98d5214778abca1b8dd7017f5adfe7737cbd3545 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 9 Apr 2024 12:53:07 -0700 Subject: [PATCH 038/193] Bump github.com/aws/aws-sdk-go from 1.51.13 to 1.51.18 in /release/cli (#7949) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.51.13 to 1.51.18. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.51.13...v1.51.18) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- release/cli/go.mod | 2 +- release/cli/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/release/cli/go.mod b/release/cli/go.mod index 0fcdf520a6e1..dae75bc0d457 100644 --- a/release/cli/go.mod +++ b/release/cli/go.mod @@ -3,7 +3,7 @@ module github.com/aws/eks-anywhere/release/cli go 1.21 require ( - github.com/aws/aws-sdk-go v1.51.13 + github.com/aws/aws-sdk-go v1.51.18 github.com/aws/aws-sdk-go-v2 v1.26.1 github.com/aws/eks-anywhere v0.18.0 github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e diff --git a/release/cli/go.sum b/release/cli/go.sum index 872c69500966..d36c3a3dbe2d 100644 --- a/release/cli/go.sum +++ b/release/cli/go.sum @@ -56,8 +56,8 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:l github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY= github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/aws/aws-sdk-go v1.38.40/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.51.13 h1:j6lgtz9E/XFRiYYnGNrAfWvyyTsuYvWvo2RCt0zqAIs= -github.com/aws/aws-sdk-go v1.51.13/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.51.18 h1:JKrk49ZlBTyKa4+droU7U/hk0QG84v91xaA58O0LPdo= +github.com/aws/aws-sdk-go v1.51.18/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA= github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e h1:GB6Cn9yKEt31mDF7RrVWyM9WoppNkGYth8zBPIJGJ+w= From b106abd2af01e527560892d589ded79469d418c8 Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Tue, 9 Apr 2024 13:45:44 -0700 Subject: [PATCH 039/193] [PR BOT] Generate release testdata files (#7942) --- .../test/testdata/main-bundle-release.yaml | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/release/cli/pkg/test/testdata/main-bundle-release.yaml b/release/cli/pkg/test/testdata/main-bundle-release.yaml index 483fef030604..6372d8ba15ff 100644 --- a/release/cli/pkg/test/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/test/testdata/main-bundle-release.yaml @@ -354,7 +354,7 @@ spec: version: v1.0.12+abcdef1 etcdadmController: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.19/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/bootstrap-components.yaml controller: arch: - amd64 @@ -363,7 +363,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: etcdadm-controller os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.19-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.20-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -374,8 +374,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.19/metadata.yaml - version: v1.0.19+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/metadata.yaml + version: v1.0.20+abcdef1 flux: helmController: arch: @@ -1132,7 +1132,7 @@ spec: version: v1.0.12+abcdef1 etcdadmController: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.19/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/bootstrap-components.yaml controller: arch: - amd64 @@ -1141,7 +1141,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: etcdadm-controller os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.19-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.20-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -1152,8 +1152,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.19/metadata.yaml - version: v1.0.19+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/metadata.yaml + version: v1.0.20+abcdef1 flux: helmController: arch: @@ -1910,7 +1910,7 @@ spec: version: v1.0.12+abcdef1 etcdadmController: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.19/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/bootstrap-components.yaml controller: arch: - amd64 @@ -1919,7 +1919,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: etcdadm-controller os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.19-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.20-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -1930,8 +1930,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.19/metadata.yaml - version: v1.0.19+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/metadata.yaml + version: v1.0.20+abcdef1 flux: helmController: arch: @@ -2688,7 +2688,7 @@ spec: version: v1.0.12+abcdef1 etcdadmController: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.19/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/bootstrap-components.yaml controller: arch: - amd64 @@ -2697,7 +2697,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: etcdadm-controller os: linux - uri: 
public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.19-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.20-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -2708,8 +2708,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.19/metadata.yaml - version: v1.0.19+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/metadata.yaml + version: v1.0.20+abcdef1 flux: helmController: arch: @@ -3466,7 +3466,7 @@ spec: version: v1.0.12+abcdef1 etcdadmController: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.19/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/bootstrap-components.yaml controller: arch: - amd64 @@ -3475,7 +3475,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: etcdadm-controller os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.19-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.20-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -3486,8 +3486,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.19/metadata.yaml - version: v1.0.19+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/metadata.yaml + version: v1.0.20+abcdef1 flux: helmController: arch: From 4d692213bdedb1e629dc0e3936f9628d37e263ba Mon Sep 17 00:00:00 2001 From: Tanvir Tatla Date: Tue, 9 Apr 2024 14:14:44 -0700 Subject: [PATCH 040/193] Fix Delete Encrypted Etcd Cluster (#7950) --- pkg/api/v1alpha1/cluster_webhook.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/api/v1alpha1/cluster_webhook.go b/pkg/api/v1alpha1/cluster_webhook.go index 97845612add8..7cc812afbdff 100644 --- a/pkg/api/v1alpha1/cluster_webhook.go +++ b/pkg/api/v1alpha1/cluster_webhook.go @@ -66,7 +66,7 @@ func (r *Cluster) ValidateCreate() (admission.Warnings, error) { return nil, apierrors.NewBadRequest("creating new cluster on existing cluster is not supported for self managed clusters") } - if r.Spec.EtcdEncryption != nil { + if !r.IsReconcilePaused() && r.Spec.EtcdEncryption != nil { allErrs = append(allErrs, field.Invalid(field.NewPath("spec.etcdEncryption"), r.Spec.EtcdEncryption, "etcdEncryption is not supported during cluster creation")) } From a0e5f31d4197a5628d7458764b312cfd34664ae6 Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Tue, 9 Apr 2024 15:05:44 -0700 Subject: [PATCH 041/193] [PR BOT] Generate release testdata files (#7951) --- .../test/testdata/main-bundle-release.yaml | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/release/cli/pkg/test/testdata/main-bundle-release.yaml b/release/cli/pkg/test/testdata/main-bundle-release.yaml index 6372d8ba15ff..a970f556362f 
100644 --- a/release/cli/pkg/test/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/test/testdata/main-bundle-release.yaml @@ -330,7 +330,7 @@ spec: version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.12/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/bootstrap-components.yaml controller: arch: - amd64 @@ -339,7 +339,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: etcdadm-bootstrap-provider os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-bootstrap-provider:v1.0.12-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/etcdadm-bootstrap-provider:v1.0.13-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -350,8 +350,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.12/metadata.yaml - version: v1.0.12+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/metadata.yaml + version: v1.0.13+abcdef1 etcdadmController: components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/bootstrap-components.yaml @@ -1108,7 +1108,7 @@ spec: version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.12/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/bootstrap-components.yaml controller: arch: - amd64 @@ -1117,7 +1117,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: etcdadm-bootstrap-provider os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-bootstrap-provider:v1.0.12-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/etcdadm-bootstrap-provider:v1.0.13-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -1128,8 +1128,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.12/metadata.yaml - version: v1.0.12+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/metadata.yaml + version: v1.0.13+abcdef1 etcdadmController: components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/bootstrap-components.yaml @@ -1886,7 +1886,7 @@ spec: version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.12/bootstrap-components.yaml + uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/bootstrap-components.yaml controller: arch: - amd64 @@ -1895,7 +1895,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: etcdadm-bootstrap-provider os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-bootstrap-provider:v1.0.12-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/etcdadm-bootstrap-provider:v1.0.13-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -1906,8 +1906,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.12/metadata.yaml - version: v1.0.12+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/metadata.yaml + version: v1.0.13+abcdef1 etcdadmController: components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/bootstrap-components.yaml @@ -2664,7 +2664,7 @@ spec: version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.12/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/bootstrap-components.yaml controller: arch: - amd64 @@ -2673,7 +2673,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: etcdadm-bootstrap-provider os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-bootstrap-provider:v1.0.12-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/etcdadm-bootstrap-provider:v1.0.13-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -2684,8 +2684,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.12/metadata.yaml - version: v1.0.12+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/metadata.yaml + version: v1.0.13+abcdef1 etcdadmController: components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/bootstrap-components.yaml @@ -3442,7 +3442,7 @@ spec: version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.12/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/bootstrap-components.yaml controller: arch: - amd64 @@ -3451,7 +3451,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: etcdadm-bootstrap-provider os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-bootstrap-provider:v1.0.12-eks-a-v0.0.0-dev-build.1 + uri: 
public.ecr.aws/release-container-registry/aws/etcdadm-bootstrap-provider:v1.0.13-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -3462,8 +3462,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.12/metadata.yaml - version: v1.0.12+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/metadata.yaml + version: v1.0.13+abcdef1 etcdadmController: components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/bootstrap-components.yaml From 2124614812962b71b9543ad2d2531eea3c7426c6 Mon Sep 17 00:00:00 2001 From: Abhay Krishna Date: Tue, 9 Apr 2024 19:30:22 -0700 Subject: [PATCH 042/193] Move release testdata file to `operations` sub-package (#7952) --- release/Makefile | 4 ++-- release/cli/pkg/operations/bundle_release_test.go | 2 +- .../{test => operations}/testdata/main-bundle-release.yaml | 0 3 files changed, 3 insertions(+), 3 deletions(-) rename release/cli/pkg/{test => operations}/testdata/main-bundle-release.yaml (100%) diff --git a/release/Makefile b/release/Makefile index 1ce8e841ef5c..920ec2304060 100644 --- a/release/Makefile +++ b/release/Makefile @@ -121,9 +121,9 @@ bin/golangci-lint: unit-test: ## Run go test against code. $(GO) test -C ./cli -count=1 ./... -update-bundle-golden-files: ## Updates testdata files located under pkg/test/testdata +update-bundle-golden-files: ## Updates testdata files located under pkg/operations/testdata $(GO) test -C cli -count=1 ./pkg/operations -update - $(eval DIFF_LINE_COUNT=$(shell git diff cli/pkg/test/testdata | wc -l)) + $(eval DIFF_LINE_COUNT=$(shell git diff cli/pkg/operations/testdata | wc -l)) @if [[ $(DIFF_LINE_COUNT) != 0 ]]; then \ printf "\n\033[33mWarning:\033[0m Testdata files have been updated! 
Ensure that these changes were intentional.\n"; \ fi diff --git a/release/cli/pkg/operations/bundle_release_test.go b/release/cli/pkg/operations/bundle_release_test.go index e7021a85ba3b..411cd3f546a7 100644 --- a/release/cli/pkg/operations/bundle_release_test.go +++ b/release/cli/pkg/operations/bundle_release_test.go @@ -38,7 +38,7 @@ import ( const ( releaseFolder = "release" - testdataFolder = "cli/pkg/test/testdata" + testdataFolder = "cli/pkg/operations/testdata" generatedBundleFolder = "generated-bundles" ) diff --git a/release/cli/pkg/test/testdata/main-bundle-release.yaml b/release/cli/pkg/operations/testdata/main-bundle-release.yaml similarity index 100% rename from release/cli/pkg/test/testdata/main-bundle-release.yaml rename to release/cli/pkg/operations/testdata/main-bundle-release.yaml From 1fdea5edb7d260d323e12d0c8313d55a8255ec75 Mon Sep 17 00:00:00 2001 From: Abhay Krishna Date: Tue, 9 Apr 2024 20:45:10 -0700 Subject: [PATCH 043/193] Fix release testdata update script (#7953) --- scripts/golden_create_pr.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/golden_create_pr.sh b/scripts/golden_create_pr.sh index 2164c826df05..4032953c0f6a 100755 --- a/scripts/golden_create_pr.sh +++ b/scripts/golden_create_pr.sh @@ -43,7 +43,7 @@ git remote add upstream git@github.com:${UPSTREAM_ORG}/${REPO}.git git checkout -b $PR_BRANCH git diff -git add release/cli/pkg/test/testdata/*.yaml +git add release/cli/pkg/operations/testdata/*.yaml # If some other files get modified, the changes should be ignored git restore . FILES_ADDED=$(git diff --staged --name-only) From aa9b312627e04a2eae7040a1df1b998b921a242f Mon Sep 17 00:00:00 2001 From: Abhinav Pandey Date: Wed, 10 Apr 2024 10:18:44 -0700 Subject: [PATCH 044/193] Nutanix ensure VMs cleaned up before and after tests (#7955) --- cmd/eks-a-tool/cmd/nutanixrmvms.go | 2 +- .../buildspecs/nutanix-test-eks-a-cli.yml | 18 +++++ .../build/buildspecs/quick-test-eks-a-cli.yml | 16 +++++ cmd/integration_test/cmd/cleanupnutanix.go | 72 +++++++++++++++++++ internal/test/cleanup/cleanup.go | 4 +- test/framework/nutanix.go | 2 +- 6 files changed, 110 insertions(+), 4 deletions(-) create mode 100644 cmd/integration_test/cmd/cleanupnutanix.go diff --git a/cmd/eks-a-tool/cmd/nutanixrmvms.go b/cmd/eks-a-tool/cmd/nutanixrmvms.go index 87ddf517739c..3878960954a0 100644 --- a/cmd/eks-a-tool/cmd/nutanixrmvms.go +++ b/cmd/eks-a-tool/cmd/nutanixrmvms.go @@ -31,7 +31,7 @@ var nutanixRmVmsCmd = &cobra.Command{ if viper.IsSet(insecureFlag) { insecure = true } - err = cleanup.NutanixTestResourcesCleanup(cmd.Context(), clusterName, viper.GetString(endpointFlag), viper.GetString(portFlag), insecure, viper.GetBool(ignoreErrorsFlag)) + err = cleanup.NutanixTestResources(clusterName, viper.GetString(endpointFlag), viper.GetString(portFlag), insecure, viper.GetBool(ignoreErrorsFlag)) if err != nil { log.Fatalf("Error removing vms: %v", err) } diff --git a/cmd/integration_test/build/buildspecs/nutanix-test-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/nutanix-test-eks-a-cli.yml index 10f3491c6b76..e264fdab0c13 100644 --- a/cmd/integration_test/build/buildspecs/nutanix-test-eks-a-cli.yml +++ b/cmd/integration_test/build/buildspecs/nutanix-test-eks-a-cli.yml @@ -59,6 +59,14 @@ phases: if ! 
[[ ${CODEBUILD_INITIATOR} =~ "codepipeline" ]]; then make build-eks-a-for-e2e build-integration-test-binary e2e-tests-binary E2E_TAGS="e2e nutanix" E2E_OUTPUT_FILE=bin/nutanix/e2e.test fi + - > + ./bin/test e2e cleanup nutanix + -n ${CLUSTER_NAME_PREFIX} + -e ${T_NUTANIX_ENDPOINT} + -p ${T_NUTANIX_PORT} + --insecure + --ignoreErrors + -v 4 build: commands: - export JOB_ID=$CODEBUILD_BUILD_ID @@ -85,6 +93,16 @@ phases: --test-report-folder=reports --branch-name=${BRANCH_NAME} --baremetal-branch=${BAREMETAL_BRANCH} + post_build: + commands: + - > + ./bin/test e2e cleanup nutanix + -n ${CLUSTER_NAME_PREFIX} + -e ${T_NUTANIX_ENDPOINT} + -p ${T_NUTANIX_PORT} + --insecure + --ignoreErrors + -v 4 reports: e2e-reports: files: diff --git a/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml index 8afa3db618a1..2c519ed25e66 100644 --- a/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml +++ b/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml @@ -179,6 +179,14 @@ phases: -n ${CLUSTER_NAME_PREFIX} --delete-duplicate-networks -v 6 + - > + ./bin/test e2e cleanup nutanix + -n ${CLUSTER_NAME_PREFIX} + -e ${T_NUTANIX_ENDPOINT} + -p ${T_NUTANIX_PORT} + --insecure + --ignoreErrors + -v 4 build: commands: - export JOB_ID=$CODEBUILD_BUILD_ID @@ -219,6 +227,14 @@ phases: ./bin/test e2e cleanup cloudstack -n ${CLUSTER_NAME_PREFIX} -v 4 + - > + ./bin/test e2e cleanup nutanix + -n ${CLUSTER_NAME_PREFIX} + -e ${T_NUTANIX_ENDPOINT} + -p ${T_NUTANIX_PORT} + --insecure + --ignoreErrors + -v 4 reports: e2e-reports: files: diff --git a/cmd/integration_test/cmd/cleanupnutanix.go b/cmd/integration_test/cmd/cleanupnutanix.go new file mode 100644 index 000000000000..46426e1c07a3 --- /dev/null +++ b/cmd/integration_test/cmd/cleanupnutanix.go @@ -0,0 +1,72 @@ +package cmd + +import ( + "fmt" + "log" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "github.com/spf13/viper" + + "github.com/aws/eks-anywhere/internal/test/cleanup" + "github.com/aws/eks-anywhere/pkg/logger" +) + +const ( + endpointFlag = "endpoint" + portFlag = "port" + insecureFlag = "insecure" + ignoreErrorsFlag = "ignoreErrors" +) + +var requiredNutanixCleanUpFlags = []string{clusterNameFlagName, endpointFlag} + +var cleanUpNutanixCmd = &cobra.Command{ + Use: "nutanix", + Short: "Clean up e2e vms on Nutanix Prism", + Long: "Clean up vms created for e2e testing on Nutanix Prism", + SilenceUsage: true, + PreRun: preRunCleanUpNutanixSetup, + RunE: func(_ *cobra.Command, _ []string) error { + err := cleanUpNutanixTestResources() + if err != nil { + logger.Fatal(err, "Failed to cleanup e2e vms on Nutanix Prism") + } + return nil + }, +} + +func preRunCleanUpNutanixSetup(cmd *cobra.Command, _ []string) { + cmd.Flags().VisitAll(func(flag *pflag.Flag) { + err := viper.BindPFlag(flag.Name, flag) + if err != nil { + log.Fatalf("Error initializing flags: %v", err) + } + }) +} + +func init() { + cleanUpInstancesCmd.AddCommand(cleanUpNutanixCmd) + + cleanUpNutanixCmd.Flags().StringP(clusterNameFlagName, "n", "", "Cluster name for associated vms") + cleanUpNutanixCmd.Flags().StringP(endpointFlag, "e", "", "Nutanix Prism endpoint") + cleanUpNutanixCmd.Flags().StringP(portFlag, "p", "9440", "Nutanix Prism port") + cleanUpNutanixCmd.Flags().BoolP(insecureFlag, "k", false, "skip TLS when contacting Prism APIs") + cleanUpNutanixCmd.Flags().Bool(ignoreErrorsFlag, true, "ignore APIs errors when deleting VMs") + + for _, flag := range requiredNutanixCleanUpFlags { + if err 
:= cleanUpNutanixCmd.MarkFlagRequired(flag); err != nil { + log.Fatalf("Error marking flag %s as required: %v", flag, err) + } + } +} + +func cleanUpNutanixTestResources() error { + clusterName := viper.GetString(clusterNameFlagName) + err := cleanup.NutanixTestResources(clusterName, viper.GetString(endpointFlag), viper.GetString(portFlag), viper.IsSet(insecureFlag), viper.IsSet(ignoreErrorsFlag)) + if err != nil { + return fmt.Errorf("running cleanup for Nutanix vms: %v", err) + } + + return nil +} diff --git a/internal/test/cleanup/cleanup.go b/internal/test/cleanup/cleanup.go index 0e3271dcfb53..023d3e025094 100644 --- a/internal/test/cleanup/cleanup.go +++ b/internal/test/cleanup/cleanup.go @@ -154,8 +154,8 @@ func cleanupCloudstackDuplicateNetworks(ctx context.Context, cmk *executables.Cm return nil } -// NutanixTestResourcesCleanup cleans up any leftover VMs in Nutanix after a test run. -func NutanixTestResourcesCleanup(ctx context.Context, clusterName, endpoint, port string, insecure, ignoreErrors bool) error { +// NutanixTestResources cleans up any leftover VMs in Nutanix after a test run. +func NutanixTestResources(clusterName, endpoint, port string, insecure, ignoreErrors bool) error { creds := nutanix.GetCredsFromEnv() nutanixCreds := prismgoclient.Credentials{ URL: fmt.Sprintf("%s:%s", endpoint, port), diff --git a/test/framework/nutanix.go b/test/framework/nutanix.go index 314a9c47304b..ecde5892e811 100644 --- a/test/framework/nutanix.go +++ b/test/framework/nutanix.go @@ -160,7 +160,7 @@ func (n *Nutanix) UpdateKubeConfig(content *[]byte, clusterName string) error { // CleanupVMs satisfies the test framework Provider. func (n *Nutanix) CleanupVMs(clustername string) error { - return cleanup.NutanixTestResourcesCleanup(context.Background(), clustername, os.Getenv(nutanixEndpoint), os.Getenv(nutanixPort), true, true) + return cleanup.NutanixTestResources(clustername, os.Getenv(nutanixEndpoint), os.Getenv(nutanixPort), true, true) } // ClusterConfigUpdates satisfies the test framework Provider. From 32ae94d2c99dabac3fd7bcf630b4cb5f1919f383 Mon Sep 17 00:00:00 2001 From: Abhinav Pandey Date: Wed, 10 Apr 2024 12:16:45 -0700 Subject: [PATCH 045/193] Fix nutanix cleanup (#7960) --- cmd/integration_test/build/buildspecs/nutanix-test-eks-a-cli.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/integration_test/build/buildspecs/nutanix-test-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/nutanix-test-eks-a-cli.yml index e264fdab0c13..deff7c9d3483 100644 --- a/cmd/integration_test/build/buildspecs/nutanix-test-eks-a-cli.yml +++ b/cmd/integration_test/build/buildspecs/nutanix-test-eks-a-cli.yml @@ -55,6 +55,7 @@ phases: - source ${CODEBUILD_SRC_DIR}/cmd/integration_test/build/script/setup_profile.sh - source ${CODEBUILD_SRC_DIR}/cmd/integration_test/build/script/create_infra_config.sh - ${CODEBUILD_SRC_DIR}/cmd/integration_test/build/script/start_docker.sh + - export CLUSTER_NAME_PREFIX="${BRANCH_NAME//./-}" - | if ! 
[[ ${CODEBUILD_INITIATOR} =~ "codepipeline" ]]; then make build-eks-a-for-e2e build-integration-test-binary e2e-tests-binary E2E_TAGS="e2e nutanix" E2E_OUTPUT_FILE=bin/nutanix/e2e.test

From 618bd05876a52bae1f154ebb9313bfa60a822b07 Mon Sep 17 00:00:00 2001
From: ahreehong <46465244+ahreehong@users.noreply.github.com>
Date: Wed, 10 Apr 2024 13:25:44 -0700
Subject: [PATCH 046/193] Skip airgapped proxy test until it can be run (#7961)

---
 test/e2e/SKIPPED_TESTS.yaml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/test/e2e/SKIPPED_TESTS.yaml b/test/e2e/SKIPPED_TESTS.yaml
index 869f4df94866..f26bb7a13438 100644
--- a/test/e2e/SKIPPED_TESTS.yaml
+++ b/test/e2e/SKIPPED_TESTS.yaml
@@ -3,6 +3,7 @@ skipped_tests: #Airgapped tests - TestCloudStackKubernetes125RedhatAirgappedRegistryMirror - TestCloudStackKubernetes126RedhatAirgappedRegistryMirror +- TestCloudStackKubernetes128RedhatAirgappedProxy # Proxy tests - TestCloudStackKubernetes125RedhatProxyConfigAPI

From ffb3abeecc82989df15c34e1017d95912cbeaada Mon Sep 17 00:00:00 2001
From: Ray Krueger
Date: Thu, 11 Apr 2024 13:07:47 -0500
Subject: [PATCH 047/193] docs: Clarify the networking example config (#7962)

The example config had an overlap between the pod CIDR block and the
control plane endpoint IP. Though it is documented in several places that
these must not overlap, the example config was confusing.

---
 .../en/docs/getting-started/vsphere/vsphere-spec.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/docs/content/en/docs/getting-started/vsphere/vsphere-spec.md b/docs/content/en/docs/getting-started/vsphere/vsphere-spec.md
index dedfd422c6aa..84d758b66de4 100644
--- a/docs/content/en/docs/getting-started/vsphere/vsphere-spec.md
+++ b/docs/content/en/docs/getting-started/vsphere/vsphere-spec.md
@@ -22,15 +22,15 @@ spec: cniConfig: # Cluster CNI plugin - default: cilium (required) cilium: {} pods: - cidrBlocks: # Subnet CIDR notation for pods (required) + cidrBlocks: # Internal Kubernetes subnet CIDR block for pods (required) - 192.168.0.0/16 services: - cidrBlocks: # Subnet CIDR notation for services (required) + cidrBlocks: # Internal Kubernetes subnet CIDR block for services (required) - 10.96.0.0/12 controlPlaneConfiguration: # Specific cluster control plane config (required) count: 2 # Number of control plane nodes (required) - endpoint: # IP for control plane endpoint (required) - host: "192.168.0.10" + endpoint: # IP for control plane endpoint on your network (required) + host: xxx.xxx.xxx.xxx machineGroupRef: # vSphere-specific Kubernetes node config (required) kind: VSphereMachineConfig name: my-cluster-machines

From 3fe6cc75fba30f66691c8429a20037c461e36a86 Mon Sep 17 00:00:00 2001
From: Abhinav Pandey
Date: Thu, 11 Apr 2024 15:17:47 -0700
Subject: [PATCH 048/193] Improve cluster autoscaler e2e test (#7965)

---
 test/framework/cluster.go | 37 +++++++++-----------
 test/framework/testdata/autoscaler_load.yaml | 17 +++++++++
 test/framework/testdata/hpa_busybox.yaml | 29 ---------------
 3 files changed, 34 insertions(+), 49 deletions(-)
 create mode 100644 test/framework/testdata/autoscaler_load.yaml
 delete mode 100644 test/framework/testdata/hpa_busybox.yaml

diff --git a/test/framework/cluster.go b/test/framework/cluster.go
index 60891c3a03c4..f5970d25992c 100644
--- a/test/framework/cluster.go
+++ b/test/framework/cluster.go
@@ -68,8 +68,8 @@ const ( //go:embed testdata/oidc-roles.yaml var oidcRoles []byte -//go:embed testdata/hpa_busybox.yaml -var hpaBusybox []byte +//go:embed testdata/autoscaler_load.yaml +var
autoscalerLoad []byte

//go:embed testdata/local-path-storage.yaml
var localPathProvisioner []byte
@@ -1997,49 +1997,46 @@ func (e *ClusterE2ETest) InstallAutoScalerWithMetricServer(targetNamespace strin // CombinedAutoScalerMetricServerTest verifies that new nodes are spun up after using a HPA to scale a deployment. func (e *ClusterE2ETest) CombinedAutoScalerMetricServerTest(autoscalerName, metricServerName, targetNamespace string, mgmtCluster *types.Cluster) { ctx := context.Background() - ns := "default" - name := "hpa-busybox-test" machineDeploymentName := e.ClusterName + "-" + "md-0" + autoscalerDeploymentName := "cluster-autoscaler-clusterapi-cluster-autoscaler" e.VerifyMetricServerPackageInstalled(metricServerName, targetNamespace, mgmtCluster) e.VerifyAutoScalerPackageInstalled(autoscalerName, targetNamespace, mgmtCluster) - e.T.Log("Metrics Server and Cluster Autoscaler ready") - err := e.KubectlClient.ApplyKubeSpecFromBytes(ctx, mgmtCluster, hpaBusybox) - if err != nil { - e.T.Fatalf("Failed to apply hpa busybox load %s", err) - } - e.T.Log("Deploying test workload") - - err = e.KubectlClient.WaitForDeployment(ctx, - e.Cluster(), "5m", "Available", name, ns) + err := e.KubectlClient.ApplyKubeSpecFromBytes(ctx, mgmtCluster, autoscalerLoad) if err != nil { - e.T.Fatalf("Failed waiting for test workload deployent %s", err) + e.T.Fatalf("Failed to apply autoscaler load %s", err) } - params := []string{"autoscale", "deployment", name, "--cpu-percent=50", "--min=1", "--max=20", "--kubeconfig", e.KubeconfigFilePath()} - _, err = e.KubectlClient.ExecuteCommand(ctx, params...) + // There is a bug in cluster autoscaler currently where it's not able to autoscale the cluster + // because of missing permissions on infrastructure machine template. + // Cluster Autoscaler does restart after ~10 min after which it starts functioning normally. + // We are force triggering a restart so the e2e doesn't have to wait 10 min for the restart. + // This can be removed once the following issue is resolved upstream.
+ // https://github.com/kubernetes/autoscaler/issues/6490
+ _, err = e.KubectlClient.ExecuteCommand(ctx, "rollout", "restart", "deployment", "-n", targetNamespace, autoscalerDeploymentName, "--kubeconfig", e.KubeconfigFilePath())
 if err != nil {
- e.T.Fatalf("Failed to autoscale deployent: %s", err)
+ e.T.Fatalf("Failed to rollout cluster autoscaler %s", err)
 }
+ e.VerifyAutoScalerPackageInstalled(autoscalerName, targetNamespace, mgmtCluster)

 e.T.Log("Waiting for machinedeployment to begin scaling up")
- err = e.KubectlClient.WaitJSONPathLoop(ctx, mgmtCluster.KubeconfigFile, "20m", "status.phase", "ScalingUp",
+ err = e.KubectlClient.WaitJSONPathLoop(ctx, mgmtCluster.KubeconfigFile, "10m", "status.phase", "ScalingUp",
 fmt.Sprintf("machinedeployments.cluster.x-k8s.io/%s", machineDeploymentName), constants.EksaSystemNamespace)
 if err != nil {
 e.T.Fatalf("Failed to get ScalingUp phase for machinedeployment: %s", err)
 }

 e.T.Log("Waiting for machinedeployment to finish scaling up")
- err = e.KubectlClient.WaitJSONPathLoop(ctx, mgmtCluster.KubeconfigFile, "15m", "status.phase", "Running",
+ err = e.KubectlClient.WaitJSONPathLoop(ctx, mgmtCluster.KubeconfigFile, "20m", "status.phase", "Running",
 fmt.Sprintf("machinedeployments.cluster.x-k8s.io/%s", machineDeploymentName), constants.EksaSystemNamespace)
 if err != nil {
 e.T.Fatalf("Failed to get Running phase for machinedeployment: %s", err)
 }

- err = e.KubectlClient.WaitForMachineDeploymentReady(ctx, mgmtCluster, "2m",
+ err = e.KubectlClient.WaitForMachineDeploymentReady(ctx, mgmtCluster, "5m",
 machineDeploymentName)
 if err != nil {
 e.T.Fatalf("Machine deployment stuck in scaling up: %s", err)
 diff --git a/test/framework/testdata/autoscaler_load.yaml b/test/framework/testdata/autoscaler_load.yaml new file mode 100644 index 000000000000..b3598594302f --- /dev/null +++ b/test/framework/testdata/autoscaler_load.yaml @@ -0,0 +1,17 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: autoscaler-load +spec: + selector: + matchLabels: + run: autoscaler-load + replicas: 111 # k8s node can have up to 110 pods so this ensures at least 1 pod is unschedulable.
+ template: + metadata: + labels: + run: autoscaler-load + spec: + containers: + - name: nginx + image: public.ecr.aws/docker/library/nginx:stable diff --git a/test/framework/testdata/hpa_busybox.yaml b/test/framework/testdata/hpa_busybox.yaml deleted file mode 100644 index 38459830d4c6..000000000000 --- a/test/framework/testdata/hpa_busybox.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: hpa-busybox-test -spec: - selector: - matchLabels: - run: hpa-busybox-test - replicas: 1 - template: - metadata: - labels: - run: hpa-busybox-test - spec: - containers: - - name: busybox - image: public.ecr.aws/docker/library/busybox:1.36 - resources: - limits: - cpu: 50m - requests: - cpu: 10m - memory: 500Mi - command: ["sh", "-c"] - args: - - while [ 1 ]; do - echo "Test"; - sleep 0.01; - done \ No newline at end of file From 51ee96b34e24d2a53c8af66fbf283f83a2298eda Mon Sep 17 00:00:00 2001 From: Abhinav Pandey Date: Thu, 11 Apr 2024 15:46:48 -0700 Subject: [PATCH 049/193] fix conformance tests (#7969) --- .../buildspecs/conformance-eks-a-cli.yml | 44 ++++-- internal/pkg/conformance/download.go | 56 ++++--- test/e2e/SKIPPED_TESTS.yaml | 9 ++ test/e2e/conformance_test.go | 149 ++++++++++++++++++ 4 files changed, 224 insertions(+), 34 deletions(-) diff --git a/cmd/integration_test/build/buildspecs/conformance-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/conformance-eks-a-cli.yml index e31e007051e5..558aec8a6301 100644 --- a/cmd/integration_test/build/buildspecs/conformance-eks-a-cli.yml +++ b/cmd/integration_test/build/buildspecs/conformance-eks-a-cli.yml @@ -4,7 +4,7 @@ env: variables: INTEGRATION_TEST_MAX_EC2_COUNT: 25 INTEGRATION_TEST_MAX_CONCURRENT_TEST_COUNT: 25 - T_TINKERBELL_MAX_HARDWARE_PER_TEST: 5 + T_TINKERBELL_MAX_HARDWARE_PER_TEST: 2 T_CLOUDSTACK_CIDR: "10.80.191.0/24" CLOUDSTACK_PROVIDER: true T_TINKERBELL_INVENTORY_CSV: "hardware-manifests/inventory.csv" @@ -13,27 +13,26 @@ env: TEST_RUNNER_GOVC_TEMPLATE: "eks-a-admin-ci" INTEGRATION_TEST_INFRA_CONFIG: "/tmp/test-infra.yml" T_VSPHERE_TEMPLATE_FOLDER: "/SDDC-Datacenter/vm/Templates" - T_VSPHERE_TEMPLATE_UBUNTU_1_22: "/SDDC-Datacenter/vm/Templates/ubuntu-kube-v1-22" - T_VSPHERE_TEMPLATE_UBUNTU_1_23: "/SDDC-Datacenter/vm/Templates/ubuntu-kube-v1-23" - T_VSPHERE_TEMPLATE_UBUNTU_1_24: "/SDDC-Datacenter/vm/Templates/ubuntu-kube-v1-24" T_VSPHERE_TEMPLATE_UBUNTU_1_25: "/SDDC-Datacenter/vm/Templates/ubuntu-kube-v1-25" T_VSPHERE_TEMPLATE_UBUNTU_1_26: "/SDDC-Datacenter/vm/Templates/ubuntu-kube-v1-26" T_VSPHERE_TEMPLATE_UBUNTU_1_27: "/SDDC-Datacenter/vm/Templates/ubuntu-kube-v1-27" T_VSPHERE_TEMPLATE_UBUNTU_1_28: "/SDDC-Datacenter/vm/Templates/ubuntu-kube-v1-28" - T_VSPHERE_TEMPLATE_BR_1_22: "/SDDC-Datacenter/vm/Templates/bottlerocket-kube-v1-22" - T_VSPHERE_TEMPLATE_BR_1_23: "/SDDC-Datacenter/vm/Templates/bottlerocket-kube-v1-23" - T_VSPHERE_TEMPLATE_BR_1_24: "/SDDC-Datacenter/vm/Templates/bottlerocket-kube-v1-24" + T_VSPHERE_TEMPLATE_UBUNTU_1_29: "/SDDC-Datacenter/vm/Templates/ubuntu-kube-v1-29" + T_VSPHERE_TEMPLATE_UBUNTU_2204_1_25: "/SDDC-Datacenter/vm/Templates/ubuntu-2204-kube-v1-25" + T_VSPHERE_TEMPLATE_UBUNTU_2204_1_26: "/SDDC-Datacenter/vm/Templates/ubuntu-2204-kube-v1-26" + T_VSPHERE_TEMPLATE_UBUNTU_2204_1_27: "/SDDC-Datacenter/vm/Templates/ubuntu-2204-kube-v1-27" + T_VSPHERE_TEMPLATE_UBUNTU_2204_1_28: "/SDDC-Datacenter/vm/Templates/ubuntu-2204-kube-v1-28" + T_VSPHERE_TEMPLATE_UBUNTU_2204_1_29: "/SDDC-Datacenter/vm/Templates/ubuntu-2204-kube-v1-29" T_VSPHERE_TEMPLATE_BR_1_25: 
"/SDDC-Datacenter/vm/Templates/bottlerocket-kube-v1-25" T_VSPHERE_TEMPLATE_BR_1_26: "/SDDC-Datacenter/vm/Templates/bottlerocket-kube-v1-26" T_VSPHERE_TEMPLATE_BR_1_27: "/SDDC-Datacenter/vm/Templates/bottlerocket-kube-v1-27" T_VSPHERE_TEMPLATE_BR_1_28: "/SDDC-Datacenter/vm/Templates/bottlerocket-kube-v1-28" - T_VSPHERE_TEMPLATE_REDHAT_1_22: "/SDDC-Datacenter/vm/Templates/redhat-kube-v1-22" - T_VSPHERE_TEMPLATE_REDHAT_1_23: "/SDDC-Datacenter/vm/Templates/redhat-kube-v1-23" - T_VSPHERE_TEMPLATE_REDHAT_1_24: "/SDDC-Datacenter/vm/Templates/redhat-kube-v1-24" + T_VSPHERE_TEMPLATE_BR_1_29: "/SDDC-Datacenter/vm/Templates/bottlerocket-kube-v1-29" T_VSPHERE_TEMPLATE_REDHAT_1_25: "/SDDC-Datacenter/vm/Templates/redhat-kube-v1-25" T_VSPHERE_TEMPLATE_REDHAT_1_26: "/SDDC-Datacenter/vm/Templates/redhat-kube-v1-26" T_VSPHERE_TEMPLATE_REDHAT_1_27: "/SDDC-Datacenter/vm/Templates/redhat-kube-v1-27" T_VSPHERE_TEMPLATE_REDHAT_1_28: "/SDDC-Datacenter/vm/Templates/redhat-kube-v1-28" + T_VSPHERE_TEMPLATE_REDHAT_1_29: "/SDDC-Datacenter/vm/Templates/redhat-kube-v1-29" T_NUTANIX_MACHINE_VCPU_PER_SOCKET: 1 T_NUTANIX_MACHINE_VCPU_SOCKET: 2 T_NUTANIX_MACHINE_MEMORY_SIZE: "4Gi" @@ -82,16 +81,16 @@ env: T_CLOUDSTACK_POD_CIDR: "cloudstack_ci_beta_connection:pod_cidr" T_CLOUDSTACK_SERVICE_CIDR: "cloudstack_ci_beta_connection:service_cidr" T_CLOUDSTACK_SSH_AUTHORIZED_KEY: "vsphere_ci_beta_connection:ssh_authorized_key" - T_TINKERBELL_IMAGE_UBUNTU_1_22: "tinkerbell_ci:image_ubuntu_1_22" - T_TINKERBELL_IMAGE_UBUNTU_1_23: "tinkerbell_ci:image_ubuntu_1_23" - T_TINKERBELL_IMAGE_UBUNTU_1_24: "tinkerbell_ci:image_ubuntu_1_24" T_TINKERBELL_IMAGE_UBUNTU_1_25: "tinkerbell_ci:image_ubuntu_1_25" T_TINKERBELL_IMAGE_UBUNTU_1_26: "tinkerbell_ci:image_ubuntu_1_26" T_TINKERBELL_IMAGE_UBUNTU_1_27: "tinkerbell_ci:image_ubuntu_1_27" T_TINKERBELL_IMAGE_UBUNTU_1_28: "tinkerbell_ci:image_ubuntu_1_28" - T_TINKERBELL_IMAGE_REDHAT_1_22: "tinkerbell_ci:image_redhat_1_22" - T_TINKERBELL_IMAGE_REDHAT_1_23: "tinkerbell_ci:image_redhat_1_23" - T_TINKERBELL_IMAGE_REDHAT_1_24: "tinkerbell_ci:image_redhat_1_24" + T_TINKERBELL_IMAGE_UBUNTU_1_29: "tinkerbell_ci:image_ubuntu_1_29" + T_TINKERBELL_IMAGE_UBUNTU_2204_1_25: "tinkerbell_ci:image_ubuntu_2204_1_25" + T_TINKERBELL_IMAGE_UBUNTU_2204_1_26: "tinkerbell_ci:image_ubuntu_2204_1_26" + T_TINKERBELL_IMAGE_UBUNTU_2204_1_27: "tinkerbell_ci:image_ubuntu_2204_1_27" + T_TINKERBELL_IMAGE_UBUNTU_2204_1_28: "tinkerbell_ci:image_ubuntu_2204_1_28" + T_TINKERBELL_IMAGE_UBUNTU_2204_1_29: "tinkerbell_ci:image_ubuntu_2204_1_29" T_TINKERBELL_IMAGE_REDHAT_1_25: "tinkerbell_ci:image_redhat_1_25" T_TINKERBELL_IMAGE_REDHAT_1_26: "tinkerbell_ci:image_redhat_1_26" T_TINKERBELL_IMAGE_REDHAT_1_27: "tinkerbell_ci:image_redhat_1_27" @@ -99,6 +98,7 @@ env: T_TINKERBELL_SSH_AUTHORIZED_KEY: "vsphere_ci_beta_connection:ssh_authorized_key" T_TINKERBELL_CP_NETWORK_CIDR: "tinkerbell_ci:cp_network_cidr" T_TINKERBELL_S3_INVENTORY_CSV_KEY: "tinkerbell_ci:s3_inventory_csv" + T_TINKERBELL_S3_AG_INVENTORY_CSV_KEY: "tinkerbell_ci:s3_ag_inventory_csv" TEST_RUNNER_GOVC_USERNAME: "tinkerbell_ci:govc_username" TEST_RUNNER_GOVC_PASSWORD: "tinkerbell_ci:govc_password" TEST_RUNNER_GOVC_URL: "tinkerbell_ci:govc_url" @@ -123,6 +123,18 @@ env: T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_26: "nutanix_ci:nutanix_template_ubuntu_1_26" T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_27: "nutanix_ci:nutanix_template_ubuntu_1_27" T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_28: "nutanix_ci:nutanix_template_ubuntu_1_28" + T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_29: "nutanix_ci:nutanix_template_ubuntu_1_29" + 
T_NUTANIX_TEMPLATE_NAME_REDHAT_1_25: "nutanix_ci:nutanix_template_rhel_8_1_25" + T_NUTANIX_TEMPLATE_NAME_REDHAT_1_26: "nutanix_ci:nutanix_template_rhel_8_1_26" + T_NUTANIX_TEMPLATE_NAME_REDHAT_1_27: "nutanix_ci:nutanix_template_rhel_8_1_27" + T_NUTANIX_TEMPLATE_NAME_REDHAT_1_28: "nutanix_ci:nutanix_template_rhel_8_1_28" + T_NUTANIX_TEMPLATE_NAME_REDHAT_1_29: "nutanix_ci:nutanix_template_rhel_8_1_29" + T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_25: "nutanix_ci:nutanix_template_rhel_9_1_25" + T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_26: "nutanix_ci:nutanix_template_rhel_9_1_26" + T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_27: "nutanix_ci:nutanix_template_rhel_9_1_27" + T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_28: "nutanix_ci:nutanix_template_rhel_9_1_28" + T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_29: "nutanix_ci:nutanix_template_rhel_9_1_29" + phases: pre_build: commands: diff --git a/internal/pkg/conformance/download.go b/internal/pkg/conformance/download.go index e51028c27f52..d6e85957ba19 100644 --- a/internal/pkg/conformance/download.go +++ b/internal/pkg/conformance/download.go @@ -1,37 +1,57 @@ package conformance import ( - "bytes" + "encoding/json" "fmt" - - "golang.org/x/sys/unix" + "io" + "net/http" + "runtime" + "strings" "github.com/aws/eks-anywhere/internal/pkg/files" ) const ( - destinationFile = "sonobuoy" - sonobuoyDarwin = "https://github.com/vmware-tanzu/sonobuoy/releases/download/v0.53.2/sonobuoy_0.53.2_darwin_amd64.tar.gz" - sonobuoyLinux = "https://github.com/vmware-tanzu/sonobuoy/releases/download/v0.53.2/sonobuoy_0.53.2_linux_amd64.tar.gz" + destinationFile = "sonobuoy" + sonobouyGitHubAPI = "https://api.github.com/repos/vmware-tanzu/sonobuoy/releases/latest" ) +type githubRelease struct { + Assets []asset `json:"assets"` +} + +type asset struct { + BrowserDownloadURL string `json:"browser_download_url"` +} + func Download() error { - var err error - var utsname unix.Utsname - err = unix.Uname(&utsname) + resp, err := http.Get(sonobouyGitHubAPI) + if err != nil { + return fmt.Errorf("getting latest sonobouy version from GitHub: %v", err) + } + body, err := io.ReadAll(resp.Body) if err != nil { - return fmt.Errorf("uname call failure: %v", err) + return fmt.Errorf("reading the response body for sonobouy release: %v", err) + } + + sonobouyRelease := githubRelease{} + if err := json.Unmarshal(body, &sonobouyRelease); err != nil { + return fmt.Errorf("unmarshalling the response body for sonobouy release: %v", err) } - var downloadFile string - sysname := string(bytes.Trim(utsname.Sysname[:], "\x00")) - if sysname == "Darwin" { - downloadFile = sonobuoyDarwin - } else { - downloadFile = sonobuoyLinux + downloadURL := "" + for _, asset := range sonobouyRelease.Assets { + if strings.Contains(asset.BrowserDownloadURL, runtime.GOOS) && strings.Contains(asset.BrowserDownloadURL, runtime.GOARCH) { + downloadURL = asset.BrowserDownloadURL + } } - fmt.Println("Downloading sonobuoy for " + sysname + ": " + downloadFile) - err = files.GzipFileDownloadExtract(downloadFile, destinationFile, "") + + if downloadURL == "" { + return fmt.Errorf("no binaries found for sonobouy for OS %s and ARCH %s", runtime.GOOS, runtime.GOARCH) + } + + fmt.Printf("Downloading sonobuoy from %s\n", downloadURL) + err = files.GzipFileDownloadExtract(downloadURL, destinationFile, "") if err != nil { return fmt.Errorf("failed to download sonobouy: %v", err) } diff --git a/test/e2e/SKIPPED_TESTS.yaml b/test/e2e/SKIPPED_TESTS.yaml index f26bb7a13438..cf183bd73ba5 100644 --- a/test/e2e/SKIPPED_TESTS.yaml +++ b/test/e2e/SKIPPED_TESTS.yaml @@ 
-101,3 +101,12 @@ skipped_tests: - TestTinkerbellKubernetes129UbuntuCuratedPackagesAdotSimpleFlow - TestTinkerbellKubernetes129UbuntuCuratedPackagesPrometheusSimpleFlow - TestTinkerbellKubernetes129UbuntuCuratedPackagesClusterAutoscalerSimpleFlow + +# Tinkerbell conformance +- TestTinkerbellKubernetes125BottleRocketThreeReplicasTwoWorkersConformanceFlow +- TestTinkerbellKubernetes126BottleRocketThreeReplicasTwoWorkersConformanceFlow +- TestTinkerbellKubernetes125ThreeReplicasTwoWorkersConformanceFlow +- TestTinkerbellKubernetes126ThreeReplicasTwoWorkersConformanceFlow +- TestTinkerbellKubernetes127ThreeReplicasTwoWorkersConformanceFlow +- TestTinkerbellKubernetes128ThreeReplicasTwoWorkersConformanceFlow +- TestTinkerbellKubernetes129ThreeReplicasTwoWorkersConformanceFlow diff --git a/test/e2e/conformance_test.go b/test/e2e/conformance_test.go index c72283f432c5..bd7d55fc4fe0 100644 --- a/test/e2e/conformance_test.go +++ b/test/e2e/conformance_test.go @@ -49,6 +49,36 @@ func TestDockerKubernetes126ThreeWorkersConformanceFlow(t *testing.T) { runConformanceFlow(test) } +func TestDockerKubernetes127ThreeWorkersConformanceFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewDocker(t), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + ) + runConformanceFlow(test) +} + +func TestDockerKubernetes128ThreeWorkersConformanceFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewDocker(t), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + ) + runConformanceFlow(test) +} + +func TestDockerKubernetes129ThreeWorkersConformanceFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewDocker(t), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + ) + runConformanceFlow(test) +} + func TestVSphereKubernetes125ThreeWorkersConformanceFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -69,6 +99,36 @@ func TestVSphereKubernetes126ThreeWorkersConformanceFlow(t *testing.T) { runConformanceFlow(test) } +func TestVSphereKubernetes127ThreeWorkersConformanceFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu127()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + ) + runConformanceFlow(test) +} + +func TestVSphereKubernetes128ThreeWorkersConformanceFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu128()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + ) + runConformanceFlow(test) +} + +func TestVSphereKubernetes129ThreeWorkersConformanceFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu129()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + ) + runConformanceFlow(test) +} + func TestVSphereKubernetes125BottleRocketThreeWorkersConformanceFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -89,6 +149,36 @@ func TestVSphereKubernetes126BottleRocketThreeWorkersConformanceFlow(t *testing. 
runConformanceFlow(test) } +func TestVSphereKubernetes127BottleRocketThreeWorkersConformanceFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithBottleRocket127()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + ) + runConformanceFlow(test) +} + +func TestVSphereKubernetes128BottleRocketThreeWorkersConformanceFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithBottleRocket128()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + ) + runConformanceFlow(test) +} + +func TestVSphereKubernetes129BottleRocketThreeWorkersConformanceFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithBottleRocket129()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + ) + runConformanceFlow(test) +} + func TestTinkerbellKubernetes125ThreeReplicasTwoWorkersConformanceFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -115,6 +205,45 @@ func TestTinkerbellKubernetes126ThreeReplicasTwoWorkersConformanceFlow(t *testin runTinkerbellConformanceFlow(test) } +func TestTinkerbellKubernetes127ThreeReplicasTwoWorkersConformanceFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewTinkerbell(t, framework.WithUbuntu127Tinkerbell()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), + framework.WithClusterFiller(api.WithWorkerNodeCount(2)), + framework.WithClusterFiller(api.WithControlPlaneCount(3)), + framework.WithControlPlaneHardware(3), + framework.WithWorkerHardware(2), + ) + runTinkerbellConformanceFlow(test) +} + +func TestTinkerbellKubernetes128ThreeReplicasTwoWorkersConformanceFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewTinkerbell(t, framework.WithUbuntu128Tinkerbell()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), + framework.WithClusterFiller(api.WithWorkerNodeCount(2)), + framework.WithClusterFiller(api.WithControlPlaneCount(3)), + framework.WithControlPlaneHardware(3), + framework.WithWorkerHardware(2), + ) + runTinkerbellConformanceFlow(test) +} + +func TestTinkerbellKubernetes129ThreeReplicasTwoWorkersConformanceFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithWorkerNodeCount(2)), + framework.WithClusterFiller(api.WithControlPlaneCount(3)), + framework.WithControlPlaneHardware(3), + framework.WithWorkerHardware(2), + ) + runTinkerbellConformanceFlow(test) +} + func TestTinkerbellKubernetes125BottleRocketThreeReplicasTwoWorkersConformanceFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -170,3 +299,23 @@ func TestNutanixKubernetes127ThreeWorkersConformanceFlow(t *testing.T) { ) runConformanceFlow(test) } + +func TestNutanixKubernetes128ThreeWorkersConformanceFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewNutanix(t, framework.WithUbuntu128Nutanix()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + ) + runConformanceFlow(test) +} + +func 
TestNutanixKubernetes129ThreeWorkersConformanceFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewNutanix(t, framework.WithUbuntu129Nutanix()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + ) + runConformanceFlow(test) +} From 0c4cb911578aae4763414ecb2f9b4611f884a3a3 Mon Sep 17 00:00:00 2001 From: Aravind Ramalingam <60027164+pokearu@users.noreply.github.com> Date: Thu, 11 Apr 2024 16:59:47 -0700 Subject: [PATCH 050/193] Adding a helm login to packages reconcile flow for workload clusters (#7971) --- controllers/factory.go | 2 +- pkg/curatedpackages/mocks/installer.go | 14 ++ .../packagecontrollerclient.go | 12 ++ .../packagecontrollerclient_test.go | 139 ++++++++++++++++++ 4 files changed, 166 insertions(+), 1 deletion(-) diff --git a/controllers/factory.go b/controllers/factory.go index d78ead38d93f..b0d04886f06c 100644 --- a/controllers/factory.go +++ b/controllers/factory.go @@ -561,7 +561,7 @@ func (f *Factory) withAWSIamConfigReconciler() *Factory { } func (f *Factory) withPackageControllerClient() *Factory { - f.dependencyFactory.WithHelm().WithKubectl() + f.dependencyFactory.WithHelm(helm.WithInsecure()).WithKubectl() f.buildSteps = append(f.buildSteps, func(ctx context.Context) error { if f.packageControllerClient != nil { diff --git a/pkg/curatedpackages/mocks/installer.go b/pkg/curatedpackages/mocks/installer.go index 7dbb146c16d9..fa67f8b523f1 100644 --- a/pkg/curatedpackages/mocks/installer.go +++ b/pkg/curatedpackages/mocks/installer.go @@ -176,6 +176,20 @@ func (mr *MockChartManagerMockRecorder) InstallChart(ctx, chart, ociURI, version return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstallChart", reflect.TypeOf((*MockChartManager)(nil).InstallChart), ctx, chart, ociURI, version, kubeconfigFilePath, namespace, valueFilePath, skipCRDs, values) } +// RegistryLogin mocks base method. +func (m *MockChartManager) RegistryLogin(ctx context.Context, registry, username, password string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RegistryLogin", ctx, registry, username, password) + ret0, _ := ret[0].(error) + return ret0 +} + +// RegistryLogin indicates an expected call of RegistryLogin. +func (mr *MockChartManagerMockRecorder) RegistryLogin(ctx, registry, username, password interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegistryLogin", reflect.TypeOf((*MockChartManager)(nil).RegistryLogin), ctx, registry, username, password) +} + // MockKubeDeleter is a mock of KubeDeleter interface. 
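// A minimal usage sketch for the new RegistryLogin mock (assumed for
// illustration, not part of this patch; ctrl, ctx and the credential values
// come from the surrounding test, mirroring the reconcile tests further
// below):
//
//	cm := mocks.NewMockChartManager(ctrl)
//	cm.EXPECT().RegistryLogin(ctx, "1.2.3.4", "username", "password").Return(nil)
//
// Any expectation the code under test does not satisfy fails the test when
// the gomock controller finishes.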
type MockKubeDeleter struct { ctrl *gomock.Controller diff --git a/pkg/curatedpackages/packagecontrollerclient.go b/pkg/curatedpackages/packagecontrollerclient.go index 5ca3f0cd03a9..b2deb0813e44 100644 --- a/pkg/curatedpackages/packagecontrollerclient.go +++ b/pkg/curatedpackages/packagecontrollerclient.go @@ -97,6 +97,7 @@ type ChartUninstaller interface { type ChartManager interface { ChartInstaller ChartUninstaller + RegistryLogin(ctx context.Context, registry, username, password string) error } // NewPackageControllerClientFullLifecycle creates a PackageControllerClient @@ -494,6 +495,17 @@ func (pc *PackageControllerClient) Reconcile(ctx context.Context, logger logr.Lo registry := registrymirror.FromCluster(cluster) + if registry != nil && registry.Auth { + rUsername, rPassword, err := config.ReadCredentialsFromSecret(ctx, client) + if err != nil { + return err + } + + if err := pc.chartManager.RegistryLogin(ctx, registry.BaseRegistry, rUsername, rPassword); err != nil { + return err + } + } + // No Kubeconfig is passed. This is intentional. The helm executable will // get that configuration from its environment. if err := pc.EnableFullLifecycle(ctx, logger, cluster.Name, "", image, registry, diff --git a/pkg/curatedpackages/packagecontrollerclient_test.go b/pkg/curatedpackages/packagecontrollerclient_test.go index 2af94ff9bc93..d31d3a4d6f35 100644 --- a/pkg/curatedpackages/packagecontrollerclient_test.go +++ b/pkg/curatedpackages/packagecontrollerclient_test.go @@ -1387,6 +1387,145 @@ func TestReconcile(s *testing.T) { t.Errorf("expected packages client error, got %s", err) } }) + + s.Run("golden path with registry mirror", func(t *testing.T) { + ctx := context.Background() + log := testr.New(t) + cluster := newReconcileTestCluster() + ctrl := gomock.NewController(t) + k := mocks.NewMockKubectlRunner(ctrl) + cm := mocks.NewMockChartManager(ctrl) + bundles := createBundle(cluster) + bundles.Spec.VersionsBundles[0].KubeVersion = string(cluster.Spec.KubernetesVersion) + bundles.ObjectMeta.Name = cluster.Spec.BundlesRef.Name + bundles.ObjectMeta.Namespace = cluster.Spec.BundlesRef.Namespace + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: constants.EksaSystemNamespace, + Name: cluster.Name + "-kubeconfig", + }, + } + registrySecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: constants.EksaSystemNamespace, + Name: "registry-credentials", + }, + } + eksaRelease := createEKSARelease(cluster, bundles) + cluster.Spec.BundlesRef = nil + cluster.Spec.RegistryMirrorConfiguration = &anywherev1.RegistryMirrorConfiguration{ + Endpoint: "1.2.3.4", + Port: "443", + Authenticate: true, + OCINamespaces: []anywherev1.OCINamespace{ + { + Namespace: "ecr-public", + Registry: "public.ecr.aws", + }, + }, + } + t.Setenv("REGISTRY_USERNAME", "username") + t.Setenv("REGISTRY_PASSWORD", "password") + + objs := []runtime.Object{cluster, bundles, secret, eksaRelease, registrySecret} + fakeClient := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() + cm.EXPECT().RegistryLogin(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + cm.EXPECT().InstallChart(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + + pcc := curatedpackages.NewPackageControllerClientFullLifecycle(log, cm, k, nil) + err := pcc.Reconcile(ctx, log, fakeClient, cluster) + if err != nil { + t.Errorf("expected nil error, got %s", err) + } + }) + + s.Run("registry mirror helm login fails", func(t *testing.T) 
{ + ctx := context.Background() + log := testr.New(t) + cluster := newReconcileTestCluster() + ctrl := gomock.NewController(t) + k := mocks.NewMockKubectlRunner(ctrl) + cm := mocks.NewMockChartManager(ctrl) + bundles := createBundle(cluster) + bundles.Spec.VersionsBundles[0].KubeVersion = string(cluster.Spec.KubernetesVersion) + bundles.ObjectMeta.Name = cluster.Spec.BundlesRef.Name + bundles.ObjectMeta.Namespace = cluster.Spec.BundlesRef.Namespace + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: constants.EksaSystemNamespace, + Name: cluster.Name + "-kubeconfig", + }, + } + registrySecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: constants.EksaSystemNamespace, + Name: "registry-credentials", + }, + } + eksaRelease := createEKSARelease(cluster, bundles) + cluster.Spec.BundlesRef = nil + cluster.Spec.RegistryMirrorConfiguration = &anywherev1.RegistryMirrorConfiguration{ + Endpoint: "1.2.3.4", + Port: "443", + Authenticate: true, + OCINamespaces: []anywherev1.OCINamespace{ + { + Namespace: "ecr-public", + Registry: "public.ecr.aws", + }, + }, + } + t.Setenv("REGISTRY_USERNAME", "username") + t.Setenv("REGISTRY_PASSWORD", "password") + + objs := []runtime.Object{cluster, bundles, secret, eksaRelease, registrySecret} + fakeClient := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() + cm.EXPECT().RegistryLogin(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("login error")) + pcc := curatedpackages.NewPackageControllerClientFullLifecycle(log, cm, k, nil) + err := pcc.Reconcile(ctx, log, fakeClient, cluster) + if err == nil { + t.Errorf("expected error, got %s", err) + } + }) + + s.Run("registry mirror secret not found error", func(t *testing.T) { + ctx := context.Background() + log := testr.New(t) + cluster := newReconcileTestCluster() + ctrl := gomock.NewController(t) + k := mocks.NewMockKubectlRunner(ctrl) + cm := mocks.NewMockChartManager(ctrl) + bundles := createBundle(cluster) + bundles.Spec.VersionsBundles[0].KubeVersion = string(cluster.Spec.KubernetesVersion) + bundles.ObjectMeta.Name = cluster.Spec.BundlesRef.Name + bundles.ObjectMeta.Namespace = cluster.Spec.BundlesRef.Namespace + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: constants.EksaSystemNamespace, + Name: cluster.Name + "-kubeconfig", + }, + } + eksaRelease := createEKSARelease(cluster, bundles) + cluster.Spec.BundlesRef = nil + cluster.Spec.RegistryMirrorConfiguration = &anywherev1.RegistryMirrorConfiguration{ + Endpoint: "1.2.3.4", + Port: "443", + Authenticate: true, + OCINamespaces: []anywherev1.OCINamespace{ + { + Namespace: "ecr-public", + Registry: "public.ecr.aws", + }, + }, + } + objs := []runtime.Object{cluster, bundles, secret, eksaRelease} + fakeClient := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() + pcc := curatedpackages.NewPackageControllerClientFullLifecycle(log, cm, k, nil) + err := pcc.Reconcile(ctx, log, fakeClient, cluster) + if err == nil || !strings.Contains(err.Error(), "not found") { + t.Errorf("expected error, got %s", err) + } + }) } func newReconcileTestCluster() *anywherev1.Cluster { From a985abd327137f1db7ddd75455d5687d7dcc5787 Mon Sep 17 00:00:00 2001 From: Rahul Ganesh <31204974+rahulbabu95@users.noreply.github.com> Date: Thu, 11 Apr 2024 18:00:48 -0700 Subject: [PATCH 051/193] Node upgrader changes to consume upgrader go binary instead of script (#7927) * Node upgrades changes to consume upgrader go binary instead of script Signed-off-by: Rahul Ganesh * Fix unit tests for node 
upgrader Signed-off-by: Rahul Ganesh --------- Signed-off-by: Rahul Ganesh Co-authored-by: Rahul Ganesh --- ...cted_first_control_plane_upgrader_pod.yaml | 29 ++++++++++++------- ...ected_rest_control_plane_upgrader_pod.yaml | 27 ++++++++++------- .../expected_worker_upgrader_pod.yaml | 27 ++++++++++------- pkg/nodeupgrader/upgrader.go | 18 ++++++------ 4 files changed, 62 insertions(+), 39 deletions(-) diff --git a/pkg/nodeupgrader/testdata/expected_first_control_plane_upgrader_pod.yaml b/pkg/nodeupgrader/testdata/expected_first_control_plane_upgrader_pod.yaml index 2839ead16697..9a22c3d48ede 100644 --- a/pkg/nodeupgrader/testdata/expected_first_control_plane_upgrader_pod.yaml +++ b/pkg/nodeupgrader/testdata/expected_first_control_plane_upgrader_pod.yaml @@ -13,8 +13,9 @@ spec: - --uts - --ipc - --net - - /foo/eksa-upgrades/scripts/upgrade.sh - - print_status_and_cleanup + - /foo/eksa-upgrades/tools/upgrader + - upgrade + - status command: - nsenter image: public.ecr.aws/eks-anywhere/node-upgrader:latest @@ -46,8 +47,9 @@ spec: - --uts - --ipc - --net - - /foo/eksa-upgrades/scripts/upgrade.sh - - upgrade_containerd + - /foo/eksa-upgrades/tools/upgrader + - upgrade + - containerd command: - nsenter image: public.ecr.aws/eks-anywhere/node-upgrader:latest @@ -62,8 +64,9 @@ spec: - --uts - --ipc - --net - - /foo/eksa-upgrades/scripts/upgrade.sh - - cni_plugins + - /foo/eksa-upgrades/tools/upgrader + - upgrade + - cni-plugins command: - nsenter image: public.ecr.aws/eks-anywhere/node-upgrader:latest @@ -78,9 +81,14 @@ spec: - --uts - --ipc - --net - - /foo/eksa-upgrades/scripts/upgrade.sh - - kubeadm_in_first_cp + - /foo/eksa-upgrades/tools/upgrader + - upgrade + - node + - --type + - FirstCP + - --k8sVersion - v1.28.3-eks-1-28-9 + - --etcdVersion - v3.5.9-eks-1-28-9 command: - nsenter @@ -96,8 +104,9 @@ spec: - --uts - --ipc - --net - - /foo/eksa-upgrades/scripts/upgrade.sh - - kubelet_and_kubectl + - /foo/eksa-upgrades/tools/upgrader + - upgrade + - kubelet-kubectl command: - nsenter image: public.ecr.aws/eks-anywhere/node-upgrader:latest diff --git a/pkg/nodeupgrader/testdata/expected_rest_control_plane_upgrader_pod.yaml b/pkg/nodeupgrader/testdata/expected_rest_control_plane_upgrader_pod.yaml index 393e88afbe54..263cbbf5d99e 100755 --- a/pkg/nodeupgrader/testdata/expected_rest_control_plane_upgrader_pod.yaml +++ b/pkg/nodeupgrader/testdata/expected_rest_control_plane_upgrader_pod.yaml @@ -13,8 +13,9 @@ spec: - --uts - --ipc - --net - - /foo/eksa-upgrades/scripts/upgrade.sh - - print_status_and_cleanup + - /foo/eksa-upgrades/tools/upgrader + - upgrade + - status command: - nsenter image: public.ecr.aws/eks-anywhere/node-upgrader:latest @@ -46,8 +47,9 @@ spec: - --uts - --ipc - --net - - /foo/eksa-upgrades/scripts/upgrade.sh - - upgrade_containerd + - /foo/eksa-upgrades/tools/upgrader + - upgrade + - containerd command: - nsenter image: public.ecr.aws/eks-anywhere/node-upgrader:latest @@ -62,8 +64,9 @@ spec: - --uts - --ipc - --net - - /foo/eksa-upgrades/scripts/upgrade.sh - - cni_plugins + - /foo/eksa-upgrades/tools/upgrader + - upgrade + - cni-plugins command: - nsenter image: public.ecr.aws/eks-anywhere/node-upgrader:latest @@ -78,8 +81,11 @@ spec: - --uts - --ipc - --net - - /foo/eksa-upgrades/scripts/upgrade.sh - - kubeadm_in_rest_cp + - /foo/eksa-upgrades/tools/upgrader + - upgrade + - node + - --type + - RestCP command: - nsenter image: public.ecr.aws/eks-anywhere/node-upgrader:latest @@ -94,8 +100,9 @@ spec: - --uts - --ipc - --net - - /foo/eksa-upgrades/scripts/upgrade.sh - - 
kubelet_and_kubectl + - /foo/eksa-upgrades/tools/upgrader + - upgrade + - kubelet-kubectl command: - nsenter image: public.ecr.aws/eks-anywhere/node-upgrader:latest diff --git a/pkg/nodeupgrader/testdata/expected_worker_upgrader_pod.yaml b/pkg/nodeupgrader/testdata/expected_worker_upgrader_pod.yaml index d08637883592..8538f062281a 100755 --- a/pkg/nodeupgrader/testdata/expected_worker_upgrader_pod.yaml +++ b/pkg/nodeupgrader/testdata/expected_worker_upgrader_pod.yaml @@ -13,8 +13,9 @@ spec: - --uts - --ipc - --net - - /foo/eksa-upgrades/scripts/upgrade.sh - - print_status_and_cleanup + - /foo/eksa-upgrades/tools/upgrader + - upgrade + - status command: - nsenter image: public.ecr.aws/eks-anywhere/node-upgrader:latest @@ -43,8 +44,9 @@ spec: - --uts - --ipc - --net - - /foo/eksa-upgrades/scripts/upgrade.sh - - upgrade_containerd + - /foo/eksa-upgrades/tools/upgrader + - upgrade + - containerd command: - nsenter image: public.ecr.aws/eks-anywhere/node-upgrader:latest @@ -59,8 +61,9 @@ spec: - --uts - --ipc - --net - - /foo/eksa-upgrades/scripts/upgrade.sh - - cni_plugins + - /foo/eksa-upgrades/tools/upgrader + - upgrade + - cni-plugins command: - nsenter image: public.ecr.aws/eks-anywhere/node-upgrader:latest @@ -75,8 +78,11 @@ spec: - --uts - --ipc - --net - - /foo/eksa-upgrades/scripts/upgrade.sh - - kubeadm_in_worker + - /foo/eksa-upgrades/tools/upgrader + - upgrade + - node + - --type + - Worker command: - nsenter image: public.ecr.aws/eks-anywhere/node-upgrader:latest @@ -91,8 +97,9 @@ spec: - --uts - --ipc - --net - - /foo/eksa-upgrades/scripts/upgrade.sh - - kubelet_and_kubectl + - /foo/eksa-upgrades/tools/upgrader + - upgrade + - kubelet-kubectl command: - nsenter image: public.ecr.aws/eks-anywhere/node-upgrader:latest diff --git a/pkg/nodeupgrader/upgrader.go b/pkg/nodeupgrader/upgrader.go index 7e2f3213a41e..f8c654bdeb18 100644 --- a/pkg/nodeupgrader/upgrader.go +++ b/pkg/nodeupgrader/upgrader.go @@ -11,7 +11,7 @@ import ( ) const ( - upgradeScript = "/foo/eksa-upgrades/scripts/upgrade.sh" + upgradeBin = "/foo/eksa-upgrades/tools/upgrader" // CopierContainerName holds the name of the components copier container. CopierContainerName = "components-copier" @@ -40,21 +40,21 @@ func PodName(nodeName string) string { // UpgradeFirstControlPlanePod returns an upgrader pod that should be deployed on the first control plane node. func UpgradeFirstControlPlanePod(nodeName, image, kubernetesVersion, etcdVersion string) *corev1.Pod { p := upgraderPod(nodeName, image, true) - p.Spec.InitContainers = containersForUpgrade(true, image, nodeName, "kubeadm_in_first_cp", kubernetesVersion, etcdVersion) + p.Spec.InitContainers = containersForUpgrade(true, image, nodeName, "upgrade", "node", "--type", "FirstCP", "--k8sVersion", kubernetesVersion, "--etcdVersion", etcdVersion) return p } // UpgradeSecondaryControlPlanePod returns an upgrader pod that can be deployed on the remaining control plane nodes. func UpgradeSecondaryControlPlanePod(nodeName, image string) *corev1.Pod { p := upgraderPod(nodeName, image, true) - p.Spec.InitContainers = containersForUpgrade(true, image, nodeName, "kubeadm_in_rest_cp") + p.Spec.InitContainers = containersForUpgrade(true, image, nodeName, "upgrade", "node", "--type", "RestCP") return p } // UpgradeWorkerPod returns an upgrader pod that can be deployed on worker nodes. 
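// A rough sketch of what the generated init containers now execute on a
// node (assumed from the expected-pod testdata in this patch; the leading
// nsenter flags are elided here):
//
//	nsenter ... --uts --ipc --net \
//	    /foo/eksa-upgrades/tools/upgrader upgrade node --type Worker
//
// where --type switches between FirstCP, RestCP and Worker, replacing the
// per-function shell entrypoints of the old upgrade.sh.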
func UpgradeWorkerPod(nodeName, image string) *corev1.Pod { p := upgraderPod(nodeName, image, false) - p.Spec.InitContainers = containersForUpgrade(false, image, nodeName, "kubeadm_in_worker") + p.Spec.InitContainers = containersForUpgrade(false, image, nodeName, "upgrade", "node", "--type", "Worker") return p } @@ -76,7 +76,7 @@ func upgraderPod(nodeName, image string, isCP bool) *corev1.Pod { HostPID: true, Volumes: volumes, Containers: []corev1.Container{ - nsenterContainer(image, PostUpgradeContainerName, upgradeScript, "print_status_and_cleanup"), + nsenterContainer(image, PostUpgradeContainerName, upgradeBin, "upgrade", "status"), }, RestartPolicy: corev1.RestartPolicyOnFailure, }, @@ -86,10 +86,10 @@ func upgraderPod(nodeName, image string, isCP bool) *corev1.Pod { func containersForUpgrade(isCP bool, image, nodeName string, kubeadmUpgradeCommand ...string) []corev1.Container { return []corev1.Container{ copierContainer(image, isCP), - nsenterContainer(image, ContainerdUpgraderContainerName, upgradeScript, "upgrade_containerd"), - nsenterContainer(image, CNIPluginsUpgraderContainerName, upgradeScript, "cni_plugins"), - nsenterContainer(image, KubeadmUpgraderContainerName, append([]string{upgradeScript}, kubeadmUpgradeCommand...)...), - nsenterContainer(image, KubeletUpgradeContainerName, upgradeScript, "kubelet_and_kubectl"), + nsenterContainer(image, ContainerdUpgraderContainerName, upgradeBin, "upgrade", "containerd"), + nsenterContainer(image, CNIPluginsUpgraderContainerName, upgradeBin, "upgrade", "cni-plugins"), + nsenterContainer(image, KubeadmUpgraderContainerName, append([]string{upgradeBin}, kubeadmUpgradeCommand...)...), + nsenterContainer(image, KubeletUpgradeContainerName, upgradeBin, "upgrade", "kubelet-kubectl"), } } From 2795f6f21d60fdd66400a4a43c641819ff9679e4 Mon Sep 17 00:00:00 2001 From: Abhay Krishna Date: Thu, 11 Apr 2024 18:41:48 -0700 Subject: [PATCH 052/193] Replace DockerHub amazon-eks-pod-identity-webhook image with ECR Public image (#7973) --- .../buildspecs/cloudstack-test-eks-a-cli.yml | 1 + .../buildspecs/vsphere-test-eks-a-cli.yml | 1 + .../config/pod-identity-webhook.yaml | 2 +- test/framework/etcdencryption.go | 54 ++++++++++--------- 4 files changed, 32 insertions(+), 26 deletions(-) diff --git a/cmd/integration_test/build/buildspecs/cloudstack-test-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/cloudstack-test-eks-a-cli.yml index 5787e73e8e36..8e70a0250f07 100644 --- a/cmd/integration_test/build/buildspecs/cloudstack-test-eks-a-cli.yml +++ b/cmd/integration_test/build/buildspecs/cloudstack-test-eks-a-cli.yml @@ -63,6 +63,7 @@ env: T_IRSA_S3_BUCKET: "etcd-encryption:irsa_s3_bucket" T_KMS_IAM_ROLE: "etcd-encryption:kms_iam_role_arn" T_KMS_IMAGE: "etcd-encryption:kms_image" + T_POD_IDENTITY_WEBHOOK_IMAGE: "etcd-encryption:pod_identity_webhook_image" T_KMS_KEY_ARN: "etcd-encryption:kms_key_arn" T_KMS_KEY_REGION: "etcd-encryption:region" T_KMS_SOCKET: "etcd-encryption:socket" diff --git a/cmd/integration_test/build/buildspecs/vsphere-test-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/vsphere-test-eks-a-cli.yml index 3e0adaf91e60..ffb0fd7fe5db 100644 --- a/cmd/integration_test/build/buildspecs/vsphere-test-eks-a-cli.yml +++ b/cmd/integration_test/build/buildspecs/vsphere-test-eks-a-cli.yml @@ -84,6 +84,7 @@ env: T_IRSA_S3_BUCKET: "etcd-encryption:irsa_s3_bucket" T_KMS_IAM_ROLE: "etcd-encryption:kms_iam_role_arn" T_KMS_IMAGE: "etcd-encryption:kms_image" + T_POD_IDENTITY_WEBHOOK_IMAGE: "etcd-encryption:pod_identity_webhook_image" 
T_KMS_KEY_ARN: "etcd-encryption:kms_key_arn" T_KMS_KEY_REGION: "etcd-encryption:region" T_KMS_SOCKET: "etcd-encryption:socket" diff --git a/test/framework/config/pod-identity-webhook.yaml b/test/framework/config/pod-identity-webhook.yaml index 79d81f4fdef9..ae6247217a14 100644 --- a/test/framework/config/pod-identity-webhook.yaml +++ b/test/framework/config/pod-identity-webhook.yaml @@ -95,7 +95,7 @@ spec: serviceAccountName: pod-identity-webhook containers: - name: pod-identity-webhook - image: amazon/amazon-eks-pod-identity-webhook:latest + image: {{ .podIdentityWebhookImage }} imagePullPolicy: Always command: - /webhook diff --git a/test/framework/etcdencryption.go b/test/framework/etcdencryption.go index dcd68e8ee246..e3b6e17b33b9 100644 --- a/test/framework/etcdencryption.go +++ b/test/framework/etcdencryption.go @@ -28,12 +28,13 @@ import ( ) const ( - irsaS3BucketVar = "T_IRSA_S3_BUCKET" - kmsIAMRoleVar = "T_KMS_IAM_ROLE" - kmsImageVar = "T_KMS_IMAGE" - kmsKeyArn = "T_KMS_KEY_ARN" - kmsKeyRegion = "T_KMS_KEY_REGION" - kmsSocketVar = "T_KMS_SOCKET" + irsaS3BucketVar = "T_IRSA_S3_BUCKET" + kmsIAMRoleVar = "T_KMS_IAM_ROLE" + kmsImageVar = "T_KMS_IMAGE" + podIdentityWebhookImageVar = "T_POD_IDENTITY_WEBHOOK_IMAGE" + kmsKeyArn = "T_KMS_KEY_ARN" + kmsKeyRegion = "T_KMS_KEY_REGION" + kmsSocketVar = "T_KMS_SOCKET" defaultRegion = "us-west-2" keysFilename = "keys.json" @@ -54,27 +55,29 @@ type keyResponse struct { // etcdEncryptionTestVars stores all the environment variables needed by etcd encryption tests. type etcdEncryptionTestVars struct { - KmsKeyRegion string - S3Bucket string - KmsIamRole string - KmsImage string - KmsKeyArn string - KmsSocket string + KmsKeyRegion string + S3Bucket string + KmsIamRole string + KmsImage string + PodIdentityWebhookImage string + KmsKeyArn string + KmsSocket string } // RequiredEtcdEncryptionEnvVars returns the environment variables required . 
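// For a local run of these tests, each required variable would be exported
// ahead of time; a hypothetical example (the image URIs are placeholders,
// not values from this patch):
//
//	export T_POD_IDENTITY_WEBHOOK_IMAGE=<pod-identity-webhook image URI from ECR Public>
//	export T_KMS_IMAGE=<KMS encryption provider image URI>
//
// with the remaining T_* variables in the list below set the same way.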
func RequiredEtcdEncryptionEnvVars() []string { - return []string{irsaS3BucketVar, kmsIAMRoleVar, kmsImageVar, kmsKeyArn, kmsSocketVar} + return []string{irsaS3BucketVar, kmsIAMRoleVar, kmsImageVar, podIdentityWebhookImageVar, kmsKeyArn, kmsSocketVar} } func getEtcdEncryptionVarsFromEnv() *etcdEncryptionTestVars { return &etcdEncryptionTestVars{ - KmsKeyRegion: os.Getenv(kmsKeyRegion), - S3Bucket: os.Getenv(irsaS3BucketVar), - KmsIamRole: os.Getenv(kmsIAMRoleVar), - KmsImage: os.Getenv(kmsImageVar), - KmsKeyArn: os.Getenv(kmsKeyArn), - KmsSocket: os.Getenv(kmsSocketVar), + KmsKeyRegion: os.Getenv(kmsKeyRegion), + S3Bucket: os.Getenv(irsaS3BucketVar), + KmsIamRole: os.Getenv(kmsIAMRoleVar), + KmsImage: os.Getenv(kmsImageVar), + PodIdentityWebhookImage: os.Getenv(podIdentityWebhookImageVar), + KmsKeyArn: os.Getenv(kmsKeyArn), + KmsSocket: os.Getenv(kmsSocketVar), } } @@ -212,12 +215,13 @@ func (e *ClusterE2ETest) deployPodIdentityWebhook(ctx context.Context, envVars * func (e *ClusterE2ETest) deployKMSProvider(ctx context.Context, envVars *etcdEncryptionTestVars) error { e.T.Log("Deploying AWS KMS Encryption Provider") values := map[string]string{ - "kmsImage": envVars.KmsImage, - "kmsIamRole": envVars.KmsIamRole, - "kmsKeyArn": envVars.KmsKeyArn, - "kmsKeyRegion": envVars.KmsKeyRegion, - "kmsSocket": envVars.KmsSocket, - "serviceAccountName": "kms-encrypter-decrypter", + "kmsImage": envVars.KmsImage, + "podIdentityWebhookImage": envVars.PodIdentityWebhookImage, + "kmsIamRole": envVars.KmsIamRole, + "kmsKeyArn": envVars.KmsKeyArn, + "kmsKeyRegion": envVars.KmsKeyRegion, + "kmsSocket": envVars.KmsSocket, + "serviceAccountName": "kms-encrypter-decrypter", } if e.OSFamily != v1alpha1.Bottlerocket { From bbbaa268d577229a23c87ea82155144e27f19894 Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Thu, 11 Apr 2024 19:05:47 -0700 Subject: [PATCH 053/193] [PR BOT] Generate release testdata files (#7968) --- .../testdata/main-bundle-release.yaml | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/release/cli/pkg/operations/testdata/main-bundle-release.yaml b/release/cli/pkg/operations/testdata/main-bundle-release.yaml index a970f556362f..679a59cad856 100644 --- a/release/cli/pkg/operations/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/operations/testdata/main-bundle-release.yaml @@ -37,18 +37,18 @@ spec: arch: - amd64 description: Container image for bottlerocket-admin image - imageDigest: sha256:789cb7d120ac7633bda792660266214a486c182db67d89ee6508216fe2bc7f93 + imageDigest: sha256:2f884ebc34c0f54b6d75493ba2ad645f42525d6c90fe2099ae50708de6f86152 name: bottlerocket-admin os: linux - uri: public.ecr.aws/bottlerocket/bottlerocket-admin:v0.11.4 + uri: public.ecr.aws/bottlerocket/bottlerocket-admin:v0.11.6 control: arch: - amd64 description: Container image for bottlerocket-control image - imageDigest: sha256:29deada0d540dfe6ecb8238e0eb04ed4a9e6b8cf0d5cab232890ef9404643c30 + imageDigest: sha256:0ed73c9a2e35fab4287f3cbf18de084b740cace3234628c5e215adc407e52750 name: bottlerocket-control os: linux - uri: public.ecr.aws/bottlerocket/bottlerocket-control:v0.7.8 + uri: public.ecr.aws/bottlerocket/bottlerocket-control:v0.7.10 kubeadmBootstrap: arch: - amd64 @@ -815,18 +815,18 @@ spec: arch: - amd64 description: Container image for bottlerocket-admin image - imageDigest: sha256:789cb7d120ac7633bda792660266214a486c182db67d89ee6508216fe2bc7f93 + imageDigest: 
sha256:2f884ebc34c0f54b6d75493ba2ad645f42525d6c90fe2099ae50708de6f86152 name: bottlerocket-admin os: linux - uri: public.ecr.aws/bottlerocket/bottlerocket-admin:v0.11.4 + uri: public.ecr.aws/bottlerocket/bottlerocket-admin:v0.11.6 control: arch: - amd64 description: Container image for bottlerocket-control image - imageDigest: sha256:29deada0d540dfe6ecb8238e0eb04ed4a9e6b8cf0d5cab232890ef9404643c30 + imageDigest: sha256:0ed73c9a2e35fab4287f3cbf18de084b740cace3234628c5e215adc407e52750 name: bottlerocket-control os: linux - uri: public.ecr.aws/bottlerocket/bottlerocket-control:v0.7.8 + uri: public.ecr.aws/bottlerocket/bottlerocket-control:v0.7.10 kubeadmBootstrap: arch: - amd64 @@ -1593,18 +1593,18 @@ spec: arch: - amd64 description: Container image for bottlerocket-admin image - imageDigest: sha256:789cb7d120ac7633bda792660266214a486c182db67d89ee6508216fe2bc7f93 + imageDigest: sha256:2f884ebc34c0f54b6d75493ba2ad645f42525d6c90fe2099ae50708de6f86152 name: bottlerocket-admin os: linux - uri: public.ecr.aws/bottlerocket/bottlerocket-admin:v0.11.4 + uri: public.ecr.aws/bottlerocket/bottlerocket-admin:v0.11.6 control: arch: - amd64 description: Container image for bottlerocket-control image - imageDigest: sha256:29deada0d540dfe6ecb8238e0eb04ed4a9e6b8cf0d5cab232890ef9404643c30 + imageDigest: sha256:0ed73c9a2e35fab4287f3cbf18de084b740cace3234628c5e215adc407e52750 name: bottlerocket-control os: linux - uri: public.ecr.aws/bottlerocket/bottlerocket-control:v0.7.8 + uri: public.ecr.aws/bottlerocket/bottlerocket-control:v0.7.10 kubeadmBootstrap: arch: - amd64 @@ -2371,18 +2371,18 @@ spec: arch: - amd64 description: Container image for bottlerocket-admin image - imageDigest: sha256:789cb7d120ac7633bda792660266214a486c182db67d89ee6508216fe2bc7f93 + imageDigest: sha256:2f884ebc34c0f54b6d75493ba2ad645f42525d6c90fe2099ae50708de6f86152 name: bottlerocket-admin os: linux - uri: public.ecr.aws/bottlerocket/bottlerocket-admin:v0.11.4 + uri: public.ecr.aws/bottlerocket/bottlerocket-admin:v0.11.6 control: arch: - amd64 description: Container image for bottlerocket-control image - imageDigest: sha256:29deada0d540dfe6ecb8238e0eb04ed4a9e6b8cf0d5cab232890ef9404643c30 + imageDigest: sha256:0ed73c9a2e35fab4287f3cbf18de084b740cace3234628c5e215adc407e52750 name: bottlerocket-control os: linux - uri: public.ecr.aws/bottlerocket/bottlerocket-control:v0.7.8 + uri: public.ecr.aws/bottlerocket/bottlerocket-control:v0.7.10 kubeadmBootstrap: arch: - amd64 @@ -3149,18 +3149,18 @@ spec: arch: - amd64 description: Container image for bottlerocket-admin image - imageDigest: sha256:789cb7d120ac7633bda792660266214a486c182db67d89ee6508216fe2bc7f93 + imageDigest: sha256:2f884ebc34c0f54b6d75493ba2ad645f42525d6c90fe2099ae50708de6f86152 name: bottlerocket-admin os: linux - uri: public.ecr.aws/bottlerocket/bottlerocket-admin:v0.11.4 + uri: public.ecr.aws/bottlerocket/bottlerocket-admin:v0.11.6 control: arch: - amd64 description: Container image for bottlerocket-control image - imageDigest: sha256:29deada0d540dfe6ecb8238e0eb04ed4a9e6b8cf0d5cab232890ef9404643c30 + imageDigest: sha256:0ed73c9a2e35fab4287f3cbf18de084b740cace3234628c5e215adc407e52750 name: bottlerocket-control os: linux - uri: public.ecr.aws/bottlerocket/bottlerocket-control:v0.7.8 + uri: public.ecr.aws/bottlerocket/bottlerocket-control:v0.7.10 kubeadmBootstrap: arch: - amd64 From db91031b66de42f78f44db139970f666b95a93f1 Mon Sep 17 00:00:00 2001 From: Rahul Ganesh <31204974+rahulbabu95@users.noreply.github.com> Date: Thu, 11 Apr 2024 19:32:47 -0700 Subject: [PATCH 054/193] Add 
 K8s version 1.30 feature flag (#7972)

* Add K8s version 1.30 feature flag

Signed-off-by: Rahul Ganesh

* set unreleased k8s version to 1.30

Signed-off-by: Rahul Ganesh

---------

Signed-off-by: Rahul Ganesh
Co-authored-by: Rahul Ganesh
---
 pkg/api/v1alpha1/cluster_types.go              |  1 +
 pkg/features/features.go                       |  9 +++++++++
 pkg/features/features_test.go                  |  8 ++++++++
 pkg/validations/cluster.go                     | 11 +++++++++++
 pkg/validations/cluster_test.go                | 16 ++++++++++++++++
 .../createvalidations/preflightvalidations.go  |  9 +++++++++
 .../upgradevalidations/preflightvalidations.go |  9 +++++++++
 test/framework/cluster.go                      |  2 +-
 8 files changed, 64 insertions(+), 1 deletion(-)

diff --git a/pkg/api/v1alpha1/cluster_types.go b/pkg/api/v1alpha1/cluster_types.go
index 4173209b3cfe..0c15bec760f4 100644
--- a/pkg/api/v1alpha1/cluster_types.go
+++ b/pkg/api/v1alpha1/cluster_types.go
@@ -824,6 +824,7 @@ const (
     Kube127 KubernetesVersion = "1.27"
     Kube128 KubernetesVersion = "1.28"
     Kube129 KubernetesVersion = "1.29"
+    Kube130 KubernetesVersion = "1.30"
 )
 
 // KubeVersionToSemver converts kube version to semver for comparisons.
diff --git a/pkg/features/features.go b/pkg/features/features.go
index 7cb88ec06d58..94b4b02d9b0a 100644
--- a/pkg/features/features.go
+++ b/pkg/features/features.go
@@ -8,6 +8,7 @@ const (
     UseControllerForCli             = "USE_CONTROLLER_FOR_CLI"
     VSphereInPlaceEnvVar            = "VSPHERE_IN_PLACE_UPGRADE"
     APIServerExtraArgsEnabledEnvVar = "API_SERVER_EXTRA_ARGS_ENABLED"
+    K8s130SupportEnvVar             = "K8S_1_30_SUPPORT"
 )
 
 func FeedGates(featureGates []string) {
@@ -64,3 +65,11 @@ func APIServerExtraArgsEnabled() Feature {
         IsActive: globalFeatures.isActiveForEnvVar(APIServerExtraArgsEnabledEnvVar),
     }
 }
+
+// K8s130Support is the feature flag for Kubernetes 1.30 support.
+func K8s130Support() Feature {
+    return Feature{
+        Name:     "Kubernetes version 1.30 support",
+        IsActive: globalFeatures.isActiveForEnvVar(K8s130SupportEnvVar),
+    }
+}
diff --git a/pkg/features/features_test.go b/pkg/features/features_test.go
index 739d5f4c146c..db46c689e675 100644
--- a/pkg/features/features_test.go
+++ b/pkg/features/features_test.go
@@ -85,3 +85,11 @@ func TestAPIServerExtraArgsEnabledFeatureFlag(t *testing.T) {
     g.Expect(os.Setenv(APIServerExtraArgsEnabledEnvVar, "true")).To(Succeed())
     g.Expect(IsActive(APIServerExtraArgsEnabled())).To(BeTrue())
 }
+
+func TestWithK8s130FeatureFlag(t *testing.T) {
+    g := NewWithT(t)
+    setupContext(t)
+
+    g.Expect(os.Setenv(K8s130SupportEnvVar, "true")).To(Succeed())
+    g.Expect(IsActive(K8s130Support())).To(BeTrue())
+}
diff --git a/pkg/validations/cluster.go b/pkg/validations/cluster.go
index e4a75f656155..af8bc4761124 100644
--- a/pkg/validations/cluster.go
+++ b/pkg/validations/cluster.go
@@ -10,6 +10,7 @@ import (
     "github.com/aws/eks-anywhere/pkg/cluster"
     "github.com/aws/eks-anywhere/pkg/config"
     "github.com/aws/eks-anywhere/pkg/constants"
+    "github.com/aws/eks-anywhere/pkg/features"
     "github.com/aws/eks-anywhere/pkg/logger"
     "github.com/aws/eks-anywhere/pkg/providers"
     "github.com/aws/eks-anywhere/pkg/semver"
@@ -267,3 +268,13 @@ func ValidateManagementComponentsVersionSkew(ctx context.Context, k KubectlClien
     }
     return nil
 }
+
+// ValidateK8s130Support checks if the 1.30 feature flag is set when using k8s 1.30.
+func ValidateK8s130Support(clusterSpec *cluster.Spec) error {
+    if !features.IsActive(features.K8s130Support()) {
+        if clusterSpec.Cluster.Spec.KubernetesVersion == v1alpha1.Kube130 {
+            return fmt.Errorf("kubernetes version %s is not enabled. Please set the env variable %v", v1alpha1.Kube130, features.K8s130SupportEnvVar)
+        }
+    }
+    return nil
+}
diff --git a/pkg/validations/cluster_test.go b/pkg/validations/cluster_test.go
index 553b598f13a7..ab460b254406 100644
--- a/pkg/validations/cluster_test.go
+++ b/pkg/validations/cluster_test.go
@@ -15,6 +15,7 @@ import (
     "github.com/aws/eks-anywhere/internal/test"
     anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
     "github.com/aws/eks-anywhere/pkg/cluster"
+    "github.com/aws/eks-anywhere/pkg/features"
     "github.com/aws/eks-anywhere/pkg/providers"
     providermocks "github.com/aws/eks-anywhere/pkg/providers/mocks"
     "github.com/aws/eks-anywhere/pkg/types"
@@ -742,3 +743,18 @@ func TestValidateManagementComponentsVersionSkew(t *testing.T) {
         })
     }
 }
+
+func TestValidateK8s130Support(t *testing.T) {
+    tt := newTest(t)
+    tt.clusterSpec.Cluster.Spec.KubernetesVersion = anywherev1.Kube130
+    tt.Expect(validations.ValidateK8s130Support(tt.clusterSpec)).To(
+        MatchError(ContainSubstring("kubernetes version 1.30 is not enabled. Please set the env variable K8S_1_30_SUPPORT")))
+}
+
+func TestValidateK8s130SupportActive(t *testing.T) {
+    tt := newTest(t)
+    tt.clusterSpec.Cluster.Spec.KubernetesVersion = anywherev1.Kube130
+    features.ClearCache()
+    os.Setenv(features.K8s130SupportEnvVar, "true")
+    tt.Expect(validations.ValidateK8s130Support(tt.clusterSpec)).To(Succeed())
+}
diff --git a/pkg/validations/createvalidations/preflightvalidations.go b/pkg/validations/createvalidations/preflightvalidations.go
index 76fa21e31a6c..00bd5c76e6fd 100644
--- a/pkg/validations/createvalidations/preflightvalidations.go
+++ b/pkg/validations/createvalidations/preflightvalidations.go
@@ -7,6 +7,7 @@ import (
     anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
     "github.com/aws/eks-anywhere/pkg/config"
     "github.com/aws/eks-anywhere/pkg/constants"
+    "github.com/aws/eks-anywhere/pkg/features"
     "github.com/aws/eks-anywhere/pkg/types"
     "github.com/aws/eks-anywhere/pkg/validations"
 )
@@ -49,6 +50,14 @@ func (v *CreateValidations) PreflightValidations(ctx context.Context) []validati
                 Err:         validations.ValidateEksaVersion(ctx, v.Opts.CliVersion, v.Opts.Spec),
             }
         },
+        func() *validations.ValidationResult {
+            return &validations.ValidationResult{
+                Name:        "validate kubernetes version 1.30 support",
+                Remediation: fmt.Sprintf("ensure %v env variable is set", features.K8s130SupportEnvVar),
+                Err:         validations.ValidateK8s130Support(v.Opts.Spec),
+                Silent:      true,
+            }
+        },
     }
 
     if v.Opts.Spec.Cluster.IsManaged() {
diff --git a/pkg/validations/upgradevalidations/preflightvalidations.go b/pkg/validations/upgradevalidations/preflightvalidations.go
index 650bd9f941e2..ff0612f82430 100644
--- a/pkg/validations/upgradevalidations/preflightvalidations.go
+++ b/pkg/validations/upgradevalidations/preflightvalidations.go
@@ -9,6 +9,7 @@ import (
     anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
     "github.com/aws/eks-anywhere/pkg/config"
     "github.com/aws/eks-anywhere/pkg/constants"
+    "github.com/aws/eks-anywhere/pkg/features"
     "github.com/aws/eks-anywhere/pkg/providers"
     "github.com/aws/eks-anywhere/pkg/types"
     "github.com/aws/eks-anywhere/pkg/validation"
@@ -122,6 +123,14 @@ func (u *UpgradeValidations) PreflightValidations(ctx context.Context) []validat
                 Err:         validations.ValidatePauseAnnotation(ctx, k, targetCluster, targetCluster.Name),
             }
         },
+        func() *validations.ValidationResult {
+            return &validations.ValidationResult{
+                Name:        "validate kubernetes version 1.30 support",
+                Remediation: fmt.Sprintf("ensure %v env variable is set", features.K8s130SupportEnvVar),
+                Err:         validations.ValidateK8s130Support(u.Opts.Spec),
+                Silent:      true,
+            }
+        },
     }
 
     if u.Opts.Spec.Cluster.IsManaged() {
diff --git a/test/framework/cluster.go b/test/framework/cluster.go
index f5970d25992c..2b5c2a6b2a26 100644
--- a/test/framework/cluster.go
+++ b/test/framework/cluster.go
@@ -2180,7 +2180,7 @@ func dumpFile(description, path string, t T) {
 func (e *ClusterE2ETest) setFeatureFlagForUnreleasedKubernetesVersion(version v1alpha1.KubernetesVersion) {
     // Update this variable to equal the feature flagged k8s version when applicable.
     // For example, if k8s 1.26 is under a feature flag, we would set this to v1alpha1.Kube126
-    var unreleasedK8sVersion v1alpha1.KubernetesVersion
+    unreleasedK8sVersion := v1alpha1.Kube130
 
     if version == unreleasedK8sVersion {
         // Set feature flag for the unreleased k8s version when applicable

From e682a330f46741c0fbbadaa086a2c34a61aa90eb Mon Sep 17 00:00:00 2001
From: Xu Deng
Date: Fri, 12 Apr 2024 12:25:44 -0400
Subject: [PATCH 055/193] Add 0.19.3 change (#7959)

* Add 0.19.3 change

* Update docs/content/en/docs/whatsnew/changelog.md

Co-authored-by: Abhay Krishna

* Update docs/content/en/docs/whatsnew/changelog.md

Co-authored-by: Abhay Krishna

---------

Co-authored-by: EKS Distro PR Bot
Co-authored-by: Abhay Krishna
---
 docs/content/en/docs/whatsnew/changelog.md | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/docs/content/en/docs/whatsnew/changelog.md b/docs/content/en/docs/whatsnew/changelog.md
index 8d6944ac5af5..f168e6374780 100644
--- a/docs/content/en/docs/whatsnew/changelog.md
+++ b/docs/content/en/docs/whatsnew/changelog.md
@@ -30,6 +30,23 @@ description: >
 * When upgrading to a new minor version, a new OS image must be created using the new image-builder CLI pertaining to that release.
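The feature-flag wiring added in PATCH 054 above follows the `features` package's established pattern: a constant naming the env var, a `Feature` constructor, and `IsActive` checks at the validation sites. A minimal sketch of how a caller exercises the new gate (assuming the `features` package API exactly as shown in that diff; because flag lookups are cached per process, `ClearCache` has to run after the env var changes):

```go
package main

import (
	"fmt"
	"os"

	"github.com/aws/eks-anywhere/pkg/features"
)

func main() {
	// Opt in to the unreleased Kubernetes 1.30 support.
	os.Setenv(features.K8s130SupportEnvVar, "true") // K8S_1_30_SUPPORT
	features.ClearCache()                           // flag results are cached; reset after changing env vars

	if features.IsActive(features.K8s130Support()) {
		fmt.Println("Kubernetes 1.30 support is enabled")
	}
}
```

This is the same sequence the e2e framework's `setFeatureFlagForUnreleasedKubernetesVersion` relies on, and the same check `ValidateK8s130Support` performs during preflight.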
 {{% /alert %}}
+
+## [v0.19.3](https://github.com/aws/eks-anywhere/releases/tag/v0.19.3)
+
+### Supported OS version details
+| | vSphere | Bare Metal | Nutanix | CloudStack | Snow |
+|:-------------------:|:-------:|:----------:|:-------:|:----------:|:----:|
+| Ubuntu 20.04 | ✔ | ✔ | ✔ | — | ✔ |
+| Ubuntu 22.04 | ✔ | ✔ | ✔ | — | — |
+| Bottlerocket 1.19.2 | ✔ | ✔ | — | — | — |
+| RHEL 8.x | ✔ | ✔ | ✔ | ✔ | — |
+| RHEL 9.x | — | — | ✔ | ✔ | — |
+
+### Changed
+- Updated helm to v3.14.3 [#3050](https://github.com/aws/eks-anywhere-build-tooling/pull/3050)
+
+### Fixed
+- Bumped golang.org/x/net to a version that fixes [vulnerability GO-2024-2687](https://pkg.go.dev/vuln/GO-2024-2687)
+- Fixed proxy configurations for airgapped environments [#7913](https://github.com/aws/eks-anywhere/pull/7913)
 
 ## [v0.19.2](https://github.com/aws/eks-anywhere/releases/tag/v0.19.2)

From f9e7ba9db401ba07469b95b74b6e63ce216861bb Mon Sep 17 00:00:00 2001
From: Abhinav Pandey
Date: Fri, 12 Apr 2024 11:27:44 -0700
Subject: [PATCH 056/193] Add automated cleanup for etcd encryption tests
 (#7978)

---
 test/e2e/cloudstack_test.go      | 22 ++++++++
 test/framework/etcdencryption.go | 93 ++++++++++++++++++++++++++++----
 2 files changed, 104 insertions(+), 11 deletions(-)

diff --git a/test/e2e/cloudstack_test.go b/test/e2e/cloudstack_test.go
index 90c52949b04f..f3ca154fdb08 100644
--- a/test/e2e/cloudstack_test.go
+++ b/test/e2e/cloudstack_test.go
@@ -4106,6 +4106,28 @@ func TestCloudStackWorkloadClusterOIDCAuthGithubFluxAPI(t *testing.T) {
     test.DeleteManagementCluster()
 }
 
+func TestCloudStackKubernetes129EtcdEncryption(t *testing.T) {
+    provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat129())
+    test := framework.NewClusterE2ETest(
+        t,
+        provider,
+        framework.WithClusterFiller(
+            api.WithKubernetesVersion(v1alpha1.Kube129),
+            api.WithExternalEtcdTopology(1),
+            api.WithControlPlaneCount(1),
+        ),
+        framework.WithPodIamConfig(),
+    )
+    test.OSFamily = v1alpha1.RedHat
+    test.GenerateClusterConfig()
+    test.CreateCluster()
+    test.PostClusterCreateEtcdEncryptionSetup()
+    test.UpgradeClusterWithNewConfig([]framework.ClusterE2ETestOpt{framework.WithEtcdEncrytion()})
+    test.StopIfFailed()
+    test.ValidateEtcdEncryption()
+    test.DeleteCluster()
+}
+
 func TestCloudStackKubernetes127To128RedHatManagementCPUpgradeAPI(t *testing.T) {
     provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes127())
     test := framework.NewClusterE2ETest(
diff --git a/test/framework/etcdencryption.go b/test/framework/etcdencryption.go
index e3b6e17b33b9..088dbb55f2a9 100644
--- a/test/framework/etcdencryption.go
+++ b/test/framework/etcdencryption.go
@@ -36,15 +36,16 @@ const (
     kmsKeyRegion = "T_KMS_KEY_REGION"
     kmsSocketVar = "T_KMS_SOCKET"
 
-    defaultRegion = "us-west-2"
-    keysFilename  = "keys.json"
+    defaultRegion       = "us-west-2"
+    keysFilename        = "keys.json"
+    keyIDFilenameFormat = "%s-oidc-keyid"
 
     // SSHKeyPath is the path where the SSH private key is stored on the test-runner instance.
SSHKeyPath = "/tmp/ssh_key" ) //go:embed config/pod-identity-webhook.yaml -var podIdentityWebhookManifest []byte +var podIdentityWebhookManifest string //go:embed config/aws-kms-encryption-provider.yaml var kmsProviderManifest string @@ -190,6 +191,9 @@ func (e *ClusterE2ETest) PostClusterCreateEtcdEncryptionSetup() { e.T.Fatal(err) } + // register cleanup step to remove the keys from s3 after the test is done + e.T.Cleanup(e.cleanup) + if err := e.deployPodIdentityWebhook(ctx, envVars); err != nil { e.T.Fatal(err) } @@ -199,6 +203,63 @@ func (e *ClusterE2ETest) PostClusterCreateEtcdEncryptionSetup() { } } +func (e *ClusterE2ETest) cleanup() { + e.T.Log("Removing cluster's key from the IAM OIDC config") + data, err := os.ReadFile(fmt.Sprintf(keyIDFilenameFormat, e.ClusterName)) + if err != nil { + e.T.Logf("failed to read key ID from file, skipping cleanup: %v", err) + return + } + + envVars := getEtcdEncryptionVarsFromEnv() + awsSession, err := session.NewSession(&aws.Config{ + Region: aws.String(defaultRegion), + }) + if err != nil { + e.T.Fatalf("creating aws session for cleanup: %v", err) + } + + // download the current keys json from S3 to add the current cluster's cert + content, err := s3.Download(awsSession, keysFilename, envVars.S3Bucket) + if err != nil { + e.T.Logf("downloading %s from s3: %v", keysFilename, err) + return + } + + resp := &keyResponse{} + if err = json.Unmarshal(content, resp); err != nil { + e.T.Logf("unmarshaling %s into json: %v", keysFilename, err) + return + } + + keyID := string(data) + index := -1 + for i, key := range resp.Keys { + if strings.EqualFold(keyID, key.KeyID) { + index = i + break + } + } + + if index >= 0 { + resp = &keyResponse{ + Keys: append(resp.Keys[0:index], resp.Keys[index+1:]...), + } + + keysJSON, err := json.MarshalIndent(resp, "", " ") + if err != nil { + e.T.Logf("marshaling keys.json: %v", err) + return + } + + // upload the modified keys json to s3 with the public read access + if err = s3.Upload(awsSession, keysJSON, keysFilename, envVars.S3Bucket, s3.WithPublicRead()); err != nil { + e.T.Logf("upload new keys.json to s3: %v", err) + return + } + } +} + func getIssuerURL() string { etcdEncryptionConfig := getEtcdEncryptionVarsFromEnv() return fmt.Sprintf("https://s3.%s.amazonaws.com/%s", defaultRegion, etcdEncryptionConfig.S3Bucket) @@ -206,7 +267,14 @@ func getIssuerURL() string { func (e *ClusterE2ETest) deployPodIdentityWebhook(ctx context.Context, envVars *etcdEncryptionTestVars) error { e.T.Log("Deploying Pod Identity Webhook") - if err := e.KubectlClient.ApplyKubeSpecFromBytes(ctx, e.Cluster(), podIdentityWebhookManifest); err != nil { + values := map[string]string{ + "podIdentityWebhookImage": envVars.PodIdentityWebhookImage, + } + manifest, err := templater.Execute(podIdentityWebhookManifest, values) + if err != nil { + return fmt.Errorf("templating pod identity webhook manifest: %v", err) + } + if err := e.KubectlClient.ApplyKubeSpecFromBytes(ctx, e.Cluster(), manifest); err != nil { return fmt.Errorf("deploying pod identity webhook: %v", err) } return nil @@ -215,13 +283,12 @@ func (e *ClusterE2ETest) deployPodIdentityWebhook(ctx context.Context, envVars * func (e *ClusterE2ETest) deployKMSProvider(ctx context.Context, envVars *etcdEncryptionTestVars) error { e.T.Log("Deploying AWS KMS Encryption Provider") values := map[string]string{ - "kmsImage": envVars.KmsImage, - "podIdentityWebhookImage": envVars.PodIdentityWebhookImage, - "kmsIamRole": envVars.KmsIamRole, - "kmsKeyArn": envVars.KmsKeyArn, - "kmsKeyRegion": 
envVars.KmsKeyRegion, - "kmsSocket": envVars.KmsSocket, - "serviceAccountName": "kms-encrypter-decrypter", + "kmsImage": envVars.KmsImage, + "kmsIamRole": envVars.KmsIamRole, + "kmsKeyArn": envVars.KmsKeyArn, + "kmsKeyRegion": envVars.KmsKeyRegion, + "kmsSocket": envVars.KmsSocket, + "serviceAccountName": "kms-encrypter-decrypter", } if e.OSFamily != v1alpha1.Bottlerocket { @@ -269,6 +336,10 @@ func (e *ClusterE2ETest) addClusterCertToIrsaOidcProvider(ctx context.Context, e return fmt.Errorf("marshaling keys.json: %v", err) } + if err := os.WriteFile(fmt.Sprintf(keyIDFilenameFormat, e.ClusterName), []byte(newKey.KeyID), os.ModeAppend); err != nil { + return fmt.Errorf("writing OIDC key ID to file: %v", err) + } + // upload the modified keys json to s3 with the public read access if err = s3.Upload(awsSession, keysJSON, keysFilename, envVars.S3Bucket, s3.WithPublicRead()); err != nil { return fmt.Errorf("upload new keys.json to s3: %v", err) From 4beac0a6f803fde159abd53d37b43301deb8bd13 Mon Sep 17 00:00:00 2001 From: Xu Deng Date: Fri, 12 Apr 2024 14:40:44 -0400 Subject: [PATCH 057/193] Update brew version (#7975) --- .../testdata/main-bundle-release.yaml | 40 +++++++++---------- .../brew-version-release/CLI_RELEASE_VERSION | 2 +- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/release/cli/pkg/operations/testdata/main-bundle-release.yaml b/release/cli/pkg/operations/testdata/main-bundle-release.yaml index 679a59cad856..4857a414220e 100644 --- a/release/cli/pkg/operations/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/operations/testdata/main-bundle-release.yaml @@ -306,7 +306,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cli-tools os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.3-eks-a-v0.0.0-dev-build.1 clusterController: arch: - amd64 @@ -315,9 +315,9 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cluster-controller os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.3-eks-a-v0.0.0-dev-build.1 components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.2/eksa-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.3/eksa-components.yaml diagnosticCollector: arch: - amd64 @@ -326,7 +326,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-diagnostic-collector os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.3-eks-a-v0.0.0-dev-build.1 version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: @@ -1084,7 +1084,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cli-tools os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.3-eks-a-v0.0.0-dev-build.1 clusterController: arch: - amd64 
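A note on the etcd-encryption cleanup in PATCH 056 above: it registers itself with `e.T.Cleanup`, reads back the key ID written during setup, and splices that key out of the shared `keys.json` before re-uploading it to S3. The splice is the one subtle part. Below is a minimal sketch of it, using a toy `key` type that stands in for the test's `keyResponse` entries (an assumption about their shape, since only `KeyID` is visible in the diff):

```go
package main

import (
	"fmt"
	"strings"
)

type key struct{ KeyID string }

// removeKey drops the first entry whose KeyID matches id, using the same
// append(keys[:i], keys[i+1:]...) splice as the cleanup above. Note that
// the splice reuses the slice's backing array rather than copying it.
func removeKey(keys []key, id string) []key {
	for i, k := range keys {
		if strings.EqualFold(id, k.KeyID) {
			return append(keys[:i], keys[i+1:]...)
		}
	}
	return keys // no match: leave the key set untouched, as the cleanup does
}

func main() {
	keys := []key{{KeyID: "a"}, {KeyID: "b"}, {KeyID: "c"}}
	fmt.Println(removeKey(keys, "B")) // [{a} {c}]
}
```

Because the cleanup logs and returns instead of failing on errors, a missing key-ID file simply skips the S3 rewrite rather than breaking the test teardown.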
@@ -1093,9 +1093,9 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cluster-controller os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.3-eks-a-v0.0.0-dev-build.1 components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.2/eksa-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.3/eksa-components.yaml diagnosticCollector: arch: - amd64 @@ -1104,7 +1104,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-diagnostic-collector os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.3-eks-a-v0.0.0-dev-build.1 version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: @@ -1862,7 +1862,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cli-tools os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.3-eks-a-v0.0.0-dev-build.1 clusterController: arch: - amd64 @@ -1871,9 +1871,9 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cluster-controller os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.3-eks-a-v0.0.0-dev-build.1 components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.2/eksa-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.3/eksa-components.yaml diagnosticCollector: arch: - amd64 @@ -1882,7 +1882,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-diagnostic-collector os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.3-eks-a-v0.0.0-dev-build.1 version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: @@ -2640,7 +2640,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cli-tools os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.3-eks-a-v0.0.0-dev-build.1 clusterController: arch: - amd64 @@ -2649,9 +2649,9 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cluster-controller os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.3-eks-a-v0.0.0-dev-build.1 components: - uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.2/eksa-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.3/eksa-components.yaml diagnosticCollector: arch: - amd64 @@ -2660,7 +2660,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-diagnostic-collector os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.3-eks-a-v0.0.0-dev-build.1 version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: @@ -3418,7 +3418,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cli-tools os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.3-eks-a-v0.0.0-dev-build.1 clusterController: arch: - amd64 @@ -3427,9 +3427,9 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cluster-controller os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.3-eks-a-v0.0.0-dev-build.1 components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.2/eksa-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.3/eksa-components.yaml diagnosticCollector: arch: - amd64 @@ -3438,7 +3438,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-diagnostic-collector os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.3-eks-a-v0.0.0-dev-build.1 version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: diff --git a/release/triggers/brew-version-release/CLI_RELEASE_VERSION b/release/triggers/brew-version-release/CLI_RELEASE_VERSION index 77c98fcea717..b3f4adba7b0f 100644 --- a/release/triggers/brew-version-release/CLI_RELEASE_VERSION +++ b/release/triggers/brew-version-release/CLI_RELEASE_VERSION @@ -1 +1 @@ -v0.19.2 \ No newline at end of file +v0.19.3 \ No newline at end of file From 2febc0bdb21cca26804cf515cbf18dfcef596391 Mon Sep 17 00:00:00 2001 From: Aravind Ramalingam <60027164+pokearu@users.noreply.github.com> Date: Fri, 12 Apr 2024 16:23:44 -0700 Subject: [PATCH 058/193] adding required env variables for e2e test (#7981) --- .../build/buildspecs/vsphere-test-eks-a-cli.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cmd/integration_test/build/buildspecs/vsphere-test-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/vsphere-test-eks-a-cli.yml index ffb0fd7fe5db..0e6d327238bc 100644 --- a/cmd/integration_test/build/buildspecs/vsphere-test-eks-a-cli.yml +++ b/cmd/integration_test/build/buildspecs/vsphere-test-eks-a-cli.yml @@ -79,6 +79,10 @@ env: T_PRIVATE_REGISTRY_MIRROR_USERNAME: "harbor-registry-data:authenticated_username" T_PRIVATE_REGISTRY_MIRROR_PASSWORD: "harbor-registry-data:authenticated_password" 
T_PRIVATE_REGISTRY_MIRROR_CA_CERT: "harbor-registry-data:authenticated_caCert" + T_REGISTRY_MIRROR_OCINAMESPACES_REGISTRY1: "harbor-registry-data:ocinamespace_registry1" + T_REGISTRY_MIRROR_OCINAMESPACES_NAMESPACE1: "harbor-registry-data:ocinamespace_namespace1" + T_REGISTRY_MIRROR_OCINAMESPACES_REGISTRY2: "harbor-registry-data:ocinamespace_registry2" + T_REGISTRY_MIRROR_OCINAMESPACES_NAMESPACE2: "harbor-registry-data:ocinamespace_namespace2" T_NTP_SERVERS: "ntp:servers" T_AWS_IAM_ROLE_ARN: "aws-iam-auth-role:ec2_role_arn" T_IRSA_S3_BUCKET: "etcd-encryption:irsa_s3_bucket" From 797d0c7414a7b604dd3c79b05aa378f407bdb285 Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Fri, 12 Apr 2024 20:36:44 -0700 Subject: [PATCH 059/193] [PR BOT] Generate release testdata files (#7976) --- .../testdata/main-bundle-release.yaml | 1018 +++++++++++++++-- 1 file changed, 898 insertions(+), 120 deletions(-) diff --git a/release/cli/pkg/operations/testdata/main-bundle-release.yaml b/release/cli/pkg/operations/testdata/main-bundle-release.yaml index 4857a414220e..30df388c96fa 100644 --- a/release/cli/pkg/operations/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/operations/testdata/main-bundle-release.yaml @@ -10,7 +10,7 @@ spec: versionsBundles: - bootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.1/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.3/bootstrap-components.yaml controller: arch: - amd64 @@ -19,7 +19,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-bootstrap-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -30,8 +30,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.3/metadata.yaml + version: v1.6.3+abcdef1 bottlerocketHostContainers: admin: arch: @@ -67,7 +67,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-acmesolver os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.2-eks-a-v0.0.0-dev-build.1 cainjector: arch: - amd64 @@ -76,7 +76,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-cainjector os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.2-eks-a-v0.0.0-dev-build.1 controller: arch: - amd64 @@ -85,7 +85,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-controller os: linux - uri: 
public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.2-eks-a-v0.0.0-dev-build.1 ctl: arch: - amd64 @@ -94,10 +94,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-ctl os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.2-eks-a-v0.0.0-dev-build.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.13.2/cert-manager.yaml - version: v1.13.2+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.2/cert-manager.yaml + version: v1.14.2+abcdef1 webhook: arch: - amd64 @@ -106,7 +106,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-webhook os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.2-eks-a-v0.0.0-dev-build.1 cilium: cilium: arch: @@ -167,7 +167,7 @@ spec: version: v0.4.10-rc1+abcdef1 clusterAPI: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.1/core-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.3/core-components.yaml controller: arch: - amd64 @@ -176,7 +176,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -187,11 +187,11 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.3/metadata.yaml + version: v1.6.3+abcdef1 controlPlane: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.1/control-plane-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.3/control-plane-components.yaml controller: arch: - amd64 @@ -200,7 +200,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-control-plane-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -211,13 +211,13 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.3/metadata.yaml + version: v1.6.3+abcdef1 docker: clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.1/cluster-template-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/cluster-template-development.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.1/infrastructure-components-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/infrastructure-components-development.yaml kubeProxy: arch: - amd64 @@ -235,10 +235,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-docker os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.3-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/metadata.yaml + version: v1.6.3+abcdef1 eksD: ami: bottlerocket: {} @@ -788,7 +788,7 @@ spec: version: v1.8.5+abcdef1 - bootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.1/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.3/bootstrap-components.yaml controller: arch: - amd64 @@ -797,7 +797,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-bootstrap-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -808,8 +808,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.3/metadata.yaml + version: v1.6.3+abcdef1 bottlerocketHostContainers: admin: arch: @@ -845,7 +845,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-acmesolver os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.2-eks-a-v0.0.0-dev-build.1 cainjector: arch: - amd64 @@ -854,7 +854,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-cainjector os: 
linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.2-eks-a-v0.0.0-dev-build.1 controller: arch: - amd64 @@ -863,7 +863,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-controller os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.2-eks-a-v0.0.0-dev-build.1 ctl: arch: - amd64 @@ -872,10 +872,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-ctl os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.2-eks-a-v0.0.0-dev-build.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.13.2/cert-manager.yaml - version: v1.13.2+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.2/cert-manager.yaml + version: v1.14.2+abcdef1 webhook: arch: - amd64 @@ -884,7 +884,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-webhook os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.2-eks-a-v0.0.0-dev-build.1 cilium: cilium: arch: @@ -945,7 +945,7 @@ spec: version: v0.4.10-rc1+abcdef1 clusterAPI: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.1/core-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.3/core-components.yaml controller: arch: - amd64 @@ -954,7 +954,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -965,11 +965,11 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.3/metadata.yaml + version: v1.6.3+abcdef1 controlPlane: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.1/control-plane-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.3/control-plane-components.yaml controller: arch: - amd64 @@ -978,7 +978,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-control-plane-controller os: linux - uri: 
public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -989,13 +989,13 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.3/metadata.yaml + version: v1.6.3+abcdef1 docker: clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.1/cluster-template-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/cluster-template-development.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.1/infrastructure-components-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/infrastructure-components-development.yaml kubeProxy: arch: - amd64 @@ -1013,10 +1013,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-docker os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.3-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/metadata.yaml + version: v1.6.3+abcdef1 eksD: ami: bottlerocket: {} @@ -1566,7 +1566,7 @@ spec: version: v1.8.5+abcdef1 - bootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.1/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.3/bootstrap-components.yaml controller: arch: - amd64 @@ -1575,7 +1575,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-bootstrap-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -1586,8 +1586,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.3/metadata.yaml + version: v1.6.3+abcdef1 bottlerocketHostContainers: admin: arch: @@ -1623,7 +1623,7 @@ spec: imageDigest: 
sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-acmesolver os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.2-eks-a-v0.0.0-dev-build.1 cainjector: arch: - amd64 @@ -1632,7 +1632,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-cainjector os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.2-eks-a-v0.0.0-dev-build.1 controller: arch: - amd64 @@ -1641,7 +1641,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-controller os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.2-eks-a-v0.0.0-dev-build.1 ctl: arch: - amd64 @@ -1650,10 +1650,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-ctl os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.2-eks-a-v0.0.0-dev-build.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.13.2/cert-manager.yaml - version: v1.13.2+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.2/cert-manager.yaml + version: v1.14.2+abcdef1 webhook: arch: - amd64 @@ -1662,7 +1662,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-webhook os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.2-eks-a-v0.0.0-dev-build.1 cilium: cilium: arch: @@ -1723,7 +1723,7 @@ spec: version: v0.4.10-rc1+abcdef1 clusterAPI: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.1/core-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.3/core-components.yaml controller: arch: - amd64 @@ -1732,7 +1732,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -1743,11 +1743,11 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.3/metadata.yaml + version: v1.6.3+abcdef1 controlPlane: components: - uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.1/control-plane-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.3/control-plane-components.yaml controller: arch: - amd64 @@ -1756,7 +1756,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-control-plane-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -1767,13 +1767,13 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.3/metadata.yaml + version: v1.6.3+abcdef1 docker: clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.1/cluster-template-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/cluster-template-development.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.1/infrastructure-components-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/infrastructure-components-development.yaml kubeProxy: arch: - amd64 @@ -1791,10 +1791,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-docker os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.3-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/metadata.yaml + version: v1.6.3+abcdef1 eksD: ami: bottlerocket: {} @@ -2344,7 +2344,7 @@ spec: version: v1.8.5+abcdef1 - bootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.1/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.3/bootstrap-components.yaml controller: arch: - amd64 @@ -2353,7 +2353,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-bootstrap-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -2364,8 +2364,8 @@ spec: os: linux uri: 
public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.3/metadata.yaml + version: v1.6.3+abcdef1 bottlerocketHostContainers: admin: arch: @@ -2401,7 +2401,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-acmesolver os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.2-eks-a-v0.0.0-dev-build.1 cainjector: arch: - amd64 @@ -2410,7 +2410,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-cainjector os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.2-eks-a-v0.0.0-dev-build.1 controller: arch: - amd64 @@ -2419,7 +2419,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-controller os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.2-eks-a-v0.0.0-dev-build.1 ctl: arch: - amd64 @@ -2428,10 +2428,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-ctl os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.2-eks-a-v0.0.0-dev-build.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.13.2/cert-manager.yaml - version: v1.13.2+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.2/cert-manager.yaml + version: v1.14.2+abcdef1 webhook: arch: - amd64 @@ -2440,7 +2440,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-webhook os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.2-eks-a-v0.0.0-dev-build.1 cilium: cilium: arch: @@ -2501,7 +2501,7 @@ spec: version: v0.4.10-rc1+abcdef1 clusterAPI: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.1/core-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.3/core-components.yaml controller: arch: - amd64 @@ -2510,7 +2510,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 
@@ -2521,11 +2521,11 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.3/metadata.yaml + version: v1.6.3+abcdef1 controlPlane: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.1/control-plane-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.3/control-plane-components.yaml controller: arch: - amd64 @@ -2534,7 +2534,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-control-plane-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -2545,13 +2545,13 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.3/metadata.yaml + version: v1.6.3+abcdef1 docker: clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.1/cluster-template-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/cluster-template-development.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.1/infrastructure-components-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/infrastructure-components-development.yaml kubeProxy: arch: - amd64 @@ -2569,10 +2569,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-docker os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.3-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/metadata.yaml + version: v1.6.3+abcdef1 eksD: ami: bottlerocket: {} @@ -3122,7 +3122,7 @@ spec: version: v1.8.5+abcdef1 - bootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.1/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.3/bootstrap-components.yaml controller: arch: - amd64 @@ -3131,7 +3131,7 @@ spec: imageDigest: 
sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-bootstrap-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -3142,8 +3142,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.3/metadata.yaml + version: v1.6.3+abcdef1 bottlerocketHostContainers: admin: arch: @@ -3179,7 +3179,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-acmesolver os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.2-eks-a-v0.0.0-dev-build.1 cainjector: arch: - amd64 @@ -3188,7 +3188,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-cainjector os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.2-eks-a-v0.0.0-dev-build.1 controller: arch: - amd64 @@ -3197,7 +3197,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-controller os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.2-eks-a-v0.0.0-dev-build.1 ctl: arch: - amd64 @@ -3206,10 +3206,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-ctl os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.2-eks-a-v0.0.0-dev-build.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.13.2/cert-manager.yaml - version: v1.13.2+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.2/cert-manager.yaml + version: v1.14.2+abcdef1 webhook: arch: - amd64 @@ -3218,7 +3218,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-webhook os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.13.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.2-eks-a-v0.0.0-dev-build.1 cilium: cilium: arch: @@ -3279,7 +3279,7 @@ spec: version: v0.4.10-rc1+abcdef1 clusterAPI: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.1/core-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.3/core-components.yaml 
controller: arch: - amd64 @@ -3288,7 +3288,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -3299,11 +3299,11 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.3/metadata.yaml + version: v1.6.3+abcdef1 controlPlane: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.1/control-plane-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.3/control-plane-components.yaml controller: arch: - amd64 @@ -3312,7 +3312,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-control-plane-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -3323,13 +3323,13 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.3/metadata.yaml + version: v1.6.3+abcdef1 docker: clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.1/cluster-template-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/cluster-template-development.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.1/infrastructure-components-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/infrastructure-components-development.yaml kubeProxy: arch: - amd64 @@ -3347,10 +3347,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-docker os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.3-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.1/metadata.yaml - version: v1.6.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/metadata.yaml + version: v1.6.3+abcdef1 eksD: ami: bottlerocket: {} 
@@ -3898,4 +3898,782 @@ spec: metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/metadata.yaml version: v1.8.5+abcdef1 + - bootstrap: + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.3/bootstrap-components.yaml + controller: + arch: + - amd64 + - arm64 + description: Container image for kubeadm-bootstrap-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kubeadm-bootstrap-controller + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 + kubeProxy: + arch: + - amd64 + - arm64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.3/metadata.yaml + version: v1.6.3+abcdef1 + bottlerocketHostContainers: + admin: + arch: + - amd64 + description: Container image for bottlerocket-admin image + imageDigest: sha256:2f884ebc34c0f54b6d75493ba2ad645f42525d6c90fe2099ae50708de6f86152 + name: bottlerocket-admin + os: linux + uri: public.ecr.aws/bottlerocket/bottlerocket-admin:v0.11.6 + control: + arch: + - amd64 + description: Container image for bottlerocket-control image + imageDigest: sha256:0ed73c9a2e35fab4287f3cbf18de084b740cace3234628c5e215adc407e52750 + name: bottlerocket-control + os: linux + uri: public.ecr.aws/bottlerocket/bottlerocket-control:v0.7.10 + kubeadmBootstrap: + arch: + - amd64 + - arm64 + description: Container image for bottlerocket-bootstrap image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: bottlerocket-bootstrap + os: linux + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-30-1-eks-a-v0.0.0-dev-build.1 + certManager: + acmesolver: + arch: + - amd64 + - arm64 + description: Container image for cert-manager-acmesolver image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cert-manager-acmesolver + os: linux + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.2-eks-a-v0.0.0-dev-build.1 + cainjector: + arch: + - amd64 + - arm64 + description: Container image for cert-manager-cainjector image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cert-manager-cainjector + os: linux + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.2-eks-a-v0.0.0-dev-build.1 + controller: + arch: + - amd64 + - arm64 + description: Container image for cert-manager-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cert-manager-controller + os: linux + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.2-eks-a-v0.0.0-dev-build.1 + ctl: + arch: + - amd64 + - arm64 + description: Container image for cert-manager-ctl image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cert-manager-ctl + os: linux + uri: 
public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.2-eks-a-v0.0.0-dev-build.1 + manifest: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.2/cert-manager.yaml + version: v1.14.2+abcdef1 + webhook: + arch: + - amd64 + - arm64 + description: Container image for cert-manager-webhook image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cert-manager-webhook + os: linux + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.2-eks-a-v0.0.0-dev-build.1 + cilium: + cilium: + arch: + - amd64 + description: Container image for cilium image + imageDigest: sha256:638f2f8f998b7f94afac1a2a2e6b65595fd98fe2cfa379d59ce6d577799c0b79 + name: cilium + os: linux + uri: public.ecr.aws/isovalent/cilium:v1.13.13-eksa.1 + helmChart: + description: Helm chart for cilium-chart + imageDigest: sha256:6c964c5320db446d25b4badd38b9b128b00c713865488cd54c53098a6c313252 + name: cilium-chart + uri: public.ecr.aws/isovalent/cilium:1.13.13-eksa.1 + manifest: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.13-eksa.1/cilium.yaml + operator: + arch: + - amd64 + description: Container image for operator-generic image + imageDigest: sha256:dad5ec573a36ed1c973b4007c616bdfbcad3c010a3fa361823bf03941c81030d + name: operator-generic + os: linux + uri: public.ecr.aws/isovalent/operator-generic:v1.13.13-eksa.1 + version: v1.13.13-eksa.1 + cloudStack: + clusterAPIController: + arch: + - amd64 + - arm64 + description: Container image for cluster-api-provider-cloudstack image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cluster-api-provider-cloudstack + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-cloudstack/release/manager:v0.4.10-rc1-eks-a-v0.0.0-dev-build.1 + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-cloudstack/manifests/infrastructure-cloudstack/v0.4.10-rc1/infrastructure-components.yaml + kubeRbacProxy: + arch: + - amd64 + - arm64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + kubeVip: + arch: + - amd64 + - arm64 + description: Container image for kube-vip image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-vip + os: linux + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-cloudstack/manifests/infrastructure-cloudstack/v0.4.10-rc1/metadata.yaml + version: v0.4.10-rc1+abcdef1 + clusterAPI: + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.3/core-components.yaml + controller: + arch: + - amd64 + - arm64 + description: Container image for cluster-api-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cluster-api-controller + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 + kubeProxy: + arch: + - amd64 + - arm64 + description: Container image for kube-rbac-proxy image 
+ imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.3/metadata.yaml + version: v1.6.3+abcdef1 + controlPlane: + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.3/control-plane-components.yaml + controller: + arch: + - amd64 + - arm64 + description: Container image for kubeadm-control-plane-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kubeadm-control-plane-controller + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 + kubeProxy: + arch: + - amd64 + - arm64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.3/metadata.yaml + version: v1.6.3+abcdef1 + docker: + clusterTemplate: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/cluster-template-development.yaml + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/infrastructure-components-development.yaml + kubeProxy: + arch: + - amd64 + - arm64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + manager: + arch: + - amd64 + - arm64 + description: Container image for cluster-api-provider-docker image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cluster-api-provider-docker + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.3-eks-a-v0.0.0-dev-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/metadata.yaml + version: v1.6.3+abcdef1 + eksD: + ami: + bottlerocket: {} + channel: 1-30 + components: https://distro.eks.amazonaws.com/crds/releases.distro.eks.amazonaws.com-v1alpha1.yaml + containerd: + arch: + - amd64 + description: containerd tarball for linux/amd64 + name: containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz + os: linux + sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.15/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz + crictl: + arch: + - amd64 + description: cri-tools tarball for linux/amd64 + name: cri-tools-v0.0.0-dev-build.0-linux-amd64.tar.gz + os: linux + sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + sha512: 
0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.29.0/cri-tools-v0.0.0-dev-build.0-linux-amd64.tar.gz + etcdadm: + arch: + - amd64 + description: etcdadm tarball for linux/amd64 + name: etcdadm-v0.0.0-dev-build.0-linux-amd64.tar.gz + os: linux + sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm/f089d308442c18f487a52d09fd067ae9ac7cd8f2/etcdadm-v0.0.0-dev-build.0-linux-amd64.tar.gz + gitCommit: 0123456789abcdef0123456789abcdef01234567 + imagebuilder: + arch: + - amd64 + description: image-builder tarball for linux/amd64 + name: image-builder-v0.0.0-dev-build.0-linux-amd64.tar.gz + os: linux + sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/image-builder/v0.5.0/image-builder-v0.0.0-dev-build.0-linux-amd64.tar.gz + kindNode: + arch: + - amd64 + - arm64 + description: Container image for kind-node image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kind-node + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.30.0-rc.0-eks-d-1-30-1-eks-a-v0.0.0-dev-build.1 + kubeVersion: v1.30.0-rc.0 + manifestUrl: https://eks-d-postsubmit-artifacts.s3.us-west-2.amazonaws.com/kubernetes-1-30/kubernetes-1-30-eks-1.yaml + name: kubernetes-1-30-eks-1 + ova: + bottlerocket: {} + raw: + bottlerocket: {} + eksa: + cliTools: + arch: + - amd64 + - arm64 + description: Container image for eks-anywhere-cli-tools image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: eks-anywhere-cli-tools + os: linux + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.3-eks-a-v0.0.0-dev-build.1 + clusterController: + arch: + - amd64 + - arm64 + description: Container image for eks-anywhere-cluster-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: eks-anywhere-cluster-controller + os: linux + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.3-eks-a-v0.0.0-dev-build.1 + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.3/eksa-components.yaml + diagnosticCollector: + arch: + - amd64 + - arm64 + description: Container image for eks-anywhere-diagnostic-collector image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: eks-anywhere-diagnostic-collector + os: linux + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.3-eks-a-v0.0.0-dev-build.1 + version: v0.0.0-dev+build.0+abcdef1 + etcdadmBootstrap: + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/bootstrap-components.yaml + controller: + arch: + - amd64 + - arm64 + description: Container image for etcdadm-bootstrap-provider image + imageDigest: 
sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: etcdadm-bootstrap-provider + os: linux + uri: public.ecr.aws/release-container-registry/aws/etcdadm-bootstrap-provider:v1.0.13-eks-a-v0.0.0-dev-build.1 + kubeProxy: + arch: + - amd64 + - arm64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/metadata.yaml + version: v1.0.13+abcdef1 + etcdadmController: + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/bootstrap-components.yaml + controller: + arch: + - amd64 + - arm64 + description: Container image for etcdadm-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: etcdadm-controller + os: linux + uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.20-eks-a-v0.0.0-dev-build.1 + kubeProxy: + arch: + - amd64 + - arm64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/metadata.yaml + version: v1.0.20+abcdef1 + flux: + helmController: + arch: + - amd64 + - arm64 + description: Container image for helm-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: helm-controller + os: linux + uri: public.ecr.aws/release-container-registry/fluxcd/helm-controller:v0.37.4-eks-a-v0.0.0-dev-build.1 + kustomizeController: + arch: + - amd64 + - arm64 + description: Container image for kustomize-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kustomize-controller + os: linux + uri: public.ecr.aws/release-container-registry/fluxcd/kustomize-controller:v1.2.2-eks-a-v0.0.0-dev-build.1 + notificationController: + arch: + - amd64 + - arm64 + description: Container image for notification-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: notification-controller + os: linux + uri: public.ecr.aws/release-container-registry/fluxcd/notification-controller:v1.2.4-eks-a-v0.0.0-dev-build.1 + sourceController: + arch: + - amd64 + - arm64 + description: Container image for source-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: source-controller + os: linux + uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.2.5-eks-a-v0.0.0-dev-build.1 + version: v2.2.3+abcdef1 + haproxy: + image: + arch: + - amd64 + - arm64 + description: Container image for haproxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: haproxy + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/haproxy:v0.22.0-eks-a-v0.0.0-dev-build.1 + kindnetd: + manifest: + uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/kind/manifests/kindnetd/v0.22.0/kindnetd.yaml + version: v0.22.0+abcdef1 + kubeVersion: "1.30" + nutanix: + cloudProvider: + arch: + - amd64 + - arm64 + description: Container image for cloud-provider-nutanix image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cloud-provider-nutanix + os: linux + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cloud-provider-nutanix/controller:v0.3.2-eks-a-v0.0.0-dev-build.1 + clusterAPIController: + arch: + - amd64 + - arm64 + description: Container image for cluster-api-provider-nutanix image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cluster-api-provider-nutanix + os: linux + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.2-eks-a-v0.0.0-dev-build.1 + clusterTemplate: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/cluster-template.yaml + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/infrastructure-components.yaml + kubeVip: + arch: + - amd64 + - arm64 + description: Container image for kube-vip image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-vip + os: linux + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/metadata.yaml + version: v1.3.2+abcdef1 + packageController: + credentialProviderPackage: + arch: + - amd64 + - arm64 + description: Container image for credential-provider-package image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: credential-provider-package + os: linux + uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.1-eks-a-v0.0.0-dev-build.1 + helmChart: + description: Helm chart for eks-anywhere-packages + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: eks-anywhere-packages + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.1-eks-a-v0.0.0-dev-build.1 + packageController: + arch: + - amd64 + - arm64 + description: Container image for eks-anywhere-packages image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: eks-anywhere-packages + os: linux + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.1-eks-a-v0.0.0-dev-build.1 + tokenRefresher: + arch: + - amd64 + - arm64 + description: Container image for ecr-token-refresher image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: ecr-token-refresher + os: linux + uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.1-eks-a-v0.0.0-dev-build.1 + version: v0.4.1+abcdef1 + snow: + bottlerocketBootstrapSnow: + arch: + - amd64 + - arm64 + description: Container image for bottlerocket-bootstrap-snow image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: bottlerocket-bootstrap-snow + os: linux + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-30-1-eks-a-v0.0.0-dev-build.1 + components: + uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml + kubeVip: + arch: + - amd64 + - arm64 + description: Container image for kube-vip image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-vip + os: linux + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + manager: + arch: + - amd64 + - arm64 + description: Container image for cluster-api-snow-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cluster-api-snow-controller + os: linux + uri: public.ecr.aws/release-container-registry/aws/cluster-api-provider-aws-snow/manager:v0.1.27-eks-a-v0.0.0-dev-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/metadata.yaml + version: v0.1.27+abcdef1 + tinkerbell: + clusterAPIController: + arch: + - amd64 + - arm64 + description: Container image for cluster-api-provider-tinkerbell image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cluster-api-provider-tinkerbell + os: linux + uri: public.ecr.aws/release-container-registry/tinkerbell/cluster-api-provider-tinkerbell:v0.5.2-eks-a-v0.0.0-dev-build.1 + clusterTemplate: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/cluster-template.yaml + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/infrastructure-components.yaml + envoy: + arch: + - amd64 + - arm64 + description: Container image for envoy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: envoy + os: linux + uri: public.ecr.aws/release-container-registry/envoyproxy/envoy:v1.22.2.0-prod-eks-a-v0.0.0-dev-build.1 + kubeVip: + arch: + - amd64 + - arm64 + description: Container image for kube-vip image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-vip + os: linux + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/metadata.yaml + tinkerbellStack: + actions: + cexec: + arch: + - amd64 + - arm64 + description: Container image for cexec image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cexec + os: linux + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/cexec:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + imageToDisk: + arch: + - amd64 + - arm64 + description: Container image for image2disk image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: image2disk + os: linux + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/image2disk:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + kexec: + arch: + - amd64 + - arm64 + description: Container image for kexec image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kexec + os: linux + uri: 
public.ecr.aws/release-container-registry/tinkerbell/hub/kexec:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + ociToDisk: + arch: + - amd64 + - arm64 + description: Container image for oci2disk image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: oci2disk + os: linux + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/oci2disk:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + reboot: + arch: + - amd64 + - arm64 + description: Container image for reboot image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: reboot + os: linux + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/reboot:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + writeFile: + arch: + - amd64 + - arm64 + description: Container image for writefile image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: writefile + os: linux + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/writefile:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + boots: + arch: + - amd64 + - arm64 + description: Container image for boots image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: boots + os: linux + uri: public.ecr.aws/release-container-registry/tinkerbell/boots:v0.8.1-eks-a-v0.0.0-dev-build.1 + hegel: + arch: + - amd64 + - arm64 + description: Container image for hegel image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: hegel + os: linux + uri: public.ecr.aws/release-container-registry/tinkerbell/hegel:v0.10.1-eks-a-v0.0.0-dev-build.1 + hook: + bootkit: + arch: + - amd64 + - arm64 + description: Container image for hook-bootkit image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: hook-bootkit + os: linux + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-bootkit:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + docker: + arch: + - amd64 + - arm64 + description: Container image for hook-docker image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: hook-docker + os: linux + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-docker:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + initramfs: + amd: + description: Tinkerbell operating system installation environment (osie) + component + name: initramfs-x86_64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/initramfs-x86_64 + arm: + description: Tinkerbell operating system installation environment (osie) + component + name: initramfs-aarch64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/initramfs-aarch64 + kernel: + arch: + - amd64 + - arm64 + description: Container image for hook-kernel image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: hook-kernel + os: linux + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-kernel:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + vmlinuz: + amd: + description: Tinkerbell operating system installation environment (osie) + component + name: vmlinuz-x86_64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/vmlinuz-x86_64 + 
arm: + description: Tinkerbell operating system installation environment (osie) + component + name: vmlinuz-aarch64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/vmlinuz-aarch64 + rufio: + arch: + - amd64 + - arm64 + description: Container image for rufio image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: rufio + os: linux + uri: public.ecr.aws/release-container-registry/tinkerbell/rufio:afd7cd82fa08dae8f9f3ffac96eb030176f3abbd-eks-a-v0.0.0-dev-build.1 + tink: + tinkController: + arch: + - amd64 + - arm64 + description: Container image for tink-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: tink-controller + os: linux + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-controller:v0.8.0-eks-a-v0.0.0-dev-build.1 + tinkServer: + arch: + - amd64 + - arm64 + description: Container image for tink-server image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: tink-server + os: linux + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-server:v0.8.0-eks-a-v0.0.0-dev-build.1 + tinkWorker: + arch: + - amd64 + - arm64 + description: Container image for tink-worker image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: tink-worker + os: linux + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-worker:v0.8.0-eks-a-v0.0.0-dev-build.1 + tinkerbellChart: + description: Helm chart for tinkerbell-chart + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: tinkerbell-chart + uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.4-eks-a-v0.0.0-dev-build.1 + version: v0.5.2+abcdef1 + upgrader: + upgrader: + arch: + - amd64 + - arm64 + description: Container image for upgrader image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: upgrader + os: linux + uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-30-1-eks-a-v0.0.0-dev-build.1 + vSphere: + clusterAPIController: + arch: + - amd64 + - arm64 + description: Container image for cluster-api-provider-vsphere image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cluster-api-provider-vsphere + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.8.5-eks-a-v0.0.0-dev-build.1 + clusterTemplate: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/cluster-template.yaml + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/infrastructure-components.yaml + kubeProxy: + arch: + - amd64 + - arm64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + kubeVip: + arch: + - amd64 + - arm64 + description: Container image for kube-vip image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-vip + os: linux + uri: 
public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + manager: + arch: + - amd64 + - arm64 + description: Container image for cloud-provider-vsphere image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cloud-provider-vsphere + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.30.0-rc.0-eks-d-1-30-eks-a-v0.0.0-dev-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/metadata.yaml + version: v1.8.5+abcdef1 status: {} From 463357e3c1301e6a025eb8c5d504022dc08dea03 Mon Sep 17 00:00:00 2001 From: Aravind Ramalingam <60027164+pokearu@users.noreply.github.com> Date: Sun, 14 Apr 2024 23:38:46 -0700 Subject: [PATCH 060/193] Passing along ocinamespaces env var to tests (#7984) --- internal/test/e2e/registryMirror.go | 10 ++++++++++ test/framework/registry_mirror.go | 5 +++++ 2 files changed, 15 insertions(+) diff --git a/internal/test/e2e/registryMirror.go b/internal/test/e2e/registryMirror.go index cc8c7fdf49fc..9c134433e106 100644 --- a/internal/test/e2e/registryMirror.go +++ b/internal/test/e2e/registryMirror.go @@ -71,6 +71,16 @@ func (e *E2ESession) setupRegistryMirrorEnv(testRegex string) error { } } + re = regexp.MustCompile(`^.*OciNamespaces.*$`) + if re.MatchString(testRegex) { + ociNamespacesEnvVar := e2etests.RequiredOciNamespacesEnvVars() + for _, eVar := range ociNamespacesEnvVar { + if val, ok := os.LookupEnv(eVar); ok { + e.testEnvVars[eVar] = val + } + } + } + return nil } diff --git a/test/framework/registry_mirror.go b/test/framework/registry_mirror.go index 6e6ce730da33..c39fcec9efb3 100644 --- a/test/framework/registry_mirror.go +++ b/test/framework/registry_mirror.go @@ -147,6 +147,11 @@ func RequiredRegistryMirrorEnvVars() []string { return append(registryMirrorRequiredEnvVars, registryMirrorDockerAirgappedRequiredEnvVars...) } +// RequiredOciNamespacesEnvVars returns the Env variables to set for OCI Namespaces tests. 
+func RequiredOciNamespacesEnvVars() []string { + return append(registryMirrorOciNamespacesRequiredEnvVars, RegistryMirrorOciNamespacesRegistry2Var, RegistryMirrorOciNamespacesNamespace2Var) +} + func setupRegistryMirrorEndpointAndCert(e *ClusterE2ETest, providerName string, insecureSkipVerify bool, ociNamespaces ...v1alpha1.OCINamespace) { var endpoint, hostPort, username, password, registryCert string port := "443" From a69717a4a789d89d2ecc379f573931b4282c66ef Mon Sep 17 00:00:00 2001 From: Aravind Ramalingam <60027164+pokearu@users.noreply.github.com> Date: Mon, 15 Apr 2024 12:33:46 -0700 Subject: [PATCH 061/193] Fixing registry mirror setup function return (#7988) --- internal/test/e2e/registryMirror.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/internal/test/e2e/registryMirror.go b/internal/test/e2e/registryMirror.go index 9c134433e106..c09b1d65c1d2 100644 --- a/internal/test/e2e/registryMirror.go +++ b/internal/test/e2e/registryMirror.go @@ -56,7 +56,9 @@ func (e *E2ESession) setupRegistryMirrorEnv(testRegex string) error { } if endpoint != "" && port != "" && caCert != "" { - return e.mountRegistryCert(caCert, net.JoinHostPort(endpoint, port)) + if err := e.mountRegistryCert(caCert, net.JoinHostPort(endpoint, port)); err != nil { + return err + } } re = regexp.MustCompile(`^.*Docker.*Airgapped.*$`) From 315a0227b4e761a1f3483ecb86c97379810e3831 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Apr 2024 13:17:47 -0700 Subject: [PATCH 062/193] Bump codecov/codecov-action from 4.2.0 to 4.3.0 (#7987) Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 4.2.0 to 4.3.0. - [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v4.2.0...v4.3.0) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/go-coverage.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/go-coverage.yml b/.github/workflows/go-coverage.yml index 2869683e6691..e417d5f79801 100644 --- a/.github/workflows/go-coverage.yml +++ b/.github/workflows/go-coverage.yml @@ -22,7 +22,7 @@ jobs: - name: Run go test with coverage run: COVER_PROFILE=coverage.txt make coverage-unit-test - name: Codecov upload - uses: codecov/codecov-action@v4.2.0 + uses: codecov/codecov-action@v4.3.0 with: files: ./coverage.txt token: ${{ secrets.CODECOV_TOKEN }} From c8d144ac1a481e776aa7c7e93f10ef5921d09263 Mon Sep 17 00:00:00 2001 From: Mitali Paygude Date: Mon, 15 Apr 2024 14:18:47 -0700 Subject: [PATCH 063/193] Add processor for Tinkerbell Template Config (#7980) --- config/manifest/eksa-components.yaml | 3 + config/rbac/role.yaml | 3 + controllers/cluster_controller.go | 6 +- pkg/api/v1alpha1/cluster_types.go | 8 ++ pkg/api/v1alpha1/cluster_types_test.go | 42 ++++++++++ pkg/cluster/build.go | 2 +- pkg/cluster/tinkerbell.go | 15 +++- pkg/cluster/tinkerbell_test.go | 102 +++++++++++++++++++++++++ 8 files changed, 176 insertions(+), 5 deletions(-) diff --git a/config/manifest/eksa-components.yaml b/config/manifest/eksa-components.yaml index a113b8cabaff..f7fe0c2459f4 100644 --- a/config/manifest/eksa-components.yaml +++ b/config/manifest/eksa-components.yaml @@ -7172,6 +7172,7 @@ rules: - snowmachineconfigs - tinkerbelldatacenterconfigs - tinkerbellmachineconfigs + - tinkerbelltemplateconfigs - vspheredatacenterconfigs - vspheremachineconfigs verbs: @@ -7193,6 +7194,7 @@ rules: - snowmachineconfigs/finalizers - tinkerbelldatacenterconfigs/finalizers - tinkerbellmachineconfigs/finalizers + - tinkerbelltemplateconfigs/finalizers - vspheredatacenterconfigs/finalizers - vspheremachineconfigs/finalizers verbs: @@ -7209,6 +7211,7 @@ rules: - snowmachineconfigs/status - tinkerbelldatacenterconfigs/status - tinkerbellmachineconfigs/status + - tinkerbelltemplateconfigs/status - vspheredatacenterconfigs/status - vspheremachineconfigs/status verbs: diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index bfd3550bb7fe..f5b81ea51e36 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -86,6 +86,7 @@ rules: - snowmachineconfigs - tinkerbelldatacenterconfigs - tinkerbellmachineconfigs + - tinkerbelltemplateconfigs - vspheredatacenterconfigs - vspheremachineconfigs verbs: @@ -107,6 +108,7 @@ rules: - snowmachineconfigs/finalizers - tinkerbelldatacenterconfigs/finalizers - tinkerbellmachineconfigs/finalizers + - tinkerbelltemplateconfigs/finalizers - vspheredatacenterconfigs/finalizers - vspheremachineconfigs/finalizers verbs: @@ -123,6 +125,7 @@ rules: - snowmachineconfigs/status - tinkerbelldatacenterconfigs/status - tinkerbellmachineconfigs/status + - tinkerbelltemplateconfigs/status - vspheredatacenterconfigs/status - vspheremachineconfigs/status verbs: diff --git a/controllers/cluster_controller.go b/controllers/cluster_controller.go index f9694fc8c7cf..11af51efe2fd 100644 --- a/controllers/cluster_controller.go +++ b/controllers/cluster_controller.go @@ -175,10 +175,10 @@ func (r *ClusterReconciler) SetupWithManager(mgr ctrl.Manager, log logr.Logger) // +kubebuilder:rbac:groups="",resources=namespaces,verbs=create;delete // +kubebuilder:rbac:groups="",resources=nodes,verbs=list // 
+kubebuilder:rbac:groups=addons.cluster.x-k8s.io,resources=clusterresourcesets,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=anywhere.eks.amazonaws.com,resources=clusters;gitopsconfigs;snowmachineconfigs;snowdatacenterconfigs;snowippools;vspheredatacenterconfigs;vspheremachineconfigs;dockerdatacenterconfigs;tinkerbellmachineconfigs;tinkerbelldatacenterconfigs;cloudstackdatacenterconfigs;cloudstackmachineconfigs;nutanixdatacenterconfigs;nutanixmachineconfigs;awsiamconfigs;oidcconfigs;awsiamconfigs;fluxconfigs,verbs=get;list;watch;update;patch -// +kubebuilder:rbac:groups=anywhere.eks.amazonaws.com,resources=clusters/status;snowmachineconfigs/status;snowippools/status;vspheredatacenterconfigs/status;vspheremachineconfigs/status;dockerdatacenterconfigs/status;tinkerbelldatacenterconfigs/status;tinkerbellmachineconfigs/status;cloudstackdatacenterconfigs/status;cloudstackmachineconfigs/status;awsiamconfigs/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=anywhere.eks.amazonaws.com,resources=clusters;gitopsconfigs;snowmachineconfigs;snowdatacenterconfigs;snowippools;vspheredatacenterconfigs;vspheremachineconfigs;dockerdatacenterconfigs;tinkerbellmachineconfigs;tinkerbelltemplateconfigs;tinkerbelldatacenterconfigs;cloudstackdatacenterconfigs;cloudstackmachineconfigs;nutanixdatacenterconfigs;nutanixmachineconfigs;awsiamconfigs;oidcconfigs;awsiamconfigs;fluxconfigs,verbs=get;list;watch;update;patch +// +kubebuilder:rbac:groups=anywhere.eks.amazonaws.com,resources=clusters/status;snowmachineconfigs/status;snowippools/status;vspheredatacenterconfigs/status;vspheremachineconfigs/status;dockerdatacenterconfigs/status;tinkerbelldatacenterconfigs/status;tinkerbellmachineconfigs/status;tinkerbelltemplateconfigs/status;cloudstackdatacenterconfigs/status;cloudstackmachineconfigs/status;awsiamconfigs/status,verbs=get;update;patch // +kubebuilder:rbac:groups=anywhere.eks.amazonaws.com,resources=bundles,verbs=get;list;watch -// +kubebuilder:rbac:groups=anywhere.eks.amazonaws.com,resources=clusters/finalizers;snowmachineconfigs/finalizers;snowippools/finalizers;vspheredatacenterconfigs/finalizers;vspheremachineconfigs/finalizers;cloudstackdatacenterconfigs/finalizers;cloudstackmachineconfigs/finalizers;dockerdatacenterconfigs/finalizers;bundles/finalizers;awsiamconfigs/finalizers;tinkerbelldatacenterconfigs/finalizers;tinkerbellmachineconfigs/finalizers,verbs=update +// +kubebuilder:rbac:groups=anywhere.eks.amazonaws.com,resources=clusters/finalizers;snowmachineconfigs/finalizers;snowippools/finalizers;vspheredatacenterconfigs/finalizers;vspheremachineconfigs/finalizers;cloudstackdatacenterconfigs/finalizers;cloudstackmachineconfigs/finalizers;dockerdatacenterconfigs/finalizers;bundles/finalizers;awsiamconfigs/finalizers;tinkerbelldatacenterconfigs/finalizers;tinkerbellmachineconfigs/finalizers;tinkerbelltemplateconfigs/finalizers,verbs=update // +kubebuilder:rbac:groups=bootstrap.cluster.x-k8s.io,resources=kubeadmconfigtemplates,verbs=create;get;list;patch;update;watch // +kubebuilder:rbac:groups="cluster.x-k8s.io",resources=machinedeployments,verbs=list;watch;get;patch;update;create;delete // +kubebuilder:rbac:groups="cluster.x-k8s.io",resources=clusters,verbs=list;watch;get;patch;update;create;delete diff --git a/pkg/api/v1alpha1/cluster_types.go b/pkg/api/v1alpha1/cluster_types.go index 0c15bec760f4..fbc7f79d04aa 100644 --- a/pkg/api/v1alpha1/cluster_types.go +++ b/pkg/api/v1alpha1/cluster_types.go @@ -1046,6 +1046,14 @@ func (n *Ref) Equal(o *Ref) bool { return 
n.Kind == o.Kind && n.Name == o.Name
 }
 
+// IsEmpty checks if the given ref object is empty.
+func (n Ref) IsEmpty() bool {
+	if n.Kind == "" && n.Name == "" {
+		return true
+	}
+	return false
+}
+
 // +kubebuilder:object:generate=false
 // Interface for getting DatacenterRef fields for Cluster type.
 type ProviderRefAccessor interface {
diff --git a/pkg/api/v1alpha1/cluster_types_test.go b/pkg/api/v1alpha1/cluster_types_test.go
index 036cd117223a..c479ec9f6acd 100644
--- a/pkg/api/v1alpha1/cluster_types_test.go
+++ b/pkg/api/v1alpha1/cluster_types_test.go
@@ -854,6 +854,48 @@ func TestClusterEqualGitOpsRef(t *testing.T) {
 	}
 }
 
+func TestClusterIsEmptyRef(t *testing.T) {
+	testCases := []struct {
+		testName    string
+		templateRef v1alpha1.Ref
+		want        bool
+	}{
+		{
+			testName: "kind not empty",
+			templateRef: v1alpha1.Ref{
+				Kind: "k1",
+			},
+			want: false,
+		},
+		{
+			testName: "name not empty",
+			templateRef: v1alpha1.Ref{
+				Name: "n1",
+			},
+			want: false,
+		},
+		{
+			testName: "both not empty",
+			templateRef: v1alpha1.Ref{
+				Kind: "k",
+				Name: "n",
+			},
+			want: false,
+		},
+		{
+			testName: "both empty",
+			templateRef: v1alpha1.Ref{},
+			want: true,
+		},
+	}
+	for _, tt := range testCases {
+		t.Run(tt.testName, func(t *testing.T) {
+			g := NewWithT(t)
+			g.Expect(tt.templateRef.IsEmpty()).To(Equal(tt.want))
+		})
+	}
+}
+
 func TestClusterEqualClusterNetwork(t *testing.T) {
 	testCases := []struct {
 		testName string
diff --git a/pkg/cluster/build.go b/pkg/cluster/build.go
index 23621d4931b8..f83d9949515f 100644
--- a/pkg/cluster/build.go
+++ b/pkg/cluster/build.go
@@ -6,7 +6,7 @@ func NewDefaultConfigClientBuilder() *ConfigClientBuilder {
 	return NewConfigClientBuilder().Register(
 		getCloudStackMachineConfigs,
 		getCloudStackDatacenter,
-		getTinkerbellMachineConfigs,
+		getTinkerbellMachineAndTemplateConfigs,
 		getTinkerbellDatacenter,
 		getDockerDatacenter,
 		getVSphereDatacenter,
diff --git a/pkg/cluster/tinkerbell.go b/pkg/cluster/tinkerbell.go
index 04d1d87d1d99..5b83adf17d3f 100644
--- a/pkg/cluster/tinkerbell.go
+++ b/pkg/cluster/tinkerbell.go
@@ -106,7 +106,7 @@ func getTinkerbellDatacenter(ctx context.Context, client Client, c *Config) erro
 	return nil
 }
 
-func getTinkerbellMachineConfigs(ctx context.Context, client Client, c *Config) error {
+func getTinkerbellMachineAndTemplateConfigs(ctx context.Context, client Client, c *Config) error {
 	if c.Cluster.Spec.DatacenterRef.Kind != anywherev1.TinkerbellDatacenterKind {
 		return nil
 	}
@@ -125,6 +125,19 @@ func getTinkerbellMachineConfigs(ctx context.Context, client Client, c *Config)
 		}
 
 		c.TinkerbellMachineConfigs[machineConfig.Name] = machineConfig
+
+		if !machineConfig.Spec.TemplateRef.IsEmpty() {
+			if c.TinkerbellTemplateConfigs == nil {
+				c.TinkerbellTemplateConfigs = map[string]*anywherev1.TinkerbellTemplateConfig{}
+			}
+
+			templateRefName := machineConfig.Spec.TemplateRef.Name
+			templateConfig := &anywherev1.TinkerbellTemplateConfig{}
+			if err := client.Get(ctx, templateRefName, c.Cluster.Namespace, templateConfig); err != nil {
+				return err
+			}
+			c.TinkerbellTemplateConfigs[templateRefName] = templateConfig
+		}
 	}
 	return nil
 }
diff --git a/pkg/cluster/tinkerbell_test.go b/pkg/cluster/tinkerbell_test.go
index 8f2ce1dbfcf2..a3251eb6694b 100644
--- a/pkg/cluster/tinkerbell_test.go
+++ b/pkg/cluster/tinkerbell_test.go
@@ -251,3 +251,105 @@ func TestDefaultConfigClientBuilderTinkerbellCluster(t *testing.T) {
 	g.Expect(config.TinkerbellMachineConfigs["machine-1"]).To(Equal(machineControlPlane))
g.Expect(config.TinkerbellMachineConfigs["machine-2"]).To(Equal(machineWorker)) } + +func TestDefaultConfigClientBuilderTinkerbellClusterWithTemplateConfig(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + b := cluster.NewDefaultConfigClientBuilder() + ctrl := gomock.NewController(t) + client := mocks.NewMockClient(ctrl) + cluster := &anywherev1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "default", + }, + Spec: anywherev1.ClusterSpec{ + DatacenterRef: anywherev1.Ref{ + Kind: anywherev1.TinkerbellDatacenterKind, + Name: "datacenter", + }, + ControlPlaneConfiguration: anywherev1.ControlPlaneConfiguration{ + MachineGroupRef: &anywherev1.Ref{ + Kind: anywherev1.TinkerbellMachineConfigKind, + Name: "machine-1", + }, + }, + WorkerNodeGroupConfigurations: []anywherev1.WorkerNodeGroupConfiguration{ + { + MachineGroupRef: &anywherev1.Ref{ + Kind: anywherev1.TinkerbellMachineConfigKind, + Name: "machine-2", + }, + }, + { + MachineGroupRef: &anywherev1.Ref{ + Kind: anywherev1.CloudStackMachineConfigKind, // Should not process this one + Name: "machine-3", + }, + }, + }, + }, + } + datacenter := &anywherev1.TinkerbellDatacenterConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "datacenter", + Namespace: "default", + }, + } + machineControlPlane := &anywherev1.TinkerbellMachineConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-1", + Namespace: "default", + }, + Spec: anywherev1.TinkerbellMachineConfigSpec{ + TemplateRef: anywherev1.Ref{ + Name: "template-name", + }, + }, + } + machineWorker := &anywherev1.TinkerbellMachineConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-2", + Namespace: "default", + }, + } + client.EXPECT().Get(ctx, "datacenter", "default", &anywherev1.TinkerbellDatacenterConfig{}).Return(nil).DoAndReturn( + func(_ context.Context, _, _ string, obj runtime.Object) error { + d := obj.(*anywherev1.TinkerbellDatacenterConfig) + d.ObjectMeta = datacenter.ObjectMeta + d.Spec = datacenter.Spec + return nil + }, + ) + client.EXPECT().Get(ctx, "machine-1", "default", &anywherev1.TinkerbellMachineConfig{}).Return(nil).DoAndReturn( + func(_ context.Context, _, _ string, obj runtime.Object) error { + m := obj.(*anywherev1.TinkerbellMachineConfig) + m.ObjectMeta = machineControlPlane.ObjectMeta + m.Spec = machineControlPlane.Spec + return nil + }, + ) + client.EXPECT().Get(ctx, "machine-2", "default", &anywherev1.TinkerbellMachineConfig{}).Return(nil).DoAndReturn( + func(_ context.Context, _, _ string, obj runtime.Object) error { + m := obj.(*anywherev1.TinkerbellMachineConfig) + m.ObjectMeta = machineWorker.ObjectMeta + return nil + }, + ) + client.EXPECT().Get(ctx, "template-name", "default", &anywherev1.TinkerbellTemplateConfig{}).Return(nil).DoAndReturn( + func(_ context.Context, _, _ string, obj runtime.Object) error { + m := obj.(*anywherev1.TinkerbellTemplateConfig) + m.ObjectMeta = machineWorker.ObjectMeta + return nil + }, + ) + + config, err := b.Build(ctx, client, cluster) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(config).NotTo(BeNil()) + g.Expect(config.Cluster).To(Equal(cluster)) + g.Expect(config.TinkerbellDatacenter).To(Equal(datacenter)) + g.Expect(len(config.TinkerbellMachineConfigs)).To(Equal(2)) + g.Expect(config.TinkerbellMachineConfigs["machine-1"]).To(Equal(machineControlPlane)) + g.Expect(config.TinkerbellMachineConfigs["machine-2"]).To(Equal(machineWorker)) +} From 45b8acb4bda60be9ba5f5ea3bf7b2124cca0de6a Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot 
<75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Mon, 15 Apr 2024 19:31:45 -0700 Subject: [PATCH 064/193] [PR BOT] Generate release testdata files (#7994) --- .../pkg/operations/testdata/main-bundle-release.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/release/cli/pkg/operations/testdata/main-bundle-release.yaml b/release/cli/pkg/operations/testdata/main-bundle-release.yaml index 30df388c96fa..d6d039dd938a 100644 --- a/release/cli/pkg/operations/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/operations/testdata/main-bundle-release.yaml @@ -638,7 +638,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hegel os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hegel:v0.10.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hegel:v0.12.0-eks-a-v0.0.0-dev-build.1 hook: bootkit: arch: @@ -1416,7 +1416,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hegel os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hegel:v0.10.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hegel:v0.12.0-eks-a-v0.0.0-dev-build.1 hook: bootkit: arch: @@ -2194,7 +2194,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hegel os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hegel:v0.10.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hegel:v0.12.0-eks-a-v0.0.0-dev-build.1 hook: bootkit: arch: @@ -2972,7 +2972,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hegel os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hegel:v0.10.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hegel:v0.12.0-eks-a-v0.0.0-dev-build.1 hook: bootkit: arch: @@ -3750,7 +3750,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hegel os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hegel:v0.10.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hegel:v0.12.0-eks-a-v0.0.0-dev-build.1 hook: bootkit: arch: @@ -4528,7 +4528,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hegel os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hegel:v0.10.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hegel:v0.12.0-eks-a-v0.0.0-dev-build.1 hook: bootkit: arch: From 489181c77dd106939ad02086d03740961b5ec280 Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Tue, 16 Apr 2024 20:06:46 -0700 Subject: [PATCH 065/193] [PR BOT] Generate release testdata files (#7997) --- .../testdata/main-bundle-release.yaml | 300 +++++++++--------- 1 file changed, 150 insertions(+), 150 deletions(-) diff --git a/release/cli/pkg/operations/testdata/main-bundle-release.yaml b/release/cli/pkg/operations/testdata/main-bundle-release.yaml index d6d039dd938a..98a37bace77b 100644 --- a/release/cli/pkg/operations/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/operations/testdata/main-bundle-release.yaml @@ -10,7 +10,7 @@ spec: versionsBundles: - bootstrap: components: - uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.3/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/bootstrap-components.yaml controller: arch: - amd64 @@ -19,7 +19,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-bootstrap-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -30,8 +30,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.3/metadata.yaml - version: v1.6.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/metadata.yaml + version: v1.6.4+abcdef1 bottlerocketHostContainers: admin: arch: @@ -67,7 +67,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-acmesolver os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.4-eks-a-v0.0.0-dev-build.1 cainjector: arch: - amd64 @@ -76,7 +76,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-cainjector os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.4-eks-a-v0.0.0-dev-build.1 controller: arch: - amd64 @@ -85,7 +85,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-controller os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.4-eks-a-v0.0.0-dev-build.1 ctl: arch: - amd64 @@ -94,10 +94,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-ctl os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.4-eks-a-v0.0.0-dev-build.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.2/cert-manager.yaml - version: v1.14.2+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.4/cert-manager.yaml + version: v1.14.4+abcdef1 webhook: arch: - amd64 @@ -106,7 +106,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-webhook os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.4-eks-a-v0.0.0-dev-build.1 cilium: cilium: arch: @@ -167,7 +167,7 @@ spec: 
version: v0.4.10-rc1+abcdef1 clusterAPI: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.3/core-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.4/core-components.yaml controller: arch: - amd64 @@ -176,7 +176,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -187,11 +187,11 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.3/metadata.yaml - version: v1.6.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.4/metadata.yaml + version: v1.6.4+abcdef1 controlPlane: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.3/control-plane-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.4/control-plane-components.yaml controller: arch: - amd64 @@ -200,7 +200,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-control-plane-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -211,13 +211,13 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.3/metadata.yaml - version: v1.6.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.4/metadata.yaml + version: v1.6.4+abcdef1 docker: clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/cluster-template-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/cluster-template-development.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/infrastructure-components-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/infrastructure-components-development.yaml kubeProxy: arch: - amd64 @@ -235,10 +235,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-docker os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.4-eks-a-v0.0.0-dev-build.1 metadata: - uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/metadata.yaml - version: v1.6.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/metadata.yaml + version: v1.6.4+abcdef1 eksD: ami: bottlerocket: {} @@ -697,7 +697,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: rufio os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/rufio:afd7cd82fa08dae8f9f3ffac96eb030176f3abbd-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/rufio:v0.3.3-eks-a-v0.0.0-dev-build.1 tink: tinkController: arch: @@ -788,7 +788,7 @@ spec: version: v1.8.5+abcdef1 - bootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.3/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/bootstrap-components.yaml controller: arch: - amd64 @@ -797,7 +797,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-bootstrap-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -808,8 +808,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.3/metadata.yaml - version: v1.6.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/metadata.yaml + version: v1.6.4+abcdef1 bottlerocketHostContainers: admin: arch: @@ -845,7 +845,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-acmesolver os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.4-eks-a-v0.0.0-dev-build.1 cainjector: arch: - amd64 @@ -854,7 +854,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-cainjector os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.4-eks-a-v0.0.0-dev-build.1 controller: arch: - amd64 @@ -863,7 +863,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-controller os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.4-eks-a-v0.0.0-dev-build.1 ctl: arch: - amd64 @@ -872,10 +872,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-ctl os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.2-eks-a-v0.0.0-dev-build.1 + uri: 
public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.4-eks-a-v0.0.0-dev-build.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.2/cert-manager.yaml - version: v1.14.2+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.4/cert-manager.yaml + version: v1.14.4+abcdef1 webhook: arch: - amd64 @@ -884,7 +884,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-webhook os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.4-eks-a-v0.0.0-dev-build.1 cilium: cilium: arch: @@ -945,7 +945,7 @@ spec: version: v0.4.10-rc1+abcdef1 clusterAPI: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.3/core-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.4/core-components.yaml controller: arch: - amd64 @@ -954,7 +954,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -965,11 +965,11 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.3/metadata.yaml - version: v1.6.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.4/metadata.yaml + version: v1.6.4+abcdef1 controlPlane: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.3/control-plane-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.4/control-plane-components.yaml controller: arch: - amd64 @@ -978,7 +978,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-control-plane-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -989,13 +989,13 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.3/metadata.yaml - version: v1.6.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.4/metadata.yaml + version: v1.6.4+abcdef1 docker: clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/cluster-template-development.yaml + uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/cluster-template-development.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/infrastructure-components-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/infrastructure-components-development.yaml kubeProxy: arch: - amd64 @@ -1013,10 +1013,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-docker os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.4-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/metadata.yaml - version: v1.6.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/metadata.yaml + version: v1.6.4+abcdef1 eksD: ami: bottlerocket: {} @@ -1475,7 +1475,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: rufio os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/rufio:afd7cd82fa08dae8f9f3ffac96eb030176f3abbd-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/rufio:v0.3.3-eks-a-v0.0.0-dev-build.1 tink: tinkController: arch: @@ -1566,7 +1566,7 @@ spec: version: v1.8.5+abcdef1 - bootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.3/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/bootstrap-components.yaml controller: arch: - amd64 @@ -1575,7 +1575,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-bootstrap-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -1586,8 +1586,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.3/metadata.yaml - version: v1.6.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/metadata.yaml + version: v1.6.4+abcdef1 bottlerocketHostContainers: admin: arch: @@ -1623,7 +1623,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-acmesolver os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.4-eks-a-v0.0.0-dev-build.1 cainjector: arch: - amd64 @@ -1632,7 +1632,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-cainjector os: linux - uri: 
public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.4-eks-a-v0.0.0-dev-build.1 controller: arch: - amd64 @@ -1641,7 +1641,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-controller os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.4-eks-a-v0.0.0-dev-build.1 ctl: arch: - amd64 @@ -1650,10 +1650,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-ctl os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.4-eks-a-v0.0.0-dev-build.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.2/cert-manager.yaml - version: v1.14.2+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.4/cert-manager.yaml + version: v1.14.4+abcdef1 webhook: arch: - amd64 @@ -1662,7 +1662,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-webhook os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.4-eks-a-v0.0.0-dev-build.1 cilium: cilium: arch: @@ -1723,7 +1723,7 @@ spec: version: v0.4.10-rc1+abcdef1 clusterAPI: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.3/core-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.4/core-components.yaml controller: arch: - amd64 @@ -1732,7 +1732,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -1743,11 +1743,11 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.3/metadata.yaml - version: v1.6.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.4/metadata.yaml + version: v1.6.4+abcdef1 controlPlane: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.3/control-plane-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.4/control-plane-components.yaml controller: arch: - amd64 @@ -1756,7 +1756,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-control-plane-controller os: linux - uri: 
public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -1767,13 +1767,13 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.3/metadata.yaml - version: v1.6.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.4/metadata.yaml + version: v1.6.4+abcdef1 docker: clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/cluster-template-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/cluster-template-development.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/infrastructure-components-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/infrastructure-components-development.yaml kubeProxy: arch: - amd64 @@ -1791,10 +1791,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-docker os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.4-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/metadata.yaml - version: v1.6.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/metadata.yaml + version: v1.6.4+abcdef1 eksD: ami: bottlerocket: {} @@ -2253,7 +2253,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: rufio os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/rufio:afd7cd82fa08dae8f9f3ffac96eb030176f3abbd-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/rufio:v0.3.3-eks-a-v0.0.0-dev-build.1 tink: tinkController: arch: @@ -2344,7 +2344,7 @@ spec: version: v1.8.5+abcdef1 - bootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.3/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/bootstrap-components.yaml controller: arch: - amd64 @@ -2353,7 +2353,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-bootstrap-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -2364,8 +2364,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.3/metadata.yaml - version: v1.6.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/metadata.yaml + version: v1.6.4+abcdef1 bottlerocketHostContainers: admin: arch: @@ -2401,7 +2401,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-acmesolver os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.4-eks-a-v0.0.0-dev-build.1 cainjector: arch: - amd64 @@ -2410,7 +2410,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-cainjector os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.4-eks-a-v0.0.0-dev-build.1 controller: arch: - amd64 @@ -2419,7 +2419,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-controller os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.4-eks-a-v0.0.0-dev-build.1 ctl: arch: - amd64 @@ -2428,10 +2428,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-ctl os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.4-eks-a-v0.0.0-dev-build.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.2/cert-manager.yaml - version: v1.14.2+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.4/cert-manager.yaml + version: v1.14.4+abcdef1 webhook: arch: - amd64 @@ -2440,7 +2440,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-webhook os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.4-eks-a-v0.0.0-dev-build.1 cilium: cilium: arch: @@ -2501,7 +2501,7 @@ spec: version: v0.4.10-rc1+abcdef1 clusterAPI: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.3/core-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.4/core-components.yaml controller: arch: - amd64 @@ -2510,7 +2510,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -2521,11 +2521,11 @@ spec: os: linux uri: 
public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.3/metadata.yaml - version: v1.6.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.4/metadata.yaml + version: v1.6.4+abcdef1 controlPlane: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.3/control-plane-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.4/control-plane-components.yaml controller: arch: - amd64 @@ -2534,7 +2534,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-control-plane-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -2545,13 +2545,13 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.3/metadata.yaml - version: v1.6.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.4/metadata.yaml + version: v1.6.4+abcdef1 docker: clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/cluster-template-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/cluster-template-development.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/infrastructure-components-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/infrastructure-components-development.yaml kubeProxy: arch: - amd64 @@ -2569,10 +2569,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-docker os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.4-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/metadata.yaml - version: v1.6.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/metadata.yaml + version: v1.6.4+abcdef1 eksD: ami: bottlerocket: {} @@ -3031,7 +3031,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: rufio os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/rufio:afd7cd82fa08dae8f9f3ffac96eb030176f3abbd-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/rufio:v0.3.3-eks-a-v0.0.0-dev-build.1 tink: tinkController: arch: @@ -3122,7 +3122,7 @@ spec: version: v1.8.5+abcdef1 - bootstrap: components: - uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.3/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/bootstrap-components.yaml controller: arch: - amd64 @@ -3131,7 +3131,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-bootstrap-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -3142,8 +3142,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.3/metadata.yaml - version: v1.6.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/metadata.yaml + version: v1.6.4+abcdef1 bottlerocketHostContainers: admin: arch: @@ -3179,7 +3179,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-acmesolver os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.4-eks-a-v0.0.0-dev-build.1 cainjector: arch: - amd64 @@ -3188,7 +3188,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-cainjector os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.4-eks-a-v0.0.0-dev-build.1 controller: arch: - amd64 @@ -3197,7 +3197,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-controller os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.4-eks-a-v0.0.0-dev-build.1 ctl: arch: - amd64 @@ -3206,10 +3206,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-ctl os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.4-eks-a-v0.0.0-dev-build.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.2/cert-manager.yaml - version: v1.14.2+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.4/cert-manager.yaml + version: v1.14.4+abcdef1 webhook: arch: - amd64 @@ -3218,7 +3218,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-webhook os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.4-eks-a-v0.0.0-dev-build.1 cilium: cilium: arch: @@ 
-3279,7 +3279,7 @@ spec: version: v0.4.10-rc1+abcdef1 clusterAPI: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.3/core-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.4/core-components.yaml controller: arch: - amd64 @@ -3288,7 +3288,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -3299,11 +3299,11 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.3/metadata.yaml - version: v1.6.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.4/metadata.yaml + version: v1.6.4+abcdef1 controlPlane: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.3/control-plane-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.4/control-plane-components.yaml controller: arch: - amd64 @@ -3312,7 +3312,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-control-plane-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -3323,13 +3323,13 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.3/metadata.yaml - version: v1.6.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.4/metadata.yaml + version: v1.6.4+abcdef1 docker: clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/cluster-template-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/cluster-template-development.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/infrastructure-components-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/infrastructure-components-development.yaml kubeProxy: arch: - amd64 @@ -3347,10 +3347,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-docker os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.4-eks-a-v0.0.0-dev-build.1 metadata: - uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/metadata.yaml - version: v1.6.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/metadata.yaml + version: v1.6.4+abcdef1 eksD: ami: bottlerocket: {} @@ -3809,7 +3809,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: rufio os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/rufio:afd7cd82fa08dae8f9f3ffac96eb030176f3abbd-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/rufio:v0.3.3-eks-a-v0.0.0-dev-build.1 tink: tinkController: arch: @@ -3900,7 +3900,7 @@ spec: version: v1.8.5+abcdef1 - bootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.3/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/bootstrap-components.yaml controller: arch: - amd64 @@ -3909,7 +3909,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-bootstrap-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -3920,8 +3920,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.3/metadata.yaml - version: v1.6.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/metadata.yaml + version: v1.6.4+abcdef1 bottlerocketHostContainers: admin: arch: @@ -3957,7 +3957,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-acmesolver os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.4-eks-a-v0.0.0-dev-build.1 cainjector: arch: - amd64 @@ -3966,7 +3966,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-cainjector os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.4-eks-a-v0.0.0-dev-build.1 controller: arch: - amd64 @@ -3975,7 +3975,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-controller os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.4-eks-a-v0.0.0-dev-build.1 ctl: arch: - amd64 @@ -3984,10 +3984,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-ctl os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.2-eks-a-v0.0.0-dev-build.1 + uri: 
public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.4-eks-a-v0.0.0-dev-build.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.2/cert-manager.yaml - version: v1.14.2+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.4/cert-manager.yaml + version: v1.14.4+abcdef1 webhook: arch: - amd64 @@ -3996,7 +3996,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-webhook os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.4-eks-a-v0.0.0-dev-build.1 cilium: cilium: arch: @@ -4057,7 +4057,7 @@ spec: version: v0.4.10-rc1+abcdef1 clusterAPI: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.3/core-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.4/core-components.yaml controller: arch: - amd64 @@ -4066,7 +4066,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -4077,11 +4077,11 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.3/metadata.yaml - version: v1.6.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.4/metadata.yaml + version: v1.6.4+abcdef1 controlPlane: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.3/control-plane-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.4/control-plane-components.yaml controller: arch: - amd64 @@ -4090,7 +4090,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-control-plane-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -4101,13 +4101,13 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.3/metadata.yaml - version: v1.6.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.4/metadata.yaml + version: v1.6.4+abcdef1 docker: clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/cluster-template-development.yaml + uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/cluster-template-development.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/infrastructure-components-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/infrastructure-components-development.yaml kubeProxy: arch: - amd64 @@ -4125,10 +4125,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-docker os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.4-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.3/metadata.yaml - version: v1.6.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/metadata.yaml + version: v1.6.4+abcdef1 eksD: ami: bottlerocket: {} @@ -4587,7 +4587,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: rufio os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/rufio:afd7cd82fa08dae8f9f3ffac96eb030176f3abbd-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/rufio:v0.3.3-eks-a-v0.0.0-dev-build.1 tink: tinkController: arch: From c90fd24dc5b249dd611f23524031597c5ca81b9f Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Tue, 16 Apr 2024 21:36:46 -0700 Subject: [PATCH 066/193] [PR BOT] Generate release testdata files (#8000) --- .../testdata/main-bundle-release.yaml | 48 +++++++++---------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/release/cli/pkg/operations/testdata/main-bundle-release.yaml b/release/cli/pkg/operations/testdata/main-bundle-release.yaml index 98a37bace77b..87e67446427f 100644 --- a/release/cli/pkg/operations/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/operations/testdata/main-bundle-release.yaml @@ -707,7 +707,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-controller os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-controller:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-controller:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkServer: arch: - amd64 @@ -716,7 +716,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-server os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-server:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-server:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkWorker: arch: - amd64 @@ -725,12 +725,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-worker os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-worker:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-worker:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkerbellChart: description: Helm chart for tinkerbell-chart imageDigest: 
sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tinkerbell-chart - uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.5-eks-a-v0.0.0-dev-build.1 version: v0.5.2+abcdef1 upgrader: upgrader: @@ -1485,7 +1485,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-controller os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-controller:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-controller:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkServer: arch: - amd64 @@ -1494,7 +1494,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-server os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-server:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-server:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkWorker: arch: - amd64 @@ -1503,12 +1503,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-worker os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-worker:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-worker:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkerbellChart: description: Helm chart for tinkerbell-chart imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tinkerbell-chart - uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.5-eks-a-v0.0.0-dev-build.1 version: v0.5.2+abcdef1 upgrader: upgrader: @@ -2263,7 +2263,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-controller os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-controller:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-controller:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkServer: arch: - amd64 @@ -2272,7 +2272,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-server os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-server:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-server:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkWorker: arch: - amd64 @@ -2281,12 +2281,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-worker os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-worker:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-worker:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkerbellChart: description: Helm chart for tinkerbell-chart imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tinkerbell-chart - uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.5-eks-a-v0.0.0-dev-build.1 version: v0.5.2+abcdef1 upgrader: upgrader: @@ -3041,7 +3041,7 @@ spec: imageDigest: 
sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-controller os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-controller:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-controller:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkServer: arch: - amd64 @@ -3050,7 +3050,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-server os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-server:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-server:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkWorker: arch: - amd64 @@ -3059,12 +3059,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-worker os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-worker:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-worker:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkerbellChart: description: Helm chart for tinkerbell-chart imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tinkerbell-chart - uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.5-eks-a-v0.0.0-dev-build.1 version: v0.5.2+abcdef1 upgrader: upgrader: @@ -3819,7 +3819,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-controller os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-controller:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-controller:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkServer: arch: - amd64 @@ -3828,7 +3828,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-server os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-server:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-server:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkWorker: arch: - amd64 @@ -3837,12 +3837,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-worker os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-worker:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-worker:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkerbellChart: description: Helm chart for tinkerbell-chart imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tinkerbell-chart - uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.5-eks-a-v0.0.0-dev-build.1 version: v0.5.2+abcdef1 upgrader: upgrader: @@ -4597,7 +4597,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-controller os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-controller:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-controller:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkServer: arch: - amd64 @@ -4606,7 +4606,7 @@ spec: imageDigest: 
sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-server os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-server:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-server:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkWorker: arch: - amd64 @@ -4615,12 +4615,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tink-worker os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-worker:v0.8.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tink/tink-worker:v0.10.0-eks-a-v0.0.0-dev-build.1 tinkerbellChart: description: Helm chart for tinkerbell-chart imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tinkerbell-chart - uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.5-eks-a-v0.0.0-dev-build.1 version: v0.5.2+abcdef1 upgrader: upgrader: From 17feb3f3eb7231b1f74d0a76af8778bf053b423e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Apr 2024 22:03:52 -0700 Subject: [PATCH 067/193] Bump github.com/aws/aws-sdk-go from 1.51.18 to 1.51.22 in /release/cli (#7998) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.51.18 to 1.51.22. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.51.18...v1.51.22) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- release/cli/go.mod | 2 +- release/cli/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/release/cli/go.mod b/release/cli/go.mod index dae75bc0d457..3b72d3f251ba 100644 --- a/release/cli/go.mod +++ b/release/cli/go.mod @@ -3,7 +3,7 @@ module github.com/aws/eks-anywhere/release/cli go 1.21 require ( - github.com/aws/aws-sdk-go v1.51.18 + github.com/aws/aws-sdk-go v1.51.22 github.com/aws/aws-sdk-go-v2 v1.26.1 github.com/aws/eks-anywhere v0.18.0 github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e diff --git a/release/cli/go.sum b/release/cli/go.sum index d36c3a3dbe2d..58ba81897c45 100644 --- a/release/cli/go.sum +++ b/release/cli/go.sum @@ -56,8 +56,8 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:l github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY= github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/aws/aws-sdk-go v1.38.40/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.51.18 h1:JKrk49ZlBTyKa4+droU7U/hk0QG84v91xaA58O0LPdo= -github.com/aws/aws-sdk-go v1.51.18/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.51.22 h1:VL2p2JgC32myt7DMEcbe1devdtgGSgMNvZpkcdvlxq4= +github.com/aws/aws-sdk-go v1.51.22/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA= github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e h1:GB6Cn9yKEt31mDF7RrVWyM9WoppNkGYth8zBPIJGJ+w= From b2d7bf39faa923a6cbc2dfff809fbc2c85cd3bd3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Apr 2024 22:03:58 -0700 Subject: [PATCH 068/193] Bump helm.sh/helm/v3 from 3.14.3 to 3.14.4 in /release/cli (#7999) Bumps [helm.sh/helm/v3](https://github.com/helm/helm) from 3.14.3 to 3.14.4. - [Release notes](https://github.com/helm/helm/releases) - [Commits](https://github.com/helm/helm/compare/v3.14.3...v3.14.4) --- updated-dependencies: - dependency-name: helm.sh/helm/v3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- release/cli/go.mod | 2 +- release/cli/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/release/cli/go.mod b/release/cli/go.mod index 3b72d3f251ba..8f12b7a5d334 100644 --- a/release/cli/go.mod +++ b/release/cli/go.mod @@ -16,7 +16,7 @@ require ( github.com/spf13/cobra v1.8.0 github.com/spf13/viper v1.18.2 golang.org/x/sync v0.7.0 - helm.sh/helm/v3 v3.14.3 + helm.sh/helm/v3 v3.14.4 k8s.io/apimachinery v0.29.3 k8s.io/helm v2.17.0+incompatible sigs.k8s.io/controller-runtime v0.16.5 diff --git a/release/cli/go.sum b/release/cli/go.sum index 58ba81897c45..2b1f22eb4452 100644 --- a/release/cli/go.sum +++ b/release/cli/go.sum @@ -867,8 +867,8 @@ gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= -helm.sh/helm/v3 v3.14.3 h1:HmvRJlwyyt9HjgmAuxHbHv3PhMz9ir/XNWHyXfmnOP4= -helm.sh/helm/v3 v3.14.3/go.mod h1:v6myVbyseSBJTzhmeE39UcPLNv6cQK6qss3dvgAySaE= +helm.sh/helm/v3 v3.14.4 h1:6FSpEfqyDalHq3kUr4gOMThhgY55kXUEjdQoyODYnrM= +helm.sh/helm/v3 v3.14.4/go.mod h1:Tje7LL4gprZpuBNTbG34d1Xn5NmRT3OWfBRwpOSer9I= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 63445dd26f3c8ecbcd6e5ee9de43339779d35f2e Mon Sep 17 00:00:00 2001 From: Soto Sugita Date: Thu, 18 Apr 2024 07:19:47 +0900 Subject: [PATCH 069/193] Fix typo in tinkerbell-overview.md (#7996) --- .../en/docs/getting-started/baremetal/tinkerbell-overview.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/content/en/docs/getting-started/baremetal/tinkerbell-overview.md b/docs/content/en/docs/getting-started/baremetal/tinkerbell-overview.md index 3dab38e17ae7..f871c152e172 100644 --- a/docs/content/en/docs/getting-started/baremetal/tinkerbell-overview.md +++ b/docs/content/en/docs/getting-started/baremetal/tinkerbell-overview.md @@ -243,7 +243,7 @@ NAME STATUS ROLES AGE VERSION INTERNAL eksa-da04 Ready control-plane,master 9m5s v1.22.10-eks-7dc61e8 10.80.30.23 ``` ```bash -kubectl get logs -n eksa-system | grep hegel +kubectl get pods -n eksa-system | grep hegel ``` ``` hegel-n7ngs From 6e42d97fe80bcfd82570402e6cc01525ea4f54d7 Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Wed, 17 Apr 2024 19:49:47 -0700 Subject: [PATCH 070/193] [PR BOT] Generate release testdata files (#8005) --- .../testdata/main-bundle-release.yaml | 60 +++++++++---------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/release/cli/pkg/operations/testdata/main-bundle-release.yaml b/release/cli/pkg/operations/testdata/main-bundle-release.yaml index 87e67446427f..6f67e9767ad1 100644 --- a/release/cli/pkg/operations/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/operations/testdata/main-bundle-release.yaml @@ -751,11 +751,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-vsphere os: linux - uri: 
public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.8.5-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.9.3-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/infrastructure-components.yaml kubeProxy: arch: - amd64 @@ -784,8 +784,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.25.3-eks-d-1-25-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/metadata.yaml - version: v1.8.5+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/metadata.yaml + version: v1.9.3+abcdef1 - bootstrap: components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/bootstrap-components.yaml @@ -1529,11 +1529,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-vsphere os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.8.5-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.9.3-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/infrastructure-components.yaml kubeProxy: arch: - amd64 @@ -1562,8 +1562,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.26.2-eks-d-1-26-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/metadata.yaml - version: v1.8.5+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/metadata.yaml + version: v1.9.3+abcdef1 - bootstrap: components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/bootstrap-components.yaml @@ -2307,11 +2307,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: 
cluster-api-provider-vsphere os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.8.5-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.9.3-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/infrastructure-components.yaml kubeProxy: arch: - amd64 @@ -2340,8 +2340,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.27.0-eks-d-1-27-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/metadata.yaml - version: v1.8.5+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/metadata.yaml + version: v1.9.3+abcdef1 - bootstrap: components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/bootstrap-components.yaml @@ -3085,11 +3085,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-vsphere os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.8.5-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.9.3-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/infrastructure-components.yaml kubeProxy: arch: - amd64 @@ -3118,8 +3118,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.28.0-eks-d-1-28-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/metadata.yaml - version: v1.8.5+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/metadata.yaml + version: v1.9.3+abcdef1 - bootstrap: components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/bootstrap-components.yaml @@ -3863,11 +3863,11 @@ spec: imageDigest: 
sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-vsphere os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.8.5-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.9.3-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/infrastructure-components.yaml kubeProxy: arch: - amd64 @@ -3896,8 +3896,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.29.0-eks-d-1-29-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/metadata.yaml - version: v1.8.5+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/metadata.yaml + version: v1.9.3+abcdef1 - bootstrap: components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/bootstrap-components.yaml @@ -4641,11 +4641,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-vsphere os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.8.5-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.9.3-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/infrastructure-components.yaml kubeProxy: arch: - amd64 @@ -4674,6 +4674,6 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.30.0-rc.0-eks-d-1-30-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.8.5/metadata.yaml - version: v1.8.5+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/metadata.yaml + version: v1.9.3+abcdef1 status: {} From 55d37aeb6901050aff63d8cfb22ae13540d62fa0 Mon Sep 17 00:00:00 2001 From: Chris Doherty Date: Thu, 18 Apr 2024 13:06:24 -0500 Subject: [PATCH 
071/193] Retry pulling artifacts when running download images cmd (#8004) --- pkg/curatedpackages/curatedpackages.go | 14 ++++++++++++-- pkg/docker/registry.go | 7 ++++++- pkg/docker/registry_test.go | 3 +++ pkg/helm/download.go | 10 +++++++++- pkg/helm/download_test.go | 2 ++ 5 files changed, 32 insertions(+), 4 deletions(-) diff --git a/pkg/curatedpackages/curatedpackages.go b/pkg/curatedpackages/curatedpackages.go index 44602e7ee5ee..e68542c2d4be 100644 --- a/pkg/curatedpackages/curatedpackages.go +++ b/pkg/curatedpackages/curatedpackages.go @@ -5,6 +5,7 @@ import ( "context" "fmt" "strings" + "time" "github.com/go-logr/logr" "oras.land/oras-go/pkg/content" @@ -15,12 +16,13 @@ import ( "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/logger" + "github.com/aws/eks-anywhere/pkg/retrier" "github.com/aws/eks-anywhere/pkg/types" releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1" ) const ( - license = `The Amazon EKS Anywhere Curated Packages are only available to customers with the + license = `The Amazon EKS Anywhere Curated Packages are only available to customers with the Amazon EKS Anywhere Enterprise Subscription` width = 86 ) @@ -70,7 +72,15 @@ func PrintLicense() { func PullLatestBundle(ctx context.Context, log logr.Logger, artifact string) ([]byte, error) { puller := artifacts.NewRegistryPuller(log) - data, err := puller.Pull(ctx, artifact, "") + var data []byte + err := retrier.Retry(5, 200*time.Millisecond, func() error { + d, err := puller.Pull(ctx, artifact, "") + if err != nil { + return err + } + data = d + return nil + }) if err != nil { return nil, fmt.Errorf("unable to pull artifacts %v", err) } diff --git a/pkg/docker/registry.go b/pkg/docker/registry.go index 585dcc7a3406..276a0dede0ad 100644 --- a/pkg/docker/registry.go +++ b/pkg/docker/registry.go @@ -5,8 +5,10 @@ import ( "fmt" "runtime" "strings" + "time" "github.com/aws/eks-anywhere/pkg/logger" + "github.com/aws/eks-anywhere/pkg/retrier" ) // These constants are temporary since currently there is a limitation on harbor @@ -64,12 +66,14 @@ func (d *ImageRegistryDestination) Write(ctx context.Context, images ...string) type ImageOriginalRegistrySource struct { client ImagePuller processor *ConcurrentImageProcessor + Retrier retrier.Retrier } func NewOriginalRegistrySource(client ImagePuller) *ImageOriginalRegistrySource { return &ImageOriginalRegistrySource{ client: client, processor: NewConcurrentImageProcessor(runtime.GOMAXPROCS(0)), + Retrier: *retrier.NewWithMaxRetries(5, 200*time.Second), } } @@ -79,7 +83,8 @@ func (s *ImageOriginalRegistrySource) Load(ctx context.Context, images ...string logger.V(3).Info("Starting pull", "numberOfImages", len(images)) err := s.processor.Process(ctx, images, func(ctx context.Context, image string) error { - if err := s.client.PullImage(ctx, image); err != nil { + err := s.Retrier.Retry(func() error { return s.client.PullImage(ctx, image) }) + if err != nil { return err } diff --git a/pkg/docker/registry_test.go b/pkg/docker/registry_test.go index 9fd82622dbf2..61ef9816ce63 100644 --- a/pkg/docker/registry_test.go +++ b/pkg/docker/registry_test.go @@ -11,6 +11,7 @@ import ( "github.com/aws/eks-anywhere/internal/test" "github.com/aws/eks-anywhere/pkg/docker" "github.com/aws/eks-anywhere/pkg/docker/mocks" + "github.com/aws/eks-anywhere/pkg/retrier" ) func TestNewRegistryDestination(t *testing.T) { @@ -119,6 +120,7 @@ func TestNewOriginalRegistrySource(t *testing.T) { images := []string{"image1:1", 
"image2:2"} ctx := context.Background() dstLoader := docker.NewOriginalRegistrySource(client) + dstLoader.Retrier = *retrier.NewWithMaxRetries(1, 0) for _, i := range images { client.EXPECT().PullImage(test.AContext(), i) } @@ -134,6 +136,7 @@ func TestOriginalRegistrySourceError(t *testing.T) { images := []string{"image1:1", "image2:2"} ctx := context.Background() dstLoader := docker.NewOriginalRegistrySource(client) + dstLoader.Retrier = *retrier.NewWithMaxRetries(1, 0) client.EXPECT().PullImage(test.AContext(), images[0]).Return(errors.New("error pulling")) client.EXPECT().PullImage(test.AContext(), images[1]).MaxTimes(1) diff --git a/pkg/helm/download.go b/pkg/helm/download.go index a97d373fc747..eab31434fd80 100644 --- a/pkg/helm/download.go +++ b/pkg/helm/download.go @@ -4,8 +4,10 @@ import ( "context" "fmt" "sort" + "time" "github.com/aws/eks-anywhere/pkg/logger" + "github.com/aws/eks-anywhere/pkg/retrier" "github.com/aws/eks-anywhere/pkg/types" "github.com/aws/eks-anywhere/pkg/utils/oci" ) @@ -13,12 +15,15 @@ import ( type ChartRegistryDownloader struct { client Client dstFolder string + + Retrier retrier.Retrier } func NewChartRegistryDownloader(client Client, dstFolder string) *ChartRegistryDownloader { return &ChartRegistryDownloader{ client: client, dstFolder: dstFolder, + Retrier: *retrier.NewWithMaxRetries(5, 200*time.Second), } } @@ -26,7 +31,10 @@ func (d *ChartRegistryDownloader) Download(ctx context.Context, charts ...string for _, chart := range uniqueCharts(charts) { chartURL, chartVersion := oci.ChartURLAndVersion(chart) logger.Info("Saving helm chart to disk", "chart", chart) - if err := d.client.SaveChart(ctx, chartURL, chartVersion, d.dstFolder); err != nil { + err := d.Retrier.Retry(func() error { + return d.client.SaveChart(ctx, chartURL, chartVersion, d.dstFolder) + }) + if err != nil { return fmt.Errorf("downloading chart [%s] from registry: %v", chart, err) } } diff --git a/pkg/helm/download_test.go b/pkg/helm/download_test.go index 27cf2aff14ab..a3e1d486a167 100644 --- a/pkg/helm/download_test.go +++ b/pkg/helm/download_test.go @@ -10,6 +10,7 @@ import ( "github.com/aws/eks-anywhere/pkg/helm" "github.com/aws/eks-anywhere/pkg/helm/mocks" + "github.com/aws/eks-anywhere/pkg/retrier" ) func TestChartRegistryDownloaderDownload(t *testing.T) { @@ -37,5 +38,6 @@ func TestChartRegistryDownloaderDownloadError(t *testing.T) { client.EXPECT().SaveChart(ctx, "oci://ecr.com/chart2", "v2.2.0", folder).Return(errors.New("failed downloading")) d := helm.NewChartRegistryDownloader(client, folder) + d.Retrier = *retrier.NewWithMaxRetries(1, 0) g.Expect(d.Download(ctx, charts...)).To(MatchError(ContainSubstring("downloading chart [ecr.com/chart2:v2.2.0] from registry: failed downloading"))) } From 7bf18bb2f1d467f0b0e220b754206109c4611a4e Mon Sep 17 00:00:00 2001 From: Chris Doherty Date: Thu, 18 Apr 2024 13:34:53 -0500 Subject: [PATCH 072/193] Log in to public ECR for E2E test (#8001) --- internal/test/e2e/ecr.go | 22 ++++++++++++++++++++++ internal/test/e2e/setup.go | 5 +++++ 2 files changed, 27 insertions(+) create mode 100644 internal/test/e2e/ecr.go diff --git a/internal/test/e2e/ecr.go b/internal/test/e2e/ecr.go new file mode 100644 index 000000000000..9738ba09e7a2 --- /dev/null +++ b/internal/test/e2e/ecr.go @@ -0,0 +1,22 @@ +package e2e + +import ( + "fmt" + + "github.com/go-logr/logr" + + "github.com/aws/eks-anywhere/internal/pkg/ssm" +) + +func (e *E2ESession) loginToPublicECR() error { + e.logger.V(1).Info("Logging in to public ECR") + + command := "aws ecr-public 
get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws" + if err := ssm.Run(e.session, logr.Discard(), e.instanceId, command, ssmTimeout); err != nil { + return fmt.Errorf("sign in to public ecr: %v", err) + } + + e.logger.V(1).Info("Logged in to public ECR") + + return nil +} diff --git a/internal/test/e2e/setup.go b/internal/test/e2e/setup.go index 554d4c98f995..27d1f1aa58ff 100644 --- a/internal/test/e2e/setup.go +++ b/internal/test/e2e/setup.go @@ -178,6 +178,11 @@ func (e *E2ESession) setup(regex string) error { return err } + err = e.loginToPublicECR() + if err != nil { + return err + } + ipPool := e.ipPool.ToString() if ipPool != "" { e.testEnvVars[e2etests.ClusterIPPoolEnvVar] = ipPool From 63d4edca546e35d2df8018215d10cc636f4c34bb Mon Sep 17 00:00:00 2001 From: Mitali Paygude Date: Thu, 18 Apr 2024 12:05:58 -0700 Subject: [PATCH 073/193] Remove unused code (#7990) --- pkg/cluster/spec_test.go | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/pkg/cluster/spec_test.go b/pkg/cluster/spec_test.go index 2d36875e9885..9c7b90fd4337 100644 --- a/pkg/cluster/spec_test.go +++ b/pkg/cluster/spec_test.go @@ -1,7 +1,6 @@ package cluster_test import ( - "embed" "testing" eksdv1 "github.com/aws/eks-distro-build-tooling/release/api/v1alpha1" @@ -12,13 +11,9 @@ import ( anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/files" - "github.com/aws/eks-anywhere/pkg/manifests/eksd" releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1" ) -//go:embed testdata -var testdataFS embed.FS - func TestNewSpecError(t *testing.T) { version := test.DevEksaVersion() tests := []struct { @@ -375,14 +370,3 @@ func validateVersionedRepo(t *testing.T, gotImage cluster.VersionedRepository, w t.Errorf("GetNewSpec() = Spec: Invalid kubernetes repo, got %s, want %s", gotImage.Tag, wantTag) } } - -func readEksdRelease(tb testing.TB, url string) *eksdv1.Release { - tb.Helper() - r := files.NewReader() - release, err := eksd.ReadManifest(r, url) - if err != nil { - tb.Fatalf("Failed reading eks-d manifest: %s", err) - } - - return release -} From 380f66067f1892927dc00ceb4517ec68ec113941 Mon Sep 17 00:00:00 2001 From: Chris Doherty Date: Thu, 18 Apr 2024 15:18:53 -0500 Subject: [PATCH 074/193] Log test runner instance config in e2e (#8007) --- internal/test/e2e/run.go | 112 +++++++++++++++++--------------- internal/test/e2e/setup.go | 21 +++--- internal/test/e2e/testRunner.go | 32 ++++----- test/framework/cluster.go | 1 + 4 files changed, 88 insertions(+), 78 deletions(-) diff --git a/internal/test/e2e/run.go b/internal/test/e2e/run.go index fa4c61b4899b..197401caa56e 100644 --- a/internal/test/e2e/run.go +++ b/internal/test/e2e/run.go @@ -125,7 +125,7 @@ func RunTestsInParallel(conf ParallelRunConf) error { for c := range work { r := instanceTestsResults{conf: c} - r.conf.instanceId, r.testCommandResult, err = RunTests(c, invCatalogue) + r.conf.InstanceID, r.testCommandResult, err = RunTests(c, invCatalogue) if err != nil { r.err = err } @@ -150,22 +150,22 @@ func RunTestsInParallel(conf ParallelRunConf) error { // Once the tool is updated to support the unified message, remove them if r.err != nil { result = testResultError - conf.Logger.Error(r.err, "Failed running e2e tests for instance", "jobId", r.conf.jobId, "instanceId", r.conf.instanceId, "tests", r.conf.regex, "status", testResultFail) + conf.Logger.Error(r.err, "Failed running e2e tests for instance", "jobId", 
r.conf.JobID, "instanceId", r.conf.InstanceID, "tests", r.conf.Regex, "status", testResultFail) failedInstances++ } else if !r.testCommandResult.Successful() { result = testResultFail - conf.Logger.Info("An e2e instance run has failed", "jobId", r.conf.jobId, "instanceId", r.conf.instanceId, "commandId", r.testCommandResult.CommandId, "tests", r.conf.regex, "status", testResultFail) + conf.Logger.Info("An e2e instance run has failed", "jobId", r.conf.JobID, "instanceId", r.conf.InstanceID, "commandId", r.testCommandResult.CommandId, "tests", r.conf.Regex, "status", testResultFail) failedInstances++ } else { result = testResultPass - conf.Logger.Info("Instance tests completed successfully", "jobId", r.conf.jobId, "instanceId", r.conf.instanceId, "commandId", r.testCommandResult.CommandId, "tests", r.conf.regex, "status", testResultPass) + conf.Logger.Info("Instance tests completed successfully", "jobId", r.conf.JobID, "instanceId", r.conf.InstanceID, "commandId", r.testCommandResult.CommandId, "tests", r.conf.Regex, "status", testResultPass) } completedInstances++ conf.Logger.Info("Instance tests run finished", "result", result, - "tests", r.conf.regex, - "jobId", r.conf.jobId, - "instanceId", r.conf.instanceId, + "tests", r.conf.Regex, + "jobId", r.conf.JobID, + "instanceId", r.conf.InstanceID, "completedInstances", completedInstances, "totalInstances", totalInstances, ) @@ -179,29 +179,35 @@ func RunTestsInParallel(conf ParallelRunConf) error { } type instanceRunConf struct { - session *session.Session - instanceProfileName, storageBucket, jobId, parentJobId, regex, instanceId string - testReportFolder, branchName string - ipPool networkutils.IPPool - hardware []*api.Hardware - hardwareCount int - tinkerbellAirgappedTest bool - bundlesOverride bool - testRunnerType TestRunnerType - testRunnerConfig TestInfraConfig - cleanupVms bool - logger logr.Logger + InstanceProfileName string + StorageBucket string + JobID string + ParentJobID string + Regex string + InstanceID string + TestReportFolder string + BranchName string + IPPool networkutils.IPPool + Hardware []*api.Hardware + HardwareCount int + TinkerbellAirgappedTest bool + BundlesOverride bool + TestRunnerType TestRunnerType + TestRunnerConfig TestInfraConfig + CleanupVMs bool + Logger logr.Logger + Session *session.Session } //nolint:gocyclo, revive // RunTests responsible launching test runner to run tests is complex. 
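// The flow below, in brief: build a testRunner for conf.TestRunnerType; when
// conf.HardwareCount > 0, reserve Tinkerbell hardware from the airgapped or
// non-airgapped catalogue (released again via defer); create the runner
// instance and defer its decommission; then drive an E2ESession through
// setup and run the tests selected by conf.Regex.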
func RunTests(conf instanceRunConf, inventoryCatalogue map[string]*hardwareCatalogue) (testInstanceID string, testCommandResult *testCommandResult, err error) { - testRunner, err := newTestRunner(conf.testRunnerType, conf.testRunnerConfig) + testRunner, err := newTestRunner(conf.TestRunnerType, conf.TestRunnerConfig) if err != nil { return "", nil, err } - if conf.hardwareCount > 0 { + if conf.HardwareCount > 0 { var hardwareCatalogue *hardwareCatalogue - if conf.tinkerbellAirgappedTest { + if conf.TinkerbellAirgappedTest { hardwareCatalogue = inventoryCatalogue[airgappedHardware] } else { hardwareCatalogue = inventoryCatalogue[nonAirgappedHardware] @@ -214,16 +220,18 @@ func RunTests(conf instanceRunConf, inventoryCatalogue map[string]*hardwareCatal defer releaseTinkerbellHardware(&conf, hardwareCatalogue) } + conf.Logger.Info("Creating runner instance", "cfg", conf) instanceId, err := testRunner.createInstance(conf) if err != nil { return "", nil, err } - conf.logger.V(1).Info("TestRunner instance has been created", "instanceId", instanceId) + conf.Logger = conf.Logger.WithValues("instance_id", instanceId) + conf.Logger.Info("TestRunner instance has been created") defer func() { err := testRunner.decommInstance(conf) if err != nil { - conf.logger.V(1).Info("WARN: Failed to decomm e2e test runner instance", "error", err) + conf.Logger.V(1).Info("WARN: Failed to decomm e2e test runner instance", "error", err) } }() @@ -232,12 +240,12 @@ func RunTests(conf instanceRunConf, inventoryCatalogue map[string]*hardwareCatal return "", nil, err } - err = session.setup(conf.regex) + err = session.setup(conf.Regex) if err != nil { return session.instanceId, nil, err } - testCommandResult, err = session.runTests(conf.regex) + testCommandResult, err = session.runTests(conf.Regex) if err != nil { return session.instanceId, nil, err } @@ -288,13 +296,13 @@ func (e *E2ESession) runTests(regex string) (testCommandResult *testCommandResul } func (c instanceRunConf) runPostTestsProcessing(e *E2ESession, testCommandResult *testCommandResult) error { - regex := strings.Trim(c.regex, "\"") + regex := strings.Trim(c.Regex, "\"") tests := strings.Split(regex, "|") for _, testName := range tests { e.uploadJUnitReportFromInstance(testName) - if c.testReportFolder != "" { - e.downloadJUnitReportToLocalDisk(testName, c.testReportFolder) + if c.TestReportFolder != "" { + e.downloadJUnitReportToLocalDisk(testName, c.TestReportFolder) } if !testCommandResult.Successful() { @@ -486,30 +494,30 @@ func getTinkerbellTestsWithCount(tinkerbellTests []string, conf ParallelRunConf) func newInstanceRunConf(awsSession *session.Session, conf ParallelRunConf, jobNumber int, testRegex string, ipPool networkutils.IPPool, hardware []*api.Hardware, hardwareCount int, tinkerbellAirgappedTest bool, testRunnerType TestRunnerType, testRunnerConfig *TestInfraConfig) instanceRunConf { jobID := fmt.Sprintf("%s-%d", conf.JobId, jobNumber) return instanceRunConf{ - session: awsSession, - instanceProfileName: conf.InstanceProfileName, - storageBucket: conf.StorageBucket, - jobId: jobID, - parentJobId: conf.JobId, - regex: testRegex, - ipPool: ipPool, - hardware: hardware, - hardwareCount: hardwareCount, - tinkerbellAirgappedTest: tinkerbellAirgappedTest, - bundlesOverride: conf.BundlesOverride, - testReportFolder: conf.TestReportFolder, - branchName: conf.BranchName, - cleanupVms: conf.CleanupVms, - testRunnerType: testRunnerType, - testRunnerConfig: *testRunnerConfig, - logger: conf.Logger.WithValues("jobID", jobID, "test", testRegex), + 
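// One-to-one mapping from the parallel run conf onto the exported
// instance config fields, plus the per-instance jobID and the regex
// selecting the tests this instance will run.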
Session: awsSession, + InstanceProfileName: conf.InstanceProfileName, + StorageBucket: conf.StorageBucket, + JobID: jobID, + ParentJobID: conf.JobId, + Regex: testRegex, + IPPool: ipPool, + Hardware: hardware, + HardwareCount: hardwareCount, + TinkerbellAirgappedTest: tinkerbellAirgappedTest, + BundlesOverride: conf.BundlesOverride, + TestReportFolder: conf.TestReportFolder, + BranchName: conf.BranchName, + CleanupVMs: conf.CleanupVms, + TestRunnerType: testRunnerType, + TestRunnerConfig: *testRunnerConfig, + Logger: conf.Logger.WithValues("jobID", jobID, "test", testRegex), } } func logTestGroups(logger logr.Logger, instancesConf []instanceRunConf) { testGroups := make([]string, 0, len(instancesConf)) for _, i := range instancesConf { - testGroups = append(testGroups, i.regex) + testGroups = append(testGroups, i.Regex) } logger.V(1).Info("Running tests in parallel", "testsGroups", testGroups) } @@ -551,24 +559,24 @@ func getAirgappedHardwarePool(storageBucket string) ([]*api.Hardware, error) { } func reserveTinkerbellHardware(conf *instanceRunConf, invCatalogue *hardwareCatalogue) error { - reservedTinkerbellHardware, err := invCatalogue.reserveHardware(conf.hardwareCount) + reservedTinkerbellHardware, err := invCatalogue.reserveHardware(conf.HardwareCount) if err != nil { return fmt.Errorf("timed out waiting for hardware") } - conf.hardware = reservedTinkerbellHardware + conf.Hardware = reservedTinkerbellHardware logTinkerbellTestHardwareInfo(conf, "Reserved") return nil } func releaseTinkerbellHardware(conf *instanceRunConf, invCatalogue *hardwareCatalogue) { logTinkerbellTestHardwareInfo(conf, "Releasing") - invCatalogue.releaseHardware(conf.hardware) + invCatalogue.releaseHardware(conf.Hardware) } func logTinkerbellTestHardwareInfo(conf *instanceRunConf, action string) { var hardwareInfo []string - for _, hardware := range conf.hardware { + for _, hardware := range conf.Hardware { hardwareInfo = append(hardwareInfo, hardware.Hostname) } - conf.logger.V(1).Info(action+" hardware for TestRunner", "hardwarePool", strings.Join(hardwareInfo, ", ")) + conf.Logger.V(1).Info(action+" hardware for TestRunner", "hardwarePool", strings.Join(hardwareInfo, ", ")) } diff --git a/internal/test/e2e/setup.go b/internal/test/e2e/setup.go index 27d1f1aa58ff..a30f3635ef89 100644 --- a/internal/test/e2e/setup.go +++ b/internal/test/e2e/setup.go @@ -49,19 +49,19 @@ type E2ESession struct { func newE2ESession(instanceId string, conf instanceRunConf) (*E2ESession, error) { e := &E2ESession{ - session: conf.session, + session: conf.Session, instanceId: instanceId, - instanceProfileName: conf.instanceProfileName, - storageBucket: conf.storageBucket, - jobId: conf.jobId, - ipPool: conf.ipPool, + instanceProfileName: conf.InstanceProfileName, + storageBucket: conf.StorageBucket, + jobId: conf.JobID, + ipPool: conf.IPPool, testEnvVars: make(map[string]string), - bundlesOverride: conf.bundlesOverride, - cleanupVms: conf.cleanupVms, + bundlesOverride: conf.BundlesOverride, + cleanupVms: conf.CleanupVMs, requiredFiles: requiredFiles, - branchName: conf.branchName, - hardware: conf.hardware, - logger: conf.logger, + branchName: conf.BranchName, + hardware: conf.Hardware, + logger: conf.Logger, } return e, nil @@ -184,6 +184,7 @@ func (e *E2ESession) setup(regex string) error { } ipPool := e.ipPool.ToString() + if ipPool != "" { e.testEnvVars[e2etests.ClusterIPPoolEnvVar] = ipPool } diff --git a/internal/test/e2e/testRunner.go b/internal/test/e2e/testRunner.go index 0ce0e08b957e..7f6e9490e83a 100644 --- 
a/internal/test/e2e/testRunner.go +++ b/internal/test/e2e/testRunner.go @@ -132,10 +132,10 @@ func (v *VSphereTestRunner) setEnvironment() (map[string]string, error) { } func (v *VSphereTestRunner) createInstance(c instanceRunConf) (string, error) { - name := getTestRunnerName(v.logger, c.jobId) + name := getTestRunnerName(v.logger, c.JobID) v.logger.V(1).Info("Creating vSphere Test Runner instance", "name", name) - ssmActivationInfo, err := ssm.CreateActivation(c.session, name, c.instanceProfileName) + ssmActivationInfo, err := ssm.CreateActivation(c.Session, name, c.InstanceProfileName) if err != nil { return "", fmt.Errorf("unable to create ssm activation: %v", err) } @@ -152,7 +152,7 @@ func (v *VSphereTestRunner) createInstance(c instanceRunConf) (string, error) { PropertyMapping: []vsphere.OVFProperty{ {Key: ssmActivationCodeKey, Value: ssmActivationInfo.ActivationCode}, {Key: ssmActivationIdKey, Value: ssmActivationInfo.ActivationID}, - {Key: ssmActivationRegionKey, Value: *c.session.Config.Region}, + {Key: ssmActivationRegionKey, Value: *c.Session.Config.Region}, }, } @@ -163,7 +163,7 @@ func (v *VSphereTestRunner) createInstance(c instanceRunConf) (string, error) { var ssmInstance *aws_ssm.InstanceInformation err = retrier.Retry(10, 5*time.Second, func() error { - ssmInstance, err = ssm.GetInstanceByActivationId(c.session, ssmActivationInfo.ActivationID) + ssmInstance, err = ssm.GetInstanceByActivationId(c.Session, ssmActivationInfo.ActivationID) if err != nil { return fmt.Errorf("failed to get ssm instance info post ovf deployment: %v", err) } @@ -180,19 +180,19 @@ func (v *VSphereTestRunner) createInstance(c instanceRunConf) (string, error) { } func (e *Ec2TestRunner) createInstance(c instanceRunConf) (string, error) { - name := getTestRunnerName(e.logger, c.jobId) + name := getTestRunnerName(e.logger, c.JobID) e.logger.V(1).Info("Creating ec2 Test Runner instance", "name", name) - instanceId, err := ec2.CreateInstance(c.session, e.AmiID, key, tag, c.instanceProfileName, e.SubnetID, name) + instanceID, err := ec2.CreateInstance(c.Session, e.AmiID, key, tag, c.InstanceProfileName, e.SubnetID, name) if err != nil { return "", fmt.Errorf("creating instance for e2e tests: %v", err) } - e.logger.V(1).Info("Instance created", "instance-id", instanceId) - e.InstanceID = instanceId - return instanceId, nil + e.logger.V(1).Info("Instance created", "instance-id", instanceID) + e.InstanceID = instanceID + return instanceID, nil } func (v *VSphereTestRunner) tagInstance(c instanceRunConf, key, value string) error { - vmName := getTestRunnerName(v.logger, c.jobId) + vmName := getTestRunnerName(v.logger, c.JobID) vmPath := fmt.Sprintf("/%s/vm/%s/%s", v.Datacenter, v.Folder, vmName) tag := fmt.Sprintf("%s:%s", key, value) @@ -203,7 +203,7 @@ func (v *VSphereTestRunner) tagInstance(c instanceRunConf, key, value string) er } func (e *Ec2TestRunner) tagInstance(c instanceRunConf, key, value string) error { - err := ec2.TagInstance(c.session, e.InstanceID, key, value) + err := ec2.TagInstance(c.Session, e.InstanceID, key, value) if err != nil { return fmt.Errorf("failed to tag Ec2 test runner: %v", err) } @@ -211,9 +211,9 @@ func (e *Ec2TestRunner) tagInstance(c instanceRunConf, key, value string) error } func (v *VSphereTestRunner) decommInstance(c instanceRunConf) error { - _, deregisterError := ssm.DeregisterInstance(c.session, v.InstanceID) - _, deactivateError := ssm.DeleteActivation(c.session, v.ActivationId) - deleteError := cleanup.VsphereRmVms(context.Background(), 
getTestRunnerName(v.logger, c.jobId), executables.WithGovcEnvMap(v.envMap)) + _, deregisterError := ssm.DeregisterInstance(c.Session, v.InstanceID) + _, deactivateError := ssm.DeleteActivation(c.Session, v.ActivationId) + deleteError := cleanup.VsphereRmVms(context.Background(), getTestRunnerName(v.logger, c.JobID), executables.WithGovcEnvMap(v.envMap)) if deregisterError != nil { return fmt.Errorf("failed to decommission vsphere test runner ssm instance: %v", deregisterError) @@ -231,9 +231,9 @@ func (v *VSphereTestRunner) decommInstance(c instanceRunConf) error { } func (e *Ec2TestRunner) decommInstance(c instanceRunConf) error { - runnerName := getTestRunnerName(e.logger, c.jobId) + runnerName := getTestRunnerName(e.logger, c.JobID) e.logger.V(1).Info("Terminating ec2 Test Runner instance", "instanceID", e.InstanceID, "runner", runnerName) - if err := ec2.TerminateEc2Instances(c.session, aws.StringSlice([]string{e.InstanceID})); err != nil { + if err := ec2.TerminateEc2Instances(c.Session, aws.StringSlice([]string{e.InstanceID})); err != nil { return fmt.Errorf("terminating instance %s for runner %s: %w", e.InstanceID, runnerName, err) } diff --git a/test/framework/cluster.go b/test/framework/cluster.go index 2b5c2a6b2a26..631740a65f35 100644 --- a/test/framework/cluster.go +++ b/test/framework/cluster.go @@ -59,6 +59,7 @@ const ( JobIdVar = "T_JOB_ID" BundlesOverrideVar = "T_BUNDLES_OVERRIDE" ClusterIPPoolEnvVar = "T_CLUSTER_IP_POOL" + ClusterIPEnvVar = "T_CLUSTER_IP" CleanupVmsVar = "T_CLEANUP_VMS" hardwareYamlPath = "hardware.yaml" hardwareCsvPath = "hardware.csv" From f44c243f33c76ecbf504032998c49e3043d577a5 Mon Sep 17 00:00:00 2001 From: Abhay Krishna Date: Thu, 18 Apr 2024 16:30:53 -0700 Subject: [PATCH 075/193] Allow users to access docs for penultimate EKS-A version (#8008) --- docs/config.toml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/config.toml b/docs/config.toml index 69a4a531cb9d..ce11552441cf 100644 --- a/docs/config.toml +++ b/docs/config.toml @@ -184,3 +184,9 @@ fullversion = "v0.19" version = "v0.19" docsbranch = "main" url = "/docs/" + +[[params.versions]] +fullversion = "v0.18" +version = "v0.18" +docsbranch = "release-0.18" +url = "https://release-0-18.anywhere.eks.amazonaws.com/docs/" From 08f65e818aad618381e2fdd693eb1fcf501f6226 Mon Sep 17 00:00:00 2001 From: Chris Doherty Date: Thu, 18 Apr 2024 19:48:53 -0500 Subject: [PATCH 076/193] Log e2e instance test runner config (#8009) --- internal/test/e2e/run.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/internal/test/e2e/run.go b/internal/test/e2e/run.go index 197401caa56e..d1417f5c1b69 100644 --- a/internal/test/e2e/run.go +++ b/internal/test/e2e/run.go @@ -220,7 +220,14 @@ func RunTests(conf instanceRunConf, inventoryCatalogue map[string]*hardwareCatal defer releaseTinkerbellHardware(&conf, hardwareCatalogue) } - conf.Logger.Info("Creating runner instance", "cfg", conf) + conf.Logger.Info("Creating runner instance", + "instance_profile_name", conf.InstanceProfileName, "storage_bucket", conf.StorageBucket, + "parent_job_id", conf.ParentJobID, "regex", conf.Regex, "test_report_folder", conf.TestReportFolder, + "branch_name", conf.BranchName, "ip_pool", conf.IPPool.ToString(), + "hardware_count", conf.HardwareCount, "tinkerbell_airgapped_test", conf.TinkerbellAirgappedTest, + "bundles_override", conf.BundlesOverride, "test_runner_type", conf.TestRunnerType, + "cleanup_vms", conf.CleanupVMs) + instanceId, err := testRunner.createInstance(conf) if err != nil { return "", 
nil, err From d335db691a71f3e9bf638be210a6ef65a0143e84 Mon Sep 17 00:00:00 2001 From: Aravind Ramalingam <60027164+pokearu@users.noreply.github.com> Date: Thu, 18 Apr 2024 18:40:53 -0700 Subject: [PATCH 077/193] Adding permissions for controller packages helm upgrade (#8012) --- config/manifest/eksa-components.yaml | 1 + config/rbac/role.yaml | 1 + controllers/cluster_controller.go | 2 +- pkg/curatedpackages/packagecontrollerclient.go | 8 +++++--- 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/config/manifest/eksa-components.yaml b/config/manifest/eksa-components.yaml index f7fe0c2459f4..6b287f2203f2 100644 --- a/config/manifest/eksa-components.yaml +++ b/config/manifest/eksa-components.yaml @@ -7140,6 +7140,7 @@ rules: - delete - get - list + - patch - update - watch - apiGroups: diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index f5b81ea51e36..c678596cc00b 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -54,6 +54,7 @@ rules: - delete - get - list + - patch - update - watch - apiGroups: diff --git a/controllers/cluster_controller.go b/controllers/cluster_controller.go index 11af51efe2fd..0a3612e0b074 100644 --- a/controllers/cluster_controller.go +++ b/controllers/cluster_controller.go @@ -170,7 +170,7 @@ func (r *ClusterReconciler) SetupWithManager(mgr ctrl.Manager, log logr.Logger) } // +kubebuilder:rbac:groups="",resources=events,verbs=create;patch;update -// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;delete;update +// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;delete;update;patch // +kubebuilder:rbac:groups="",namespace=eksa-system,resources=secrets,verbs=patch;update // +kubebuilder:rbac:groups="",resources=namespaces,verbs=create;delete // +kubebuilder:rbac:groups="",resources=nodes,verbs=list diff --git a/pkg/curatedpackages/packagecontrollerclient.go b/pkg/curatedpackages/packagecontrollerclient.go index b2deb0813e44..6d8e886ce7b6 100644 --- a/pkg/curatedpackages/packagecontrollerclient.go +++ b/pkg/curatedpackages/packagecontrollerclient.go @@ -310,9 +310,11 @@ func (pc *PackageControllerClient) generateHelmOverrideValues() ([]byte, error) endpoint, username, password, caCertContent, insecureSkipVerify := "", defaultRegistryMirrorUsername, defaultRegistryMirrorPassword, "", "false" if pc.registryMirror != nil { endpoint = pc.registryMirror.BaseRegistry - username, password, err = config.ReadCredentials() - if err != nil { - return []byte{}, err + if pc.registryMirror.Auth { + username, password, err = config.ReadCredentials() + if err != nil { + return []byte{}, err + } } caCertContent = pc.registryMirror.CACertContent if pc.registryMirror.InsecureSkipVerify { From ac9da3b1e262c785bf82c941e5dc3d7144cf2f33 Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Fri, 19 Apr 2024 02:29:33 -0700 Subject: [PATCH 078/193] [PR BOT] Generate release testdata files (#8013) --- .../testdata/main-bundle-release.yaml | 110 +++++++++--------- 1 file changed, 55 insertions(+), 55 deletions(-) diff --git a/release/cli/pkg/operations/testdata/main-bundle-release.yaml b/release/cli/pkg/operations/testdata/main-bundle-release.yaml index 6f67e9767ad1..d7d8eec719bd 100644 --- a/release/cli/pkg/operations/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/operations/testdata/main-bundle-release.yaml @@ -28,7 +28,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux 
- uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/metadata.yaml version: v1.6.4+abcdef1 @@ -152,7 +152,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -185,7 +185,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.4/metadata.yaml version: v1.6.4+abcdef1 @@ -209,7 +209,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.4/metadata.yaml version: v1.6.4+abcdef1 @@ -226,7 +226,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -261,7 +261,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.29.0/cri-tools-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.30.0/cri-tools-v0.0.0-dev-build.0-linux-amd64.tar.gz etcdadm: arch: - amd64 @@ -348,7 +348,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/metadata.yaml version: v1.0.13+abcdef1 @@ -372,7 +372,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/metadata.yaml version: v1.0.20+abcdef1 @@ -764,7 +764,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -806,7 +806,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/metadata.yaml version: v1.6.4+abcdef1 @@ -930,7 +930,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -963,7 +963,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.4/metadata.yaml version: v1.6.4+abcdef1 @@ -987,7 +987,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.4/metadata.yaml version: v1.6.4+abcdef1 @@ -1004,7 +1004,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -1039,7 +1039,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.29.0/cri-tools-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.30.0/cri-tools-v0.0.0-dev-build.0-linux-amd64.tar.gz etcdadm: arch: - amd64 @@ -1126,7 +1126,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: 
public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/metadata.yaml version: v1.0.13+abcdef1 @@ -1150,7 +1150,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/metadata.yaml version: v1.0.20+abcdef1 @@ -1542,7 +1542,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -1584,7 +1584,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/metadata.yaml version: v1.6.4+abcdef1 @@ -1708,7 +1708,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -1741,7 +1741,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.4/metadata.yaml version: v1.6.4+abcdef1 @@ -1765,7 +1765,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.4/metadata.yaml version: v1.6.4+abcdef1 @@ -1782,7 +1782,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 manager: arch: 
- amd64 @@ -1817,7 +1817,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.29.0/cri-tools-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.30.0/cri-tools-v0.0.0-dev-build.0-linux-amd64.tar.gz etcdadm: arch: - amd64 @@ -1904,7 +1904,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/metadata.yaml version: v1.0.13+abcdef1 @@ -1928,7 +1928,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/metadata.yaml version: v1.0.20+abcdef1 @@ -2320,7 +2320,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -2362,7 +2362,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/metadata.yaml version: v1.6.4+abcdef1 @@ -2486,7 +2486,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -2519,7 +2519,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.4/metadata.yaml version: v1.6.4+abcdef1 @@ -2543,7 +2543,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: 
public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.4/metadata.yaml version: v1.6.4+abcdef1 @@ -2560,7 +2560,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -2595,7 +2595,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.29.0/cri-tools-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.30.0/cri-tools-v0.0.0-dev-build.0-linux-amd64.tar.gz etcdadm: arch: - amd64 @@ -2682,7 +2682,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/metadata.yaml version: v1.0.13+abcdef1 @@ -2706,7 +2706,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/metadata.yaml version: v1.0.20+abcdef1 @@ -3098,7 +3098,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -3140,7 +3140,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/metadata.yaml version: v1.6.4+abcdef1 @@ -3264,7 +3264,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: 
public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -3297,7 +3297,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.4/metadata.yaml version: v1.6.4+abcdef1 @@ -3321,7 +3321,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.4/metadata.yaml version: v1.6.4+abcdef1 @@ -3338,7 +3338,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -3373,7 +3373,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.29.0/cri-tools-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.30.0/cri-tools-v0.0.0-dev-build.0-linux-amd64.tar.gz etcdadm: arch: - amd64 @@ -3460,7 +3460,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/metadata.yaml version: v1.0.13+abcdef1 @@ -3484,7 +3484,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/metadata.yaml version: v1.0.20+abcdef1 @@ -3876,7 +3876,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -3918,7 +3918,7 @@ spec: imageDigest: 
sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/metadata.yaml version: v1.6.4+abcdef1 @@ -4042,7 +4042,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -4075,7 +4075,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.4/metadata.yaml version: v1.6.4+abcdef1 @@ -4099,7 +4099,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.4/metadata.yaml version: v1.6.4+abcdef1 @@ -4116,7 +4116,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -4151,7 +4151,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.29.0/cri-tools-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.30.0/cri-tools-v0.0.0-dev-build.0-linux-amd64.tar.gz etcdadm: arch: - amd64 @@ -4238,7 +4238,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/metadata.yaml version: v1.0.13+abcdef1 @@ -4262,7 +4262,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: 
public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/metadata.yaml version: v1.0.20+abcdef1 @@ -4654,7 +4654,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.16.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -4672,7 +4672,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cloud-provider-vsphere os: linux - uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.30.0-rc.0-eks-d-1-30-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.30.0-eks-d-1-30-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/metadata.yaml version: v1.9.3+abcdef1 From 8ef6e92ff17fd6aa193e961e948aa322005bc729 Mon Sep 17 00:00:00 2001 From: Chris Doherty Date: Fri, 19 Apr 2024 11:36:36 -0500 Subject: [PATCH 079/193] Revert "Log in to public ECR for E2E test (#8001)" (#8015) This reverts commit 7bf18bb2f1d467f0b0e220b754206109c4611a4e. --- internal/test/e2e/ecr.go | 22 ---------------------- internal/test/e2e/setup.go | 5 ----- 2 files changed, 27 deletions(-) delete mode 100644 internal/test/e2e/ecr.go diff --git a/internal/test/e2e/ecr.go b/internal/test/e2e/ecr.go deleted file mode 100644 index 9738ba09e7a2..000000000000 --- a/internal/test/e2e/ecr.go +++ /dev/null @@ -1,22 +0,0 @@ -package e2e - -import ( - "fmt" - - "github.com/go-logr/logr" - - "github.com/aws/eks-anywhere/internal/pkg/ssm" -) - -func (e *E2ESession) loginToPublicECR() error { - e.logger.V(1).Info("Logging in to public ECR") - - command := "aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws" - if err := ssm.Run(e.session, logr.Discard(), e.instanceId, command, ssmTimeout); err != nil { - return fmt.Errorf("sign in to public ecr: %v", err) - } - - e.logger.V(1).Info("Logged in to public ECR") - - return nil -} diff --git a/internal/test/e2e/setup.go b/internal/test/e2e/setup.go index a30f3635ef89..e6ea140a88ca 100644 --- a/internal/test/e2e/setup.go +++ b/internal/test/e2e/setup.go @@ -178,11 +178,6 @@ func (e *E2ESession) setup(regex string) error { return err } - err = e.loginToPublicECR() - if err != nil { - return err - } - ipPool := e.ipPool.ToString() if ipPool != "" { From 5831074905d0ac45b04e9c4415dfc97b3aa645e4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 19 Apr 2024 11:54:36 -0700 Subject: [PATCH 080/193] Bump golang.org/x/net in /test/e2e/tools/eks-anywhere-test-tool (#8014) Bumps [golang.org/x/net](https://github.com/golang/net) from 0.17.0 to 0.23.0. - [Commits](https://github.com/golang/net/compare/v0.17.0...v0.23.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- test/e2e/tools/eks-anywhere-test-tool/go.mod | 6 +++--- test/e2e/tools/eks-anywhere-test-tool/go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/test/e2e/tools/eks-anywhere-test-tool/go.mod b/test/e2e/tools/eks-anywhere-test-tool/go.mod index 123376bdf975..585f8df612e6 100644 --- a/test/e2e/tools/eks-anywhere-test-tool/go.mod +++ b/test/e2e/tools/eks-anywhere-test-tool/go.mod @@ -35,9 +35,9 @@ require ( go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.22.0 // indirect - golang.org/x/net v0.17.0 // indirect - golang.org/x/sys v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect + golang.org/x/net v0.23.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/text v0.14.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.66.4 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/test/e2e/tools/eks-anywhere-test-tool/go.sum b/test/e2e/tools/eks-anywhere-test-tool/go.sum index c8bdc3e009f5..15f256cfccb0 100644 --- a/test/e2e/tools/eks-anywhere-test-tool/go.sum +++ b/test/e2e/tools/eks-anywhere-test-tool/go.sum @@ -938,8 +938,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1046,8 +1046,8 @@ golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201113234701-d7a72108b828/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1061,8 +1061,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod 
h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From 18edabcb6a1d2315872865f60931777516f2747d Mon Sep 17 00:00:00 2001 From: Aravind Ramalingam <60027164+pokearu@users.noreply.github.com> Date: Fri, 19 Apr 2024 14:03:33 -0700 Subject: [PATCH 081/193] Adding namespaces read permissions to eksa controller (#8017) --- config/manifest/eksa-components.yaml | 2 ++ config/rbac/role.yaml | 2 ++ controllers/cluster_controller.go | 2 +- 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/config/manifest/eksa-components.yaml b/config/manifest/eksa-components.yaml index 6b287f2203f2..6eb5176e7194 100644 --- a/config/manifest/eksa-components.yaml +++ b/config/manifest/eksa-components.yaml @@ -7115,6 +7115,8 @@ rules: verbs: - create - delete + - get + - list - apiGroups: - "" resources: diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index c678596cc00b..acd2ae7f0898 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -29,6 +29,8 @@ rules: verbs: - create - delete + - get + - list - apiGroups: - "" resources: diff --git a/controllers/cluster_controller.go b/controllers/cluster_controller.go index 0a3612e0b074..796f08752856 100644 --- a/controllers/cluster_controller.go +++ b/controllers/cluster_controller.go @@ -172,7 +172,7 @@ func (r *ClusterReconciler) SetupWithManager(mgr ctrl.Manager, log logr.Logger) // +kubebuilder:rbac:groups="",resources=events,verbs=create;patch;update // +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;delete;update;patch // +kubebuilder:rbac:groups="",namespace=eksa-system,resources=secrets,verbs=patch;update -// +kubebuilder:rbac:groups="",resources=namespaces,verbs=create;delete +// +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;create;delete // +kubebuilder:rbac:groups="",resources=nodes,verbs=list // +kubebuilder:rbac:groups=addons.cluster.x-k8s.io,resources=clusterresourcesets,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=anywhere.eks.amazonaws.com,resources=clusters;gitopsconfigs;snowmachineconfigs;snowdatacenterconfigs;snowippools;vspheredatacenterconfigs;vspheremachineconfigs;dockerdatacenterconfigs;tinkerbellmachineconfigs;tinkerbelltemplateconfigs;tinkerbelldatacenterconfigs;cloudstackdatacenterconfigs;cloudstackmachineconfigs;nutanixdatacenterconfigs;nutanixmachineconfigs;awsiamconfigs;oidcconfigs;awsiamconfigs;fluxconfigs,verbs=get;list;watch;update;patch From 02b51c128357ab4d84d9e0f4e539a1e5a2125044 Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Sun, 21 Apr 2024 21:38:35 -0700 Subject: [PATCH 082/193] [PR BOT] Generate release testdata files (#8019) --- .../testdata/main-bundle-release.yaml | 108 +++++++++--------- 1 file changed, 54 insertions(+), 54 deletions(-) diff --git 
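A note on PATCH 081 above: neither YAML file is edited by hand. The new get/list verbs originate from the kubebuilder RBAC marker changed in controllers/cluster_controller.go; controller-gen then regenerates config/rbac/role.yaml and the aggregated config/manifest/eksa-components.yaml from those markers. A minimal illustration of the marker style (the comment below is illustrative only, not an additional permission from the patch):

// Markers like this one, placed on the reconciler, are the source of truth
// for the generated ClusterRole rules.
// +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;create;delete

Shipping the marker change and the regenerated manifests in the same commit, as done here, keeps the permissions the controller asks for in code in sync with the RBAC it deploys with. (The regenerated testdata diff of PATCH 082 resumes below.)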
a/release/cli/pkg/operations/testdata/main-bundle-release.yaml b/release/cli/pkg/operations/testdata/main-bundle-release.yaml index d7d8eec719bd..121cc07bd806 100644 --- a/release/cli/pkg/operations/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/operations/testdata/main-bundle-release.yaml @@ -161,7 +161,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-cloudstack/manifests/infrastructure-cloudstack/v0.4.10-rc1/metadata.yaml version: v0.4.10-rc1+abcdef1 @@ -354,7 +354,7 @@ spec: version: v1.0.13+abcdef1 etcdadmController: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/bootstrap-components.yaml controller: arch: - amd64 @@ -363,7 +363,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: etcdadm-controller os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.20-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.21-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -374,8 +374,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/metadata.yaml - version: v1.0.20+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/metadata.yaml + version: v1.0.21+abcdef1 flux: helmController: arch: @@ -460,7 +460,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/metadata.yaml version: v1.3.2+abcdef1 @@ -518,7 +518,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -562,7 +562,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/metadata.yaml tinkerbellStack: @@ -773,7 +773,7 @@ spec: imageDigest: 
sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -939,7 +939,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-cloudstack/manifests/infrastructure-cloudstack/v0.4.10-rc1/metadata.yaml version: v0.4.10-rc1+abcdef1 @@ -1132,7 +1132,7 @@ spec: version: v1.0.13+abcdef1 etcdadmController: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/bootstrap-components.yaml controller: arch: - amd64 @@ -1141,7 +1141,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: etcdadm-controller os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.20-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.21-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -1152,8 +1152,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/metadata.yaml - version: v1.0.20+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/metadata.yaml + version: v1.0.21+abcdef1 flux: helmController: arch: @@ -1238,7 +1238,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/metadata.yaml version: v1.3.2+abcdef1 @@ -1296,7 +1296,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -1340,7 +1340,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/metadata.yaml tinkerbellStack: @@ -1551,7 +1551,7 @@ spec: 
imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -1717,7 +1717,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-cloudstack/manifests/infrastructure-cloudstack/v0.4.10-rc1/metadata.yaml version: v0.4.10-rc1+abcdef1 @@ -1910,7 +1910,7 @@ spec: version: v1.0.13+abcdef1 etcdadmController: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/bootstrap-components.yaml controller: arch: - amd64 @@ -1919,7 +1919,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: etcdadm-controller os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.20-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.21-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -1930,8 +1930,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/metadata.yaml - version: v1.0.20+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/metadata.yaml + version: v1.0.21+abcdef1 flux: helmController: arch: @@ -2016,7 +2016,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/metadata.yaml version: v1.3.2+abcdef1 @@ -2074,7 +2074,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -2118,7 +2118,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/metadata.yaml tinkerbellStack: @@ -2329,7 
+2329,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -2495,7 +2495,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-cloudstack/manifests/infrastructure-cloudstack/v0.4.10-rc1/metadata.yaml version: v0.4.10-rc1+abcdef1 @@ -2688,7 +2688,7 @@ spec: version: v1.0.13+abcdef1 etcdadmController: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/bootstrap-components.yaml controller: arch: - amd64 @@ -2697,7 +2697,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: etcdadm-controller os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.20-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.21-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -2708,8 +2708,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/metadata.yaml - version: v1.0.20+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/metadata.yaml + version: v1.0.21+abcdef1 flux: helmController: arch: @@ -2794,7 +2794,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/metadata.yaml version: v1.3.2+abcdef1 @@ -2852,7 +2852,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -2896,7 +2896,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/metadata.yaml 
tinkerbellStack: @@ -3107,7 +3107,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -3273,7 +3273,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-cloudstack/manifests/infrastructure-cloudstack/v0.4.10-rc1/metadata.yaml version: v0.4.10-rc1+abcdef1 @@ -3466,7 +3466,7 @@ spec: version: v1.0.13+abcdef1 etcdadmController: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/bootstrap-components.yaml controller: arch: - amd64 @@ -3475,7 +3475,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: etcdadm-controller os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.20-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.21-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -3486,8 +3486,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/metadata.yaml - version: v1.0.20+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/metadata.yaml + version: v1.0.21+abcdef1 flux: helmController: arch: @@ -3572,7 +3572,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/metadata.yaml version: v1.3.2+abcdef1 @@ -3630,7 +3630,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -3674,7 +3674,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/metadata.yaml tinkerbellStack: @@ -3885,7 +3885,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -4051,7 +4051,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-cloudstack/manifests/infrastructure-cloudstack/v0.4.10-rc1/metadata.yaml version: v0.4.10-rc1+abcdef1 @@ -4244,7 +4244,7 @@ spec: version: v1.0.13+abcdef1 etcdadmController: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/bootstrap-components.yaml controller: arch: - amd64 @@ -4253,7 +4253,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: etcdadm-controller os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.20-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.21-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -4264,8 +4264,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.20/metadata.yaml - version: v1.0.20+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/metadata.yaml + version: v1.0.21+abcdef1 flux: helmController: arch: @@ -4350,7 +4350,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/metadata.yaml version: v1.3.2+abcdef1 @@ -4408,7 +4408,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -4452,7 +4452,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 
metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/metadata.yaml tinkerbellStack: @@ -4663,7 +4663,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-vip os: linux - uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.7.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 From 443a57fe596b1057aecff9b94d309ee1f2c9c4d1 Mon Sep 17 00:00:00 2001 From: Chris Doherty Date: Mon, 22 Apr 2024 09:24:59 -0500 Subject: [PATCH 083/193] Add vSphere test runner ovf options to logging in e2e (#8020) --- internal/test/e2e/testRunner.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/internal/test/e2e/testRunner.go b/internal/test/e2e/testRunner.go index 7f6e9490e83a..9f9e9421afbb 100644 --- a/internal/test/e2e/testRunner.go +++ b/internal/test/e2e/testRunner.go @@ -2,6 +2,7 @@ package e2e import ( "context" + "encoding/json" "fmt" "os" "strconv" @@ -133,7 +134,6 @@ func (v *VSphereTestRunner) setEnvironment() (map[string]string, error) { func (v *VSphereTestRunner) createInstance(c instanceRunConf) (string, error) { name := getTestRunnerName(v.logger, c.JobID) - v.logger.V(1).Info("Creating vSphere Test Runner instance", "name", name) ssmActivationInfo, err := ssm.CreateActivation(c.Session, name, c.InstanceProfileName) if err != nil { @@ -155,6 +155,11 @@ func (v *VSphereTestRunner) createInstance(c instanceRunConf) (string, error) { {Key: ssmActivationRegionKey, Value: *c.Session.Config.Region}, }, } + optsJSON, err := json.Marshal(opts) + if err != nil { + return "", err + } + v.logger.V(1).Info("Creating vSphere Test Runner instance", "name", name, "ovf_deployment_opts", optsJSON) // deploy template if err := vsphere.DeployTemplate(v.envMap, v.Library, v.Template, name, v.Folder, v.Datacenter, v.Datastore, v.ResourcePool, opts); err != nil { From 2267f7d51b1125c7e9c2e1ce08d5e0513a691a9b Mon Sep 17 00:00:00 2001 From: Abhay Krishna Date: Mon, 22 Apr 2024 12:29:36 -0700 Subject: [PATCH 084/193] Navigate to same page when switching EKS-A versions (#8021) --- docs/config.toml | 4 ++-- docs/layouts/partials/navbar-version-selector.html | 9 +++++++++ 2 files changed, 11 insertions(+), 2 deletions(-) create mode 100644 docs/layouts/partials/navbar-version-selector.html diff --git a/docs/config.toml b/docs/config.toml index ce11552441cf..8a1006cc1f09 100644 --- a/docs/config.toml +++ b/docs/config.toml @@ -183,10 +183,10 @@ desc = "Development takes place here!"
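A note on PATCH 083 above: the log statement moves from the top of createInstance to after the OVF property map is built, so the runner records the exact ovf_deployment_opts it is about to hand to vsphere.DeployTemplate. Marshaling opts with encoding/json collapses the whole property set into a single structured value for the logr key; a sketch of the same pattern, assuming a logr.Logger named log and any JSON-serializable opts value:

optsJSON, err := json.Marshal(opts)
if err != nil {
    return err
}
log.V(1).Info("deploying template", "opts", optsJSON)

One trade-off visible in the diff: a marshal failure now aborts instance creation rather than merely skipping the log line. (The docs/config.toml hunk of PATCH 084 resumes below.)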
fullversion = "v0.19" version = "v0.19" docsbranch = "main" -url = "/docs/" +url = "https://anywhere.eks.amazonaws.com" [[params.versions]] fullversion = "v0.18" version = "v0.18" docsbranch = "release-0.18" -url = "https://release-0-18.anywhere.eks.amazonaws.com/docs/" +url = "https://release-0-18.anywhere.eks.amazonaws.com" diff --git a/docs/layouts/partials/navbar-version-selector.html b/docs/layouts/partials/navbar-version-selector.html new file mode 100644 index 000000000000..6475f87141c0 --- /dev/null +++ b/docs/layouts/partials/navbar-version-selector.html @@ -0,0 +1,9 @@ + + \ No newline at end of file From 7bdba5bb254f80f1984d8efa359c71b4c0e0465d Mon Sep 17 00:00:00 2001 From: Xu Deng Date: Mon, 22 Apr 2024 16:08:37 -0400 Subject: [PATCH 085/193] Increase ssm wait time during e2e test (#8025) Co-authored-by: EKS Distro PR Bot --- internal/pkg/ssm/command.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/pkg/ssm/command.go b/internal/pkg/ssm/command.go index bd25e7abb910..60d50880e6d0 100644 --- a/internal/pkg/ssm/command.go +++ b/internal/pkg/ssm/command.go @@ -24,7 +24,7 @@ var initE2EDirCommand = "mkdir -p /home/e2e/bin && cd /home/e2e" // WaitForSSMReady waits for the SSM command to be ready. func WaitForSSMReady(session *session.Session, instanceID string, timeout time.Duration) error { - err := retrier.Retry(10, 20*time.Second, func() error { + err := retrier.Retry(20, 20*time.Second, func() error { return Run(session, logr.Discard(), instanceID, "ls", timeout) }) if err != nil { From d359b2550109a6e24ec275423498c3c8111a1dc5 Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Mon, 22 Apr 2024 23:33:37 -0700 Subject: [PATCH 086/193] [PR BOT] Generate release testdata files (#8031) --- .../testdata/main-bundle-release.yaml | 60 +++++++++---------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/release/cli/pkg/operations/testdata/main-bundle-release.yaml b/release/cli/pkg/operations/testdata/main-bundle-release.yaml index 121cc07bd806..e9fd482d01df 100644 --- a/release/cli/pkg/operations/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/operations/testdata/main-bundle-release.yaml @@ -473,12 +473,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: credential-provider-package os: linux - uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.2-eks-a-v0.0.0-dev-build.1 helmChart: description: Helm chart for eks-anywhere-packages imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.2-eks-a-v0.0.0-dev-build.1 packageController: arch: - amd64 @@ -487,7 +487,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.2-eks-a-v0.0.0-dev-build.1 tokenRefresher: arch: - amd64 @@ -496,8 +496,8 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: ecr-token-refresher os: linux 
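A note on PATCH 085 above: retrier.Retry(20, 20*time.Second, fn) runs fn up to 20 times with a 20-second pause between attempts (assuming the retrier package's fixed-interval behavior), so the change doubles the worst-case budget for the SSM agent to come online from roughly 10 x 20 s = 200 s to roughly 20 x 20 s = 400 s, plus however long each "ls" probe command itself takes. (The regenerated testdata of PATCH 086 resumes below.)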
- uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.1-eks-a-v0.0.0-dev-build.1 - version: v0.4.1+abcdef1 + uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.2-eks-a-v0.0.0-dev-build.1 + version: v0.4.2+abcdef1 snow: bottlerocketBootstrapSnow: arch: @@ -1251,12 +1251,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: credential-provider-package os: linux - uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.2-eks-a-v0.0.0-dev-build.1 helmChart: description: Helm chart for eks-anywhere-packages imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.2-eks-a-v0.0.0-dev-build.1 packageController: arch: - amd64 @@ -1265,7 +1265,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.2-eks-a-v0.0.0-dev-build.1 tokenRefresher: arch: - amd64 @@ -1274,8 +1274,8 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: ecr-token-refresher os: linux - uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.1-eks-a-v0.0.0-dev-build.1 - version: v0.4.1+abcdef1 + uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.2-eks-a-v0.0.0-dev-build.1 + version: v0.4.2+abcdef1 snow: bottlerocketBootstrapSnow: arch: @@ -2029,12 +2029,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: credential-provider-package os: linux - uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.2-eks-a-v0.0.0-dev-build.1 helmChart: description: Helm chart for eks-anywhere-packages imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.2-eks-a-v0.0.0-dev-build.1 packageController: arch: - amd64 @@ -2043,7 +2043,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.2-eks-a-v0.0.0-dev-build.1 tokenRefresher: arch: - amd64 @@ -2052,8 +2052,8 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: ecr-token-refresher os: linux - uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.1-eks-a-v0.0.0-dev-build.1 - version: v0.4.1+abcdef1 + uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.2-eks-a-v0.0.0-dev-build.1 + version: v0.4.2+abcdef1 snow: bottlerocketBootstrapSnow: arch: @@ -2807,12 +2807,12 @@ 
spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: credential-provider-package os: linux - uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.2-eks-a-v0.0.0-dev-build.1 helmChart: description: Helm chart for eks-anywhere-packages imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.2-eks-a-v0.0.0-dev-build.1 packageController: arch: - amd64 @@ -2821,7 +2821,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.2-eks-a-v0.0.0-dev-build.1 tokenRefresher: arch: - amd64 @@ -2830,8 +2830,8 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: ecr-token-refresher os: linux - uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.1-eks-a-v0.0.0-dev-build.1 - version: v0.4.1+abcdef1 + uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.2-eks-a-v0.0.0-dev-build.1 + version: v0.4.2+abcdef1 snow: bottlerocketBootstrapSnow: arch: @@ -3585,12 +3585,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: credential-provider-package os: linux - uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.2-eks-a-v0.0.0-dev-build.1 helmChart: description: Helm chart for eks-anywhere-packages imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.2-eks-a-v0.0.0-dev-build.1 packageController: arch: - amd64 @@ -3599,7 +3599,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.2-eks-a-v0.0.0-dev-build.1 tokenRefresher: arch: - amd64 @@ -3608,8 +3608,8 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: ecr-token-refresher os: linux - uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.1-eks-a-v0.0.0-dev-build.1 - version: v0.4.1+abcdef1 + uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.2-eks-a-v0.0.0-dev-build.1 + version: v0.4.2+abcdef1 snow: bottlerocketBootstrapSnow: arch: @@ -4363,12 +4363,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: credential-provider-package os: linux - uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.1-eks-a-v0.0.0-dev-build.1 + uri: 
public.ecr.aws/release-container-registry/credential-provider-package:v0.4.2-eks-a-v0.0.0-dev-build.1 helmChart: description: Helm chart for eks-anywhere-packages imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.2-eks-a-v0.0.0-dev-build.1 packageController: arch: - amd64 @@ -4377,7 +4377,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.2-eks-a-v0.0.0-dev-build.1 tokenRefresher: arch: - amd64 @@ -4386,8 +4386,8 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: ecr-token-refresher os: linux - uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.1-eks-a-v0.0.0-dev-build.1 - version: v0.4.1+abcdef1 + uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.2-eks-a-v0.0.0-dev-build.1 + version: v0.4.2+abcdef1 snow: bottlerocketBootstrapSnow: arch: From de99d121cf3d176a445d8f0ed67543b7aadf8a73 Mon Sep 17 00:00:00 2001 From: Vivek Koppuru Date: Tue, 23 Apr 2024 00:51:37 -0700 Subject: [PATCH 087/193] Add check for nil eksaversion when setting etcd url (#8018) --- pkg/clusterapi/etcd.go | 2 +- pkg/clusterapi/etcd_test.go | 6 ++---- pkg/providers/cloudstack/template.go | 2 +- pkg/providers/common/common.go | 8 ++++++-- pkg/providers/common/common_test.go | 10 +++++++++- pkg/providers/docker/docker.go | 2 +- pkg/providers/nutanix/template.go | 2 +- pkg/providers/snow/apibuilder.go | 2 +- pkg/providers/tinkerbell/template.go | 2 +- pkg/providers/vsphere/template.go | 2 +- 10 files changed, 24 insertions(+), 14 deletions(-) diff --git a/pkg/clusterapi/etcd.go b/pkg/clusterapi/etcd.go index 33edaa06914f..94f58f0bce80 100644 --- a/pkg/clusterapi/etcd.go +++ b/pkg/clusterapi/etcd.go @@ -15,7 +15,7 @@ import ( ) // SetUbuntuConfigInEtcdCluster sets up the etcd config in EtcdadmCluster. 
-func SetUbuntuConfigInEtcdCluster(etcd *etcdv1.EtcdadmCluster, versionsBundle *cluster.VersionsBundle, eksaVersion string) {
+func SetUbuntuConfigInEtcdCluster(etcd *etcdv1.EtcdadmCluster, versionsBundle *cluster.VersionsBundle, eksaVersion *v1alpha1.EksaVersion) {
 	etcd.Spec.EtcdadmConfigSpec.Format = etcdbootstrapv1.Format("cloud-config")
 	etcd.Spec.EtcdadmConfigSpec.CloudInitConfig = &etcdbootstrapv1.CloudInitConfig{
 		Version: versionsBundle.KubeDistro.EtcdVersion,
diff --git a/pkg/clusterapi/etcd_test.go b/pkg/clusterapi/etcd_test.go
index 2b7c5dd48241..72f4a87c31ea 100644
--- a/pkg/clusterapi/etcd_test.go
+++ b/pkg/clusterapi/etcd_test.go
@@ -15,7 +15,6 @@ import (
 func TestSetUbuntuConfigInEtcdCluster(t *testing.T) {
 	g := newApiBuilerTest(t)
 	eksaVersion := anywherev1.EksaVersion("v0.19.2")
-	g.clusterSpec.Cluster.Spec.EksaVersion = &eksaVersion
 	got := wantEtcdCluster()
 	versionBundle := g.clusterSpec.VersionsBundles["1.21"]
@@ -26,14 +25,13 @@
 		InstallDir:      "/usr/bin",
 		EtcdReleaseURL:  versionBundle.KubeDistro.EtcdURL,
 	}
-	clusterapi.SetUbuntuConfigInEtcdCluster(got, versionBundle, string(eksaVersion))
+	clusterapi.SetUbuntuConfigInEtcdCluster(got, versionBundle, &eksaVersion)
 	g.Expect(got).To(Equal(want))
 }
 
 func TestSetUbuntuConfigInEtcdClusterNoEtcdUrl(t *testing.T) {
 	g := newApiBuilerTest(t)
 	eksaVersion := anywherev1.EksaVersion("v0.18.2")
-	g.clusterSpec.Cluster.Spec.EksaVersion = &eksaVersion
 	got := wantEtcdCluster()
 	versionBundle := g.clusterSpec.VersionsBundles["1.21"]
@@ -43,7 +41,7 @@
 		Version:    versionBundle.KubeDistro.EtcdVersion,
 		InstallDir: "/usr/bin",
 	}
-	clusterapi.SetUbuntuConfigInEtcdCluster(got, versionBundle, string(eksaVersion))
+	clusterapi.SetUbuntuConfigInEtcdCluster(got, versionBundle, &eksaVersion)
 	g.Expect(got).To(Equal(want))
 }
 
diff --git a/pkg/providers/cloudstack/template.go b/pkg/providers/cloudstack/template.go
index 53b4cce84cb4..ed4149947047 100644
--- a/pkg/providers/cloudstack/template.go
+++ b/pkg/providers/cloudstack/template.go
@@ -231,7 +231,7 @@ func buildTemplateMapCP(clusterSpec *cluster.Spec) (map[string]interface{}, erro
 		values["externalEtcd"] = true
 		values["externalEtcdReplicas"] = clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.Count
 		values["etcdSshUsername"] = etcdMachineSpec.Users[0].Name
-		etcdURL, _ := common.GetExternalEtcdReleaseURL(string(*clusterSpec.Cluster.Spec.EksaVersion), versionsBundle)
+		etcdURL, _ := common.GetExternalEtcdReleaseURL(clusterSpec.Cluster.Spec.EksaVersion, versionsBundle)
 		if etcdURL != "" {
 			values["externalEtcdReleaseUrl"] = etcdURL
 		}
diff --git a/pkg/providers/common/common.go b/pkg/providers/common/common.go
index 70803b776535..9a1ebd6d52ef 100644
--- a/pkg/providers/common/common.go
+++ b/pkg/providers/common/common.go
@@ -152,8 +152,12 @@ func GetCAPIBottlerocketSettingsConfig(config *v1alpha1.BottlerocketConfiguratio
 // GetExternalEtcdReleaseURL returns a valid etcd URL from version bundles if the eksaVersion is greater than
 // MinEksAVersionWithEtcdURL. Return "" if eksaVersion < MinEksAVersionWithEtcdURL to prevent the etcd nodes from being rolled out.
-func GetExternalEtcdReleaseURL(clusterVersion string, versionBundle *cluster.VersionsBundle) (string, error) {
-	clusterVersionSemVer, err := semver.New(clusterVersion)
+func GetExternalEtcdReleaseURL(clusterVersion *v1alpha1.EksaVersion, versionBundle *cluster.VersionsBundle) (string, error) {
+	if clusterVersion == nil {
+		logger.V(4).Info("Eks-a cluster version is not specified. Skip setting etcd url")
+		return "", nil
+	}
+	clusterVersionSemVer, err := semver.New(string(*clusterVersion))
 	if err != nil {
 		return "", fmt.Errorf("invalid semver for clusterVersion: %v", err)
 	}
diff --git a/pkg/providers/common/common_test.go b/pkg/providers/common/common_test.go
index 0b659999b203..79add9c0a595 100644
--- a/pkg/providers/common/common_test.go
+++ b/pkg/providers/common/common_test.go
@@ -175,7 +175,8 @@ func TestGetExternalEtcdReleaseURL(t *testing.T) {
 	}
 	for _, tt := range testcases {
 		t.Run(tt.name, func(t *testing.T) {
-			got, err := common.GetExternalEtcdReleaseURL(tt.clusterVersion, test.VersionBundle())
+			eksaVersion := v1alpha1.EksaVersion(tt.clusterVersion)
+			got, err := common.GetExternalEtcdReleaseURL(&eksaVersion, test.VersionBundle())
 			if tt.err == nil {
 				g.Expect(err).ToNot(HaveOccurred())
 			} else {
@@ -185,3 +186,10 @@
 		})
 	}
 }
+
+func TestGetExternalEtcdReleaseURLWithNilEksaVersion(t *testing.T) {
+	g := NewWithT(t)
+	got, err := common.GetExternalEtcdReleaseURL(nil, test.VersionBundle())
+	g.Expect(err).ToNot(HaveOccurred())
+	g.Expect(got).To(BeEmpty())
+}
diff --git a/pkg/providers/docker/docker.go b/pkg/providers/docker/docker.go
index b74665a028b3..6fc48d335f9a 100644
--- a/pkg/providers/docker/docker.go
+++ b/pkg/providers/docker/docker.go
@@ -329,7 +329,7 @@ func buildTemplateMapCP(clusterSpec *cluster.Spec) (map[string]interface{}, erro
 	if clusterSpec.Cluster.Spec.ExternalEtcdConfiguration != nil {
 		values["externalEtcd"] = true
 		values["externalEtcdReplicas"] = clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.Count
-		etcdURL, _ := common.GetExternalEtcdReleaseURL(string(*clusterSpec.Cluster.Spec.EksaVersion), versionsBundle)
+		etcdURL, _ := common.GetExternalEtcdReleaseURL(clusterSpec.Cluster.Spec.EksaVersion, versionsBundle)
 		if etcdURL != "" {
 			values["externalEtcdReleaseUrl"] = etcdURL
 		}
diff --git a/pkg/providers/nutanix/template.go b/pkg/providers/nutanix/template.go
index 5b33ad2c90fb..f8bfe8738d1b 100644
--- a/pkg/providers/nutanix/template.go
+++ b/pkg/providers/nutanix/template.go
@@ -297,7 +297,7 @@ func buildTemplateMapCP(
 		values["maxSurge"] = clusterSpec.Cluster.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy.RollingUpdate.MaxSurge
 	}
 
-	etcdURL, _ := common.GetExternalEtcdReleaseURL(string(*clusterSpec.Cluster.Spec.EksaVersion), versionsBundle)
+	etcdURL, _ := common.GetExternalEtcdReleaseURL(clusterSpec.Cluster.Spec.EksaVersion, versionsBundle)
 	if etcdURL != "" {
 		values["externalEtcdReleaseUrl"] = etcdURL
 	}
diff --git a/pkg/providers/snow/apibuilder.go b/pkg/providers/snow/apibuilder.go
index 5e4f294f3f02..65ecd519ce2d 100644
--- a/pkg/providers/snow/apibuilder.go
+++ b/pkg/providers/snow/apibuilder.go
@@ -183,7 +183,7 @@ func EtcdadmCluster(log logr.Logger, clusterSpec *cluster.Spec, snowMachineTempl
 		clusterapi.SetBottlerocketHostConfigInEtcdCluster(etcd, machineConfig.Spec.HostOSConfiguration)
 	case v1alpha1.Ubuntu:
-		clusterapi.SetUbuntuConfigInEtcdCluster(etcd, versionsBundle, string(*clusterSpec.Cluster.Spec.EksaVersion))
+		clusterapi.SetUbuntuConfigInEtcdCluster(etcd, versionsBundle, clusterSpec.Cluster.Spec.EksaVersion)
 		etcd.Spec.EtcdadmConfigSpec.PreEtcdadmCommands = append(etcd.Spec.EtcdadmConfigSpec.PreEtcdadmCommands,
 			"/etc/eks/bootstrap.sh",
 		)
diff --git a/pkg/providers/tinkerbell/template.go b/pkg/providers/tinkerbell/template.go
index 3a6c14309028..1bae93348efe 100644
--- a/pkg/providers/tinkerbell/template.go
+++ b/pkg/providers/tinkerbell/template.go
@@ -476,7 +476,7 @@ func buildTemplateMapCP(
 		values["etcdSshUsername"] = etcdMachineSpec.Users[0].Name
 		values["etcdTemplateOverride"] = etcdTemplateOverride
 		values["etcdHardwareSelector"] = etcdMachineSpec.HardwareSelector
-		etcdURL, _ := common.GetExternalEtcdReleaseURL(string(*clusterSpec.Cluster.Spec.EksaVersion), versionsBundle)
+		etcdURL, _ := common.GetExternalEtcdReleaseURL(clusterSpec.Cluster.Spec.EksaVersion, versionsBundle)
 		if etcdURL != "" {
 			values["externalEtcdReleaseUrl"] = etcdURL
 		}
diff --git a/pkg/providers/vsphere/template.go b/pkg/providers/vsphere/template.go
index 50048573d8e5..4be5dd51d9c2 100644
--- a/pkg/providers/vsphere/template.go
+++ b/pkg/providers/vsphere/template.go
@@ -308,7 +308,7 @@ func buildTemplateMapCP(
 			}
 		}
 	}
-	etcdURL, _ := common.GetExternalEtcdReleaseURL(string(*clusterSpec.Cluster.Spec.EksaVersion), versionsBundle)
+	etcdURL, _ := common.GetExternalEtcdReleaseURL(clusterSpec.Cluster.Spec.EksaVersion, versionsBundle)
 	if etcdURL != "" {
 		values["externalEtcdReleaseUrl"] = etcdURL
 	}
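For readers following the nil-check change above, here is a minimal, self-contained sketch of the guard pattern in isolation. It is a hedged illustration, not code from this repository: the EksaVersion alias, the minVersionWithEtcdURL constant, and the use of golang.org/x/mod/semver (standing in for the repo's internal semver package) are all assumptions.

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

// EksaVersion mirrors the string-backed version type used by the patch.
type EksaVersion string

// minVersionWithEtcdURL is an assumed placeholder, not the real
// MinEksAVersionWithEtcdURL constant.
const minVersionWithEtcdURL = "v0.19.0"

// externalEtcdReleaseURL reproduces the guard order of the patched
// GetExternalEtcdReleaseURL: nil check first, then semver validation,
// then the minimum-version comparison.
func externalEtcdReleaseURL(v *EksaVersion, etcdURL string) (string, error) {
	if v == nil {
		// No EksaVersion on the cluster spec: return "" so no etcd
		// release URL is set and existing etcd nodes are not rolled out.
		return "", nil
	}
	if !semver.IsValid(string(*v)) {
		return "", fmt.Errorf("invalid semver for clusterVersion: %q", *v)
	}
	if semver.Compare(string(*v), minVersionWithEtcdURL) < 0 {
		return "", nil
	}
	return etcdURL, nil
}

func main() {
	v := EksaVersion("v0.19.2")
	for _, ver := range []*EksaVersion{nil, &v} {
		url, err := externalEtcdReleaseURL(ver, "https://example.com/etcd-linux-amd64.tar.gz")
		fmt.Printf("url=%q err=%v\n", url, err)
	}
}

Passing the pointer through, rather than dereferencing at every call site as the providers previously did with string(*clusterSpec.Cluster.Spec.EksaVersion), moves the nil handling into one place, which is the point of the patch.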
From 7e4a242f500a7e73d7b76d3ef36b73c69163cb2a Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 23 Apr 2024 12:03:45 -0700
Subject: [PATCH 088/193] Bump k8s.io/apimachinery in /release/cli in the kubernetes group (#8035)

Bumps the kubernetes group in /release/cli with 1 update: [k8s.io/apimachinery](https://github.com/kubernetes/apimachinery).

Updates `k8s.io/apimachinery` from 0.29.3 to 0.29.4
- [Commits](https://github.com/kubernetes/apimachinery/compare/v0.29.3...v0.29.4)

---
updated-dependencies:
- dependency-name: k8s.io/apimachinery
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: kubernetes
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 release/cli/go.mod | 2 +-
 release/cli/go.sum | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/release/cli/go.mod b/release/cli/go.mod
index 8f12b7a5d334..71378d4232b4 100644
--- a/release/cli/go.mod
+++ b/release/cli/go.mod
@@ -17,7 +17,7 @@ require (
 	github.com/spf13/viper v1.18.2
 	golang.org/x/sync v0.7.0
 	helm.sh/helm/v3 v3.14.4
-	k8s.io/apimachinery v0.29.3
+	k8s.io/apimachinery v0.29.4
 	k8s.io/helm v2.17.0+incompatible
 	sigs.k8s.io/controller-runtime v0.16.5
 	sigs.k8s.io/yaml v1.4.0
diff --git a/release/cli/go.sum b/release/cli/go.sum
index 2b1f22eb4452..591e2e7d0e88 100644
--- a/release/cli/go.sum
+++ b/release/cli/go.sum
@@ -879,8 +879,8 @@ k8s.io/apiextensions-apiserver v0.17.2/go.mod h1:4KdMpjkEjjDI2pPfBA15OscyNldHWdB
 k8s.io/apiextensions-apiserver v0.29.1 h1:S9xOtyk9M3Sk1tIpQMu9wXHm5O2MX6Y1kIpPMimZBZw=
 k8s.io/apiextensions-apiserver v0.29.1/go.mod h1:zZECpujY5yTW58co8V2EQR4BD6A9pktVgHhvc0uLfeU=
 k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
-k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU=
-k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU=
+k8s.io/apimachinery v0.29.4 h1:RaFdJiDmuKs/8cm1M6Dh1Kvyh59YQFDcFuFTSmXes6Q=
+k8s.io/apimachinery v0.29.4/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y=
 k8s.io/apiserver v0.17.2/go.mod h1:lBmw/TtQdtxvrTk0e2cgtOxHizXI+d0mmGQURIHQZlo=
 k8s.io/apiserver v0.29.3 h1:xR7ELlJ/BZSr2n4CnD3lfA4gzFivh0wwfNfz9L0WZcE=
 k8s.io/apiserver v0.29.3/go.mod h1:hrvXlwfRulbMbBgmWRQlFru2b/JySDpmzvQwwk4GUOs=

From b6792742fabc7cc5f4befaa6bbaaba3a9c5cb0cc Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 23 Apr 2024 12:24:44 -0700
Subject: [PATCH 089/193] Bump github.com/aws/aws-sdk-go from 1.51.22 to 1.51.27 in /release/cli (#8036)

Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.51.22 to 1.51.27.
- [Release notes](https://github.com/aws/aws-sdk-go/releases)
- [Commits](https://github.com/aws/aws-sdk-go/compare/v1.51.22...v1.51.27)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go
  dependency-type: direct:production
  update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 release/cli/go.mod | 2 +-
 release/cli/go.sum | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/release/cli/go.mod b/release/cli/go.mod
index 71378d4232b4..983ac070c71d 100644
--- a/release/cli/go.mod
+++ b/release/cli/go.mod
@@ -3,7 +3,7 @@ module github.com/aws/eks-anywhere/release/cli
 go 1.21
 
 require (
-	github.com/aws/aws-sdk-go v1.51.22
+	github.com/aws/aws-sdk-go v1.51.27
 	github.com/aws/aws-sdk-go-v2 v1.26.1
 	github.com/aws/eks-anywhere v0.18.0
 	github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e
diff --git a/release/cli/go.sum b/release/cli/go.sum
index 591e2e7d0e88..35e2b23c9b1f 100644
--- a/release/cli/go.sum
+++ b/release/cli/go.sum
@@ -56,8 +56,8 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:l
 github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY=
 github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
 github.com/aws/aws-sdk-go v1.38.40/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.51.22 h1:VL2p2JgC32myt7DMEcbe1devdtgGSgMNvZpkcdvlxq4=
-github.com/aws/aws-sdk-go v1.51.22/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/aws/aws-sdk-go v1.51.27 h1:ZprksHovT4rFfNBHB+Bc/0p4PTntAnTlZP39DMA/Qp8=
+github.com/aws/aws-sdk-go v1.51.27/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
 github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA=
 github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM=
 github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e h1:GB6Cn9yKEt31mDF7RrVWyM9WoppNkGYth8zBPIJGJ+w=

From 75b100396b076347f349642a93e727ca10d014b6 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 23 Apr 2024 12:24:50 -0700
Subject: [PATCH 090/193] Bump github.com/onsi/gomega from 1.32.0 to 1.33.0 in /release/cli (#8037)

Bumps [github.com/onsi/gomega](https://github.com/onsi/gomega) from 1.32.0 to 1.33.0.
- [Release notes](https://github.com/onsi/gomega/releases)
- [Changelog](https://github.com/onsi/gomega/blob/master/CHANGELOG.md)
- [Commits](https://github.com/onsi/gomega/compare/v1.32.0...v1.33.0)

---
updated-dependencies:
- dependency-name: github.com/onsi/gomega
  dependency-type: direct:production
  update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 release/cli/go.mod | 2 +-
 release/cli/go.sum | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/release/cli/go.mod b/release/cli/go.mod
index 983ac070c71d..1c5198b33ec6 100644
--- a/release/cli/go.mod
+++ b/release/cli/go.mod
@@ -11,7 +11,7 @@ require (
 	github.com/ghodss/yaml v1.0.0
 	github.com/go-logr/logr v1.4.1
 	github.com/mitchellh/go-homedir v1.1.0
-	github.com/onsi/gomega v1.32.0
+	github.com/onsi/gomega v1.33.0
 	github.com/pkg/errors v0.9.1
 	github.com/spf13/cobra v1.8.0
 	github.com/spf13/viper v1.18.2
diff --git a/release/cli/go.sum b/release/cli/go.sum
index 35e2b23c9b1f..a7354a8a93ce 100644
--- a/release/cli/go.sum
+++ b/release/cli/go.sum
@@ -490,13 +490,13 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw=
 github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY=
-github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM=
+github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8=
+github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
-github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk=
-github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg=
+github.com/onsi/gomega v1.33.0 h1:snPCflnZrpMsy94p4lXVEkHo12lmPnc3vY5XBbreexE=
+github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxjX3wY=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=

From 02a8ac74214aaeb6c76aa8f60258769ff7540c3a Mon Sep 17 00:00:00 2001
From: Abhay Krishna
Date: Tue, 23 Apr 2024 12:39:38 -0700
Subject: [PATCH 091/193] Add release logic for Ubuntu RTOS image (#8006)

---
 release/cli/cmd/release.go                      |  4 +-
 release/cli/pkg/assets/archives/archives.go     | 55 +++++++++++++++++++
 .../cli/pkg/assets/config/bundle_release.go     | 16 ++++++
 release/cli/pkg/assets/manifests/manifests.go   |  1 +
 release/cli/pkg/assets/types/types.go           |  2 +
 release/cli/pkg/aws/s3/s3.go                    |  9 ++-
 release/cli/pkg/filereader/file_reader.go       |  2 +-
 release/cli/pkg/operations/upload.go            |  6 +-
 release/cli/pkg/types/types.go                  |  2 +
 9 files changed, 89 insertions(+), 8 deletions(-)

diff --git a/release/cli/cmd/release.go b/release/cli/cmd/release.go
index 79775e8dd61b..60fddb3f12fa 100644
--- a/release/cli/cmd/release.go
+++ b/release/cli/cmd/release.go
@@ -236,7 +236,7 @@ var releaseCmd = &cobra.Command{
 		}
 
 		bundleReleaseManifestKey := releaseConfig.BundlesManifestFilepath()
-		err = s3.UploadFile(bundleReleaseManifestFile, aws.String(releaseConfig.ReleaseBucket), aws.String(bundleReleaseManifestKey), releaseConfig.ReleaseClients.S3.Uploader)
+		err = s3.UploadFile(bundleReleaseManifestFile, aws.String(releaseConfig.ReleaseBucket), aws.String(bundleReleaseManifestKey), releaseConfig.ReleaseClients.S3.Uploader, false)
 		if err != nil {
 			fmt.Printf("Error uploading bundle manifest to release bucket: %+v", err)
 			os.Exit(1)
@@ -328,7 +328,7 @@ var releaseCmd = &cobra.Command{
 		}
 
 		eksAReleaseManifestKey := releaseConfig.ReleaseManifestFilepath()
-		err = s3.UploadFile(eksAReleaseManifestFile, aws.String(releaseConfig.ReleaseBucket), aws.String(eksAReleaseManifestKey), releaseConfig.ReleaseClients.S3.Uploader)
+		err = s3.UploadFile(eksAReleaseManifestFile, aws.String(releaseConfig.ReleaseBucket), aws.String(eksAReleaseManifestKey), releaseConfig.ReleaseClients.S3.Uploader, false)
 		if err != nil {
 			fmt.Printf("Error uploading EKS-A release manifest to release bucket: %v", err)
 			os.Exit(1)
diff --git a/release/cli/pkg/assets/archives/archives.go b/release/cli/pkg/assets/archives/archives.go
index cf175f491f11..e9d766eb8256 100644
--- a/release/cli/pkg/assets/archives/archives.go
+++ b/release/cli/pkg/assets/archives/archives.go
@@ -145,6 +145,61 @@ func KernelArtifactPathGetter(rc *releasetypes.ReleaseConfig, archive *assettype
 	return sourceS3Key, sourceS3Prefix, releaseName, releaseS3Path, nil
 }
 
+// RTOSArtifactPathGetter returns the source S3 key and prefix and the release name and S3 path for Ubuntu RTOS image archives.
+func RTOSArtifactPathGetter(rc *releasetypes.ReleaseConfig, archive *assettypes.Archive, projectPath, gitTag, eksDReleaseChannel, eksDReleaseNumber, kubeVersion, latestPath, arch string) (string, string, string, string, error) {
+	var sourceS3Key string
+	var sourceS3Prefix string
+	var releaseS3Path string
+	var releaseName string
+
+	imageExtensions := map[string]string{
+		"ami": "gz",
+		"ova": "ova",
+		"raw": "gz",
+	}
+	imageExtension := imageExtensions[archive.Format]
+
+	if rc.DevRelease || rc.ReleaseEnvironment == "development" {
+		sourceS3Key = fmt.Sprintf("%s.%s", archive.OSName, imageExtension)
+		sourceS3Prefix = fmt.Sprintf("%s/%s/%s/%s/%s/%s", projectPath, eksDReleaseChannel, archive.Format, archive.OSName, archive.OSVersion, latestPath)
+	} else {
+		sourceS3Key = fmt.Sprintf("%s-%s-eks-a-%d-%s.%s",
+			archive.OSName,
+			eksDReleaseChannel,
+			rc.BundleNumber,
+			arch,
+			imageExtension,
+		)
+		sourceS3Prefix = fmt.Sprintf("releases/bundles/%d/artifacts/rtos/%s", rc.BundleNumber, eksDReleaseChannel)
+	}
+
+	if rc.DevRelease {
+		releaseName = fmt.Sprintf("%s-%s-eks-a-%s-%s.%s",
+			archive.OSName,
+			eksDReleaseChannel,
+			rc.DevReleaseUriVersion,
+			arch,
+			imageExtension,
+		)
+		releaseS3Path = fmt.Sprintf("artifacts/%s/rtos/%s/%s",
+			rc.DevReleaseUriVersion,
+			archive.Format,
+			eksDReleaseChannel,
+		)
+	} else {
+		releaseName = fmt.Sprintf("%s-%s-eks-a-%d-%s.%s",
+			archive.OSName,
+			eksDReleaseChannel,
+			rc.BundleNumber,
+			arch,
+			imageExtension,
+		)
+		releaseS3Path = fmt.Sprintf("releases/bundles/%d/artifacts/rtos/%s", rc.BundleNumber, eksDReleaseChannel)
+	}
+
+	return sourceS3Key, sourceS3Prefix, releaseName, releaseS3Path, nil
+}
+
 func GetArchiveAssets(rc *releasetypes.ReleaseConfig, archive *assettypes.Archive, projectPath, gitTag, eksDReleaseChannel, eksDReleaseNumber, kubeVersion string) (*releasetypes.ArchiveArtifact, error) {
 	os := "linux"
 	arch := "amd64"
@@ -181,6 +236,7 @@ func GetArchiveAssets(rc *releasetypes.ReleaseConfig, archive *assettypes.Archiv
 		ProjectPath:       projectPath,
 		SourcedFromBranch: sourcedFromBranch,
 		ImageFormat:       archive.Format,
+		PrivateUpload:     archive.Private,
 	}
 
 	return archiveArtifact, nil
diff --git a/release/cli/pkg/assets/config/bundle_release.go b/release/cli/pkg/assets/config/bundle_release.go
index e2e78c9adc70..10fa9526dc22 100644
--- a/release/cli/pkg/assets/config/bundle_release.go
+++ b/release/cli/pkg/assets/config/bundle_release.go
@@ -66,6 +66,22 @@ var bundleReleaseAssetsConfigMap = []assettypes.AssetConfig{
 		},
 		HasReleaseBranches: true,
 	},
+	// Canonical Ubuntu RTOS artifacts
+	{
+		ProjectName:    "ubuntu-rtos",
+		ProjectPath:    "projects/canonical/ubuntu",
+		GitTagAssigner: tagger.NonExistentTagAssigner,
+		Archives: []*assettypes.Archive{
+			{
+				Name:                "rtos",
+				Format:              "ami",
+				OSName:              "ubuntu",
+				OSVersion:           "22.04",
+				ArchiveS3PathGetter: archives.RTOSArtifactPathGetter,
+				Private:             true,
+			},
+		},
+	},
 	// Cert-manager artifacts
 	{
 		ProjectName: "cert-manager",
diff --git a/release/cli/pkg/assets/manifests/manifests.go b/release/cli/pkg/assets/manifests/manifests.go
index b0333797d91f..7407f8d49f9f 100644
--- a/release/cli/pkg/assets/manifests/manifests.go
+++ b/release/cli/pkg/assets/manifests/manifests.go
@@ -68,6 +68,7 @@ func GetManifestAssets(rc *releasetypes.ReleaseConfig, manifestComponent *assett
 		ProjectPath:       projectPath,
 		SourcedFromBranch: sourcedFromBranch,
 		Component:         componentName,
+		PrivateUpload:     manifestComponent.Private,
 	}
 
 	return manifestArtifact, nil
diff --git a/release/cli/pkg/assets/types/types.go b/release/cli/pkg/assets/types/types.go
index 413b7d3add35..cf34c6756a9d 100644
--- a/release/cli/pkg/assets/types/types.go
+++ b/release/cli/pkg/assets/types/types.go
@@ -23,6 +23,7 @@ type ManifestComponent struct {
 	ReleaseManifestPrefix string
 	ManifestFiles         []string
 	NoVersionSuffix       bool
+	Private               bool
 }
 
 type ImageTagConfiguration struct {
@@ -47,6 +48,7 @@ type Archive struct {
 	OSVersion            string
 	ArchitectureOverride string
 	ArchiveS3PathGetter  ArchiveS3PathGenerator
+	Private              bool
 }
 
 type AssetConfig struct {
diff --git a/release/cli/pkg/aws/s3/s3.go b/release/cli/pkg/aws/s3/s3.go
index 34b7eb9d5f81..ecb922321c5a 100644
--- a/release/cli/pkg/aws/s3/s3.go
+++ b/release/cli/pkg/aws/s3/s3.go
@@ -22,6 +22,7 @@ import (
 	"path/filepath"
 
 	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/s3"
 	"github.com/aws/aws-sdk-go/service/s3/s3manager"
 	"github.com/pkg/errors"
 )
@@ -67,18 +68,22 @@ func DownloadFile(filePath, bucket, key string) error {
 	return nil
 }
 
-func UploadFile(filePath string, bucket, key *string, s3Uploader *s3manager.Uploader) error {
+func UploadFile(filePath string, bucket, key *string, s3Uploader *s3manager.Uploader, private bool) error {
 	fd, err := os.Open(filePath)
 	if err != nil {
 		return errors.Cause(err)
 	}
 	defer fd.Close()
 
+	objectCannedACL := s3.ObjectCannedACLPublicRead
+	if private {
+		objectCannedACL = s3.ObjectCannedACLPrivate
+	}
+
 	result, err := s3Uploader.Upload(&s3manager.UploadInput{
 		Bucket: bucket,
 		Key:    key,
 		Body:   fd,
-		ACL:    aws.String("public-read"),
+		ACL:    aws.String(objectCannedACL),
 	})
 	if err != nil {
 		return errors.Cause(err)
diff --git a/release/cli/pkg/filereader/file_reader.go b/release/cli/pkg/filereader/file_reader.go
index a38470d1d9a9..9ad874f468d2 100644
--- a/release/cli/pkg/filereader/file_reader.go
+++ b/release/cli/pkg/filereader/file_reader.go
@@ -286,7 +286,7 @@ func PutEksAReleaseVersion(version string, r *releasetypes.ReleaseConfig) error
 	// Upload the file to S3
 	fmt.Println("Uploading latest release version file")
-	err = s3.UploadFile(currentReleaseKey, aws.String(r.ReleaseBucket), aws.String(currentReleaseKey), r.ReleaseClients.S3.Uploader)
+	err = s3.UploadFile(currentReleaseKey, aws.String(r.ReleaseBucket), aws.String(currentReleaseKey), r.ReleaseClients.S3.Uploader, false)
 	if err != nil {
 		return errors.Cause(err)
 	}
diff --git a/release/cli/pkg/operations/upload.go b/release/cli/pkg/operations/upload.go
index 20926d419a7b..bf972ad7646e 100644
--- a/release/cli/pkg/operations/upload.go
+++ b/release/cli/pkg/operations/upload.go
@@ -92,7 +92,7 @@ func handleArchiveUpload(_ context.Context, r *releasetypes.ReleaseConfig, artif
 		archiveFile := filepath.Join(artifact.Archive.ArtifactPath, artifact.Archive.ReleaseName)
 		fmt.Printf("Archive - %s\n", archiveFile)
 		key := filepath.Join(artifact.Archive.ReleaseS3Path, artifact.Archive.ReleaseName)
-		err := s3.UploadFile(archiveFile, aws.String(r.ReleaseBucket), aws.String(key), r.ReleaseClients.S3.Uploader)
+		err := s3.UploadFile(archiveFile, aws.String(r.ReleaseBucket), aws.String(key), r.ReleaseClients.S3.Uploader, artifact.Archive.PrivateUpload)
 		if err != nil {
 			return errors.Cause(err)
 		}
@@ -109,7 +109,7 @@ func handleArchiveUpload(_ context.Context, r *releasetypes.ReleaseConfig, artif
 		checksumFile := filepath.Join(artifact.Archive.ArtifactPath, artifact.Archive.ReleaseName) + extension
 		fmt.Printf("Checksum - %s\n", checksumFile)
 		key := filepath.Join(artifact.Archive.ReleaseS3Path, artifact.Archive.ReleaseName) + extension
-		err := s3.UploadFile(checksumFile, aws.String(r.ReleaseBucket), aws.String(key), r.ReleaseClients.S3.Uploader)
+		err := s3.UploadFile(checksumFile, aws.String(r.ReleaseBucket), aws.String(key), r.ReleaseClients.S3.Uploader, artifact.Archive.PrivateUpload)
 		if err != nil {
 			return errors.Cause(err)
 		}
@@ -122,7 +122,7 @@ func handleManifestUpload(_ context.Context, r *releasetypes.ReleaseConfig, arti
 	manifestFile := filepath.Join(artifact.Manifest.ArtifactPath, artifact.Manifest.ReleaseName)
 	fmt.Printf("Manifest - %s\n", manifestFile)
 	key := filepath.Join(artifact.Manifest.ReleaseS3Path, artifact.Manifest.ReleaseName)
-	err := s3.UploadFile(manifestFile, aws.String(r.ReleaseBucket), aws.String(key), r.ReleaseClients.S3.Uploader)
+	err := s3.UploadFile(manifestFile, aws.String(r.ReleaseBucket), aws.String(key), r.ReleaseClients.S3.Uploader, artifact.Manifest.PrivateUpload)
 	if err != nil {
 		return errors.Cause(err)
 	}
diff --git a/release/cli/pkg/types/types.go b/release/cli/pkg/types/types.go
index ab136dc4ac1d..3bb7f374b8c6 100644
--- a/release/cli/pkg/types/types.go
+++ b/release/cli/pkg/types/types.go
@@ -77,6 +77,7 @@ type ArchiveArtifact struct {
 	ProjectPath       string
 	SourcedFromBranch string
 	ImageFormat       string
+	PrivateUpload     bool
 }
 
 type ImageArtifact struct {
@@ -102,6 +103,7 @@ type ManifestArtifact struct {
 	ProjectPath       string
 	SourcedFromBranch string
 	Component         string
+	PrivateUpload     bool
 }
 
 type Artifact struct {
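Related to the private-artifact change in patch 091 above, here is a minimal standalone sketch of the canned-ACL toggle. It assumes aws-sdk-go v1's s3 and s3manager packages (which the patch itself imports); the bucket, key, and file names are illustrative only, and the helper is an extracted simplification rather than the repository's UploadFile.

package main

import (
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

// upload mirrors the ACL-selection logic added in patch 091: default to
// public-read, switch to private when the caller asks for it.
func upload(uploader *s3manager.Uploader, path, bucket, key string, private bool) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	// Private artifacts (such as the Ubuntu RTOS images) must not receive
	// the default public-read ACL.
	acl := s3.ObjectCannedACLPublicRead
	if private {
		acl = s3.ObjectCannedACLPrivate
	}
	_, err = uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
		Body:   f,
		ACL:    aws.String(acl),
	})
	return err
}

func main() {
	sess := session.Must(session.NewSession())
	if err := upload(s3manager.NewUploader(sess), "artifact.gz", "example-bucket", "artifacts/artifact.gz", true); err != nil {
		log.Fatal(err)
	}
}

Keeping the flag as a plain bool parameter, with existing callers passing false, preserves the previous public-read behavior for every other upload while letting the RTOS archives and manifests opt in to a private ACL.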
From a0d5b64c1bdae2d367bb0cd24530dacd846949f2 Mon Sep 17 00:00:00 2001
From: Mitali Paygude
Date: Tue, 23 Apr 2024 15:29:38 -0700
Subject: [PATCH 092/193] Docs update for BR deprecation for Baremetal (#8039)

Co-authored-by: EKS Distro PR Bot
---
 docs/content/en/docs/osmgmt/overview.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/content/en/docs/osmgmt/overview.md b/docs/content/en/docs/osmgmt/overview.md
index e17a172cb014..3d09a04734d3 100644
--- a/docs/content/en/docs/osmgmt/overview.md
+++ b/docs/content/en/docs/osmgmt/overview.md
@@ -12,7 +12,7 @@ Reference the table below for the operating systems supported per deployment opt
 
 || vSphere | Bare metal | Snow | CloudStack | Nutanix |
 | --- | :---: | :---: | :---: | :---: | :---: |
-| Bottlerocket | ✔ | ✔ | — | — | — |
+| Bottlerocket | ✔ | — | — | — | — |
 | Ubuntu | ✔ | ✔ | ✔ | — | ✔ |
 | RHEL | ✔ | ✔ | — | ✔ | ✔ |

From a2ec37f9a19fd7ddb835738c934cbb4eae74e13c Mon Sep 17 00:00:00 2001
From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com>
Date: Wed, 24 Apr 2024 03:20:23 -0700
Subject: [PATCH 093/193] [PR BOT] Generate release testdata files (#8038)

---
 .../testdata/main-bundle-release.yaml | 288 +++++++++---------
 1 file changed, 144 insertions(+), 144 deletions(-)

diff --git a/release/cli/pkg/operations/testdata/main-bundle-release.yaml b/release/cli/pkg/operations/testdata/main-bundle-release.yaml
index e9fd482d01df..edec434058bc 100644
--- a/release/cli/pkg/operations/testdata/main-bundle-release.yaml
+++ b/release/cli/pkg/operations/testdata/main-bundle-release.yaml
@@ -10,7 +10,7 @@ spec: versionsBundles: - bootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.1/bootstrap-components.yaml controller: arch: - amd64 @@ -19,7 +19,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-bootstrap-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -30,8 +30,8 @@ spec: imageDigest: 
sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-control-plane-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -211,13 +211,13 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.4/metadata.yaml - version: v1.6.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.1/metadata.yaml + version: v1.7.1+abcdef1 docker: clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/cluster-template-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/cluster-template-development.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/infrastructure-components-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/infrastructure-components-development.yaml kubeProxy: arch: - amd64 @@ -235,10 +235,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-docker os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.7.1-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/metadata.yaml - version: v1.6.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/metadata.yaml + version: v1.7.1+abcdef1 eksD: ami: bottlerocket: {} @@ -648,7 +648,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-bootkit os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-bootkit:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-bootkit:v0.8.1-eks-a-v0.0.0-dev-build.1 docker: arch: - amd64 @@ -657,18 +657,18 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-docker os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-docker:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-docker:v0.8.1-eks-a-v0.0.0-dev-build.1 initramfs: amd: description: Tinkerbell operating system installation environment (osie) component name: initramfs-x86_64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/initramfs-x86_64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/initramfs-x86_64 arm: description: Tinkerbell operating system installation environment (osie) component name: initramfs-aarch64 - uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/initramfs-aarch64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/initramfs-aarch64 kernel: arch: - amd64 @@ -677,18 +677,18 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-kernel os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-kernel:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-kernel:v0.8.1-eks-a-v0.0.0-dev-build.1 vmlinuz: amd: description: Tinkerbell operating system installation environment (osie) component name: vmlinuz-x86_64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/vmlinuz-x86_64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/vmlinuz-x86_64 arm: description: Tinkerbell operating system installation environment (osie) component name: vmlinuz-aarch64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/vmlinuz-aarch64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/vmlinuz-aarch64 rufio: arch: - amd64 @@ -788,7 +788,7 @@ spec: version: v1.9.3+abcdef1 - bootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.1/bootstrap-components.yaml controller: arch: - amd64 @@ -797,7 +797,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-bootstrap-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -808,8 +808,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/metadata.yaml - version: v1.6.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.1/metadata.yaml + version: v1.7.1+abcdef1 bottlerocketHostContainers: admin: arch: @@ -945,7 +945,7 @@ spec: version: v0.4.10-rc1+abcdef1 clusterAPI: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.4/core-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.1/core-components.yaml controller: arch: - amd64 @@ -954,7 +954,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -965,11 +965,11 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.4/metadata.yaml - version: v1.6.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.1/metadata.yaml + version: v1.7.1+abcdef1 controlPlane: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.4/control-plane-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.1/control-plane-components.yaml controller: arch: - amd64 @@ -978,7 +978,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-control-plane-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -989,13 +989,13 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.4/metadata.yaml - version: v1.6.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.1/metadata.yaml + version: v1.7.1+abcdef1 docker: clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/cluster-template-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/cluster-template-development.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/infrastructure-components-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/infrastructure-components-development.yaml kubeProxy: arch: - amd64 @@ -1013,10 +1013,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-docker os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.7.1-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/metadata.yaml - version: v1.6.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/metadata.yaml + version: v1.7.1+abcdef1 eksD: ami: bottlerocket: {} @@ -1426,7 +1426,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-bootkit os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-bootkit:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-bootkit:v0.8.1-eks-a-v0.0.0-dev-build.1 docker: arch: - amd64 @@ -1435,18 +1435,18 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-docker os: linux - uri: 
public.ecr.aws/release-container-registry/tinkerbell/hook-docker:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-docker:v0.8.1-eks-a-v0.0.0-dev-build.1 initramfs: amd: description: Tinkerbell operating system installation environment (osie) component name: initramfs-x86_64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/initramfs-x86_64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/initramfs-x86_64 arm: description: Tinkerbell operating system installation environment (osie) component name: initramfs-aarch64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/initramfs-aarch64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/initramfs-aarch64 kernel: arch: - amd64 @@ -1455,18 +1455,18 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-kernel os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-kernel:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-kernel:v0.8.1-eks-a-v0.0.0-dev-build.1 vmlinuz: amd: description: Tinkerbell operating system installation environment (osie) component name: vmlinuz-x86_64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/vmlinuz-x86_64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/vmlinuz-x86_64 arm: description: Tinkerbell operating system installation environment (osie) component name: vmlinuz-aarch64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/vmlinuz-aarch64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/vmlinuz-aarch64 rufio: arch: - amd64 @@ -1566,7 +1566,7 @@ spec: version: v1.9.3+abcdef1 - bootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.1/bootstrap-components.yaml controller: arch: - amd64 @@ -1575,7 +1575,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-bootstrap-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -1586,8 +1586,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/metadata.yaml - version: v1.6.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.1/metadata.yaml + version: v1.7.1+abcdef1 bottlerocketHostContainers: admin: arch: @@ -1723,7 +1723,7 @@ spec: version: v0.4.10-rc1+abcdef1 clusterAPI: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.4/core-components.yaml + uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.1/core-components.yaml controller: arch: - amd64 @@ -1732,7 +1732,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -1743,11 +1743,11 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.4/metadata.yaml - version: v1.6.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.1/metadata.yaml + version: v1.7.1+abcdef1 controlPlane: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.4/control-plane-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.1/control-plane-components.yaml controller: arch: - amd64 @@ -1756,7 +1756,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-control-plane-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -1767,13 +1767,13 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.4/metadata.yaml - version: v1.6.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.1/metadata.yaml + version: v1.7.1+abcdef1 docker: clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/cluster-template-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/cluster-template-development.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/infrastructure-components-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/infrastructure-components-development.yaml kubeProxy: arch: - amd64 @@ -1791,10 +1791,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-docker os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.7.1-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/metadata.yaml - version: v1.6.4+abcdef1 + uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/metadata.yaml + version: v1.7.1+abcdef1 eksD: ami: bottlerocket: {} @@ -2204,7 +2204,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-bootkit os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-bootkit:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-bootkit:v0.8.1-eks-a-v0.0.0-dev-build.1 docker: arch: - amd64 @@ -2213,18 +2213,18 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-docker os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-docker:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-docker:v0.8.1-eks-a-v0.0.0-dev-build.1 initramfs: amd: description: Tinkerbell operating system installation environment (osie) component name: initramfs-x86_64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/initramfs-x86_64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/initramfs-x86_64 arm: description: Tinkerbell operating system installation environment (osie) component name: initramfs-aarch64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/initramfs-aarch64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/initramfs-aarch64 kernel: arch: - amd64 @@ -2233,18 +2233,18 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-kernel os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-kernel:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-kernel:v0.8.1-eks-a-v0.0.0-dev-build.1 vmlinuz: amd: description: Tinkerbell operating system installation environment (osie) component name: vmlinuz-x86_64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/vmlinuz-x86_64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/vmlinuz-x86_64 arm: description: Tinkerbell operating system installation environment (osie) component name: vmlinuz-aarch64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/vmlinuz-aarch64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/vmlinuz-aarch64 rufio: arch: - amd64 @@ -2344,7 +2344,7 @@ spec: version: v1.9.3+abcdef1 - bootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.1/bootstrap-components.yaml controller: arch: - amd64 @@ -2353,7 +2353,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-bootstrap-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -2364,8 +2364,8 @@ spec: os: linux 
uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/metadata.yaml - version: v1.6.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.1/metadata.yaml + version: v1.7.1+abcdef1 bottlerocketHostContainers: admin: arch: @@ -2501,7 +2501,7 @@ spec: version: v0.4.10-rc1+abcdef1 clusterAPI: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.4/core-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.1/core-components.yaml controller: arch: - amd64 @@ -2510,7 +2510,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -2521,11 +2521,11 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.4/metadata.yaml - version: v1.6.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.1/metadata.yaml + version: v1.7.1+abcdef1 controlPlane: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.4/control-plane-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.1/control-plane-components.yaml controller: arch: - amd64 @@ -2534,7 +2534,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-control-plane-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -2545,13 +2545,13 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.4/metadata.yaml - version: v1.6.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.1/metadata.yaml + version: v1.7.1+abcdef1 docker: clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/cluster-template-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/cluster-template-development.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/infrastructure-components-development.yaml + uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/infrastructure-components-development.yaml kubeProxy: arch: - amd64 @@ -2569,10 +2569,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-docker os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.7.1-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/metadata.yaml - version: v1.6.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/metadata.yaml + version: v1.7.1+abcdef1 eksD: ami: bottlerocket: {} @@ -2982,7 +2982,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-bootkit os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-bootkit:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-bootkit:v0.8.1-eks-a-v0.0.0-dev-build.1 docker: arch: - amd64 @@ -2991,18 +2991,18 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-docker os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-docker:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-docker:v0.8.1-eks-a-v0.0.0-dev-build.1 initramfs: amd: description: Tinkerbell operating system installation environment (osie) component name: initramfs-x86_64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/initramfs-x86_64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/initramfs-x86_64 arm: description: Tinkerbell operating system installation environment (osie) component name: initramfs-aarch64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/initramfs-aarch64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/initramfs-aarch64 kernel: arch: - amd64 @@ -3011,18 +3011,18 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-kernel os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-kernel:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-kernel:v0.8.1-eks-a-v0.0.0-dev-build.1 vmlinuz: amd: description: Tinkerbell operating system installation environment (osie) component name: vmlinuz-x86_64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/vmlinuz-x86_64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/vmlinuz-x86_64 arm: description: Tinkerbell operating system installation environment (osie) component name: vmlinuz-aarch64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/vmlinuz-aarch64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/vmlinuz-aarch64 rufio: arch: - amd64 @@ -3122,7 +3122,7 @@ spec: version: v1.9.3+abcdef1 - bootstrap: components: - uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.1/bootstrap-components.yaml controller: arch: - amd64 @@ -3131,7 +3131,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-bootstrap-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -3142,8 +3142,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/metadata.yaml - version: v1.6.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.1/metadata.yaml + version: v1.7.1+abcdef1 bottlerocketHostContainers: admin: arch: @@ -3279,7 +3279,7 @@ spec: version: v0.4.10-rc1+abcdef1 clusterAPI: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.4/core-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.1/core-components.yaml controller: arch: - amd64 @@ -3288,7 +3288,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -3299,11 +3299,11 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.4/metadata.yaml - version: v1.6.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.1/metadata.yaml + version: v1.7.1+abcdef1 controlPlane: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.4/control-plane-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.1/control-plane-components.yaml controller: arch: - amd64 @@ -3312,7 +3312,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-control-plane-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -3323,13 +3323,13 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.4/metadata.yaml - version: v1.6.4+abcdef1 
+ uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.1/metadata.yaml + version: v1.7.1+abcdef1 docker: clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/cluster-template-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/cluster-template-development.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/infrastructure-components-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/infrastructure-components-development.yaml kubeProxy: arch: - amd64 @@ -3347,10 +3347,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-docker os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.7.1-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/metadata.yaml - version: v1.6.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/metadata.yaml + version: v1.7.1+abcdef1 eksD: ami: bottlerocket: {} @@ -3760,7 +3760,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-bootkit os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-bootkit:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-bootkit:v0.8.1-eks-a-v0.0.0-dev-build.1 docker: arch: - amd64 @@ -3769,18 +3769,18 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-docker os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-docker:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-docker:v0.8.1-eks-a-v0.0.0-dev-build.1 initramfs: amd: description: Tinkerbell operating system installation environment (osie) component name: initramfs-x86_64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/initramfs-x86_64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/initramfs-x86_64 arm: description: Tinkerbell operating system installation environment (osie) component name: initramfs-aarch64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/initramfs-aarch64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/initramfs-aarch64 kernel: arch: - amd64 @@ -3789,18 +3789,18 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-kernel os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-kernel:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-kernel:v0.8.1-eks-a-v0.0.0-dev-build.1 vmlinuz: amd: description: Tinkerbell operating system installation environment (osie) component name: vmlinuz-x86_64 - uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/vmlinuz-x86_64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/vmlinuz-x86_64 arm: description: Tinkerbell operating system installation environment (osie) component name: vmlinuz-aarch64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/vmlinuz-aarch64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/vmlinuz-aarch64 rufio: arch: - amd64 @@ -3900,7 +3900,7 @@ spec: version: v1.9.3+abcdef1 - bootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.1/bootstrap-components.yaml controller: arch: - amd64 @@ -3909,7 +3909,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-bootstrap-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -3920,8 +3920,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.6.4/metadata.yaml - version: v1.6.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.1/metadata.yaml + version: v1.7.1+abcdef1 bottlerocketHostContainers: admin: arch: @@ -4057,7 +4057,7 @@ spec: version: v0.4.10-rc1+abcdef1 clusterAPI: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.4/core-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.1/core-components.yaml controller: arch: - amd64 @@ -4066,7 +4066,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -4077,11 +4077,11 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.6.4/metadata.yaml - version: v1.6.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.1/metadata.yaml + version: v1.7.1+abcdef1 controlPlane: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.4/control-plane-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.1/control-plane-components.yaml controller: arch: - amd64 @@ -4090,7 +4090,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-control-plane-controller 
os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.6.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -4101,13 +4101,13 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.6.4/metadata.yaml - version: v1.6.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.1/metadata.yaml + version: v1.7.1+abcdef1 docker: clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/cluster-template-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/cluster-template-development.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/infrastructure-components-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/infrastructure-components-development.yaml kubeProxy: arch: - amd64 @@ -4125,10 +4125,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-docker os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.6.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.7.1-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.6.4/metadata.yaml - version: v1.6.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/metadata.yaml + version: v1.7.1+abcdef1 eksD: ami: bottlerocket: {} @@ -4538,7 +4538,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-bootkit os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-bootkit:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-bootkit:v0.8.1-eks-a-v0.0.0-dev-build.1 docker: arch: - amd64 @@ -4547,18 +4547,18 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-docker os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-docker:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-docker:v0.8.1-eks-a-v0.0.0-dev-build.1 initramfs: amd: description: Tinkerbell operating system installation environment (osie) component name: initramfs-x86_64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/initramfs-x86_64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/initramfs-x86_64 arm: description: Tinkerbell operating system installation environment (osie) component name: initramfs-aarch64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/initramfs-aarch64 + uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/initramfs-aarch64 kernel: arch: - amd64 @@ -4567,18 +4567,18 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: hook-kernel os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hook-kernel:9d54933a03f2f4c06322969b06caa18702d17f66-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hook-kernel:v0.8.1-eks-a-v0.0.0-dev-build.1 vmlinuz: amd: description: Tinkerbell operating system installation environment (osie) component name: vmlinuz-x86_64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/vmlinuz-x86_64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/vmlinuz-x86_64 arm: description: Tinkerbell operating system installation environment (osie) component name: vmlinuz-aarch64 - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/9d54933a03f2f4c06322969b06caa18702d17f66/vmlinuz-aarch64 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/hook/v0.8.1/vmlinuz-aarch64 rufio: arch: - amd64 From 305e2bb376af286f0118d589ae23e7c31be23d33 Mon Sep 17 00:00:00 2001 From: Abhay Krishna Date: Wed, 24 Apr 2024 03:30:22 -0700 Subject: [PATCH 094/193] Fix dev release, log paths of missing artifacts (#8041) --- release/cli/pkg/assets/archives/archives.go | 2 +- release/cli/pkg/images/images.go | 6 +++--- release/cli/pkg/operations/download.go | 8 ++++---- release/cli/pkg/util/artifacts/artifacts.go | 4 ++-- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/release/cli/pkg/assets/archives/archives.go b/release/cli/pkg/assets/archives/archives.go index e9d766eb8256..4dc1046ce884 100644 --- a/release/cli/pkg/assets/archives/archives.go +++ b/release/cli/pkg/assets/archives/archives.go @@ -160,7 +160,7 @@ func RTOSArtifactPathGetter(rc *releasetypes.ReleaseConfig, archive *assettypes. 
if rc.DevRelease || rc.ReleaseEnvironment == "development" { sourceS3Key = fmt.Sprintf("%s.%s", archive.OSName, imageExtension) - sourceS3Prefix = fmt.Sprintf("%s/%s/%s/%s/%s/%s", projectPath, eksDReleaseChannel, archive.Format, archive.OSName, archive.OSVersion, latestPath) + sourceS3Prefix = fmt.Sprintf("%s/%s", projectPath, latestPath) } else { sourceS3Key = fmt.Sprintf("%s-%s-eks-a-%d-%s.%s", archive.OSName, diff --git a/release/cli/pkg/images/images.go b/release/cli/pkg/images/images.go index f9324355ba76..eba40a04bc84 100644 --- a/release/cli/pkg/images/images.go +++ b/release/cli/pkg/images/images.go @@ -83,13 +83,13 @@ func PollForExistence(devRelease bool, authConfig *docker.AuthConfiguration, ima bodyStr := string(body) if strings.Contains(bodyStr, "MANIFEST_UNKNOWN") { - return fmt.Errorf("Requested image not found") + return fmt.Errorf("requested image not found") } return nil }) if err != nil { - return fmt.Errorf("retries exhausted waiting for source image %s to be available for copy: %v", imageUri, err) + return fmt.Errorf("retries exhausted waiting for source image [%s] to be available for copy: %v", imageUri, err) } return nil @@ -118,7 +118,7 @@ func CopyToDestination(sourceAuthConfig, releaseAuthConfig *docker.AuthConfigura return nil }) if err != nil { - return fmt.Errorf("retries exhausted performing image copy from source to destination: %v", err) + return fmt.Errorf("retries exhausted performing image copy from source [%s] to destination [%s]: %v", sourceImageUri, releaseImageUri, err) } return nil diff --git a/release/cli/pkg/operations/download.go b/release/cli/pkg/operations/download.go index 6708f852d4d2..64c5b63be6f8 100644 --- a/release/cli/pkg/operations/download.go +++ b/release/cli/pkg/operations/download.go @@ -107,7 +107,7 @@ func handleArchiveDownload(_ context.Context, r *releasetypes.ReleaseConfig, art } objectKey = filepath.Join(latestSourceS3PrefixFromMain, sourceS3Key) } else { - return fmt.Errorf("retries exhausted waiting for archive to be uploaded to source location: %v", err) + return fmt.Errorf("retries exhausted waiting for archive [%s] to be uploaded to source location: %v", objectKey, err) } } @@ -156,7 +156,7 @@ func handleArchiveDownload(_ context.Context, r *releasetypes.ReleaseConfig, art } objectShasumFileKey = filepath.Join(latestSourceS3PrefixFromMain, objectShasumFileName) } else { - return fmt.Errorf("retries exhausted waiting for checksum file to be uploaded to source location: %v", err) + return fmt.Errorf("retries exhausted waiting for checksum file [%s] to be uploaded to source location: %v", objectShasumFileKey, err) } } @@ -180,7 +180,7 @@ func handleManifestDownload(_ context.Context, r *releasetypes.ReleaseConfig, ar err := s3Retrier.Retry(func() error { if !s3.KeyExists(r.SourceBucket, objectKey) { - return fmt.Errorf("Requested object not found") + return fmt.Errorf("requested object not found") } return nil }) @@ -194,7 +194,7 @@ func handleManifestDownload(_ context.Context, r *releasetypes.ReleaseConfig, ar latestSourceS3PrefixFromMain := strings.NewReplacer(r.BuildRepoBranchName, "latest", artifact.Manifest.GitTag, gitTagFromMain).Replace(sourceS3Prefix) objectKey = filepath.Join(latestSourceS3PrefixFromMain, sourceS3Key) } else { - return fmt.Errorf("retries exhausted waiting for archive to be uploaded to source location: %v", err) + return fmt.Errorf("retries exhausted waiting for manifest [%s] to be uploaded to source location: %v", objectKey, err) } } diff --git a/release/cli/pkg/util/artifacts/artifacts.go 
b/release/cli/pkg/util/artifacts/artifacts.go index afbbe2a01d2d..55c2fc309c82 100644 --- a/release/cli/pkg/util/artifacts/artifacts.go +++ b/release/cli/pkg/util/artifacts/artifacts.go @@ -23,11 +23,11 @@ import ( ) func IsObjectNotFoundError(err error) bool { - return err.Error() == "Requested object not found" + return err.Error() == "requested object not found" } func IsImageNotFoundError(err error) bool { - return err.Error() == "Requested image not found" + return err.Error() == "requested image not found" } func GetFakeSHA(hashType int) (string, error) { From 882ad80f8e998ac4ef397feb332714edae7a3b51 Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Thu, 25 Apr 2024 01:33:37 -0700 Subject: [PATCH 095/193] [PR BOT] Generate release testdata files (#8044) --- .../testdata/main-bundle-release.yaml | 74 +++++++++---------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/release/cli/pkg/operations/testdata/main-bundle-release.yaml b/release/cli/pkg/operations/testdata/main-bundle-release.yaml index edec434058bc..3569d00f0ff7 100644 --- a/release/cli/pkg/operations/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/operations/testdata/main-bundle-release.yaml @@ -57,7 +57,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-25-35-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-25-37-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -289,10 +289,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.25.16-eks-d-1-25-35-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.25.16-eks-d-1-25-37-eks-a-v0.0.0-dev-build.1 kubeVersion: v1.25.16 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-25/kubernetes-1-25-eks-35.yaml - name: kubernetes-1-25-eks-35 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-25/kubernetes-1-25-eks-37.yaml + name: kubernetes-1-25-eks-37 ova: bottlerocket: {} raw: @@ -507,7 +507,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap-snow os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-25-35-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-25-37-eks-a-v0.0.0-dev-build.1 components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml kubeVip: @@ -741,7 +741,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: upgrader os: linux - uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-25-35-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-25-37-eks-a-v0.0.0-dev-build.1 vSphere: clusterAPIController: arch: @@ -835,7 +835,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-26-31-eks-a-v0.0.0-dev-build.1 + uri: 
public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-26-33-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -1067,10 +1067,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.26.14-eks-d-1-26-31-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.26.14-eks-d-1-26-33-eks-a-v0.0.0-dev-build.1 kubeVersion: v1.26.14 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-26/kubernetes-1-26-eks-31.yaml - name: kubernetes-1-26-eks-31 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-26/kubernetes-1-26-eks-33.yaml + name: kubernetes-1-26-eks-33 ova: bottlerocket: {} raw: @@ -1285,7 +1285,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap-snow os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-26-31-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-26-33-eks-a-v0.0.0-dev-build.1 components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml kubeVip: @@ -1519,7 +1519,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: upgrader os: linux - uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-26-31-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-26-33-eks-a-v0.0.0-dev-build.1 vSphere: clusterAPIController: arch: @@ -1613,7 +1613,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-27-25-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-27-27-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -1845,10 +1845,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.27.11-eks-d-1-27-25-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.27.11-eks-d-1-27-27-eks-a-v0.0.0-dev-build.1 kubeVersion: v1.27.11 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-27/kubernetes-1-27-eks-25.yaml - name: kubernetes-1-27-eks-25 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-27/kubernetes-1-27-eks-27.yaml + name: kubernetes-1-27-eks-27 ova: bottlerocket: {} raw: @@ -2063,7 +2063,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap-snow os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-27-25-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-27-27-eks-a-v0.0.0-dev-build.1 components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml kubeVip: @@ -2297,7 +2297,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: upgrader os: linux - uri: 
public.ecr.aws/release-container-registry/aws/upgrader:v1-27-25-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-27-27-eks-a-v0.0.0-dev-build.1 vSphere: clusterAPIController: arch: @@ -2391,7 +2391,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-28-18-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-28-20-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -2623,10 +2623,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.28.7-eks-d-1-28-18-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.28.7-eks-d-1-28-20-eks-a-v0.0.0-dev-build.1 kubeVersion: v1.28.7 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-28/kubernetes-1-28-eks-18.yaml - name: kubernetes-1-28-eks-18 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-28/kubernetes-1-28-eks-20.yaml + name: kubernetes-1-28-eks-20 ova: bottlerocket: {} raw: @@ -2841,7 +2841,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap-snow os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-28-18-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-28-20-eks-a-v0.0.0-dev-build.1 components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml kubeVip: @@ -3075,7 +3075,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: upgrader os: linux - uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-28-18-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-28-20-eks-a-v0.0.0-dev-build.1 vSphere: clusterAPIController: arch: @@ -3169,7 +3169,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-29-7-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-29-9-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -3401,10 +3401,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.29.1-eks-d-1-29-7-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.29.1-eks-d-1-29-9-eks-a-v0.0.0-dev-build.1 kubeVersion: v1.29.1 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-29/kubernetes-1-29-eks-7.yaml - name: kubernetes-1-29-eks-7 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-29/kubernetes-1-29-eks-9.yaml + name: kubernetes-1-29-eks-9 ova: bottlerocket: {} raw: @@ -3619,7 +3619,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap-snow os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-29-7-eks-a-v0.0.0-dev-build.1 + 
uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-29-9-eks-a-v0.0.0-dev-build.1 components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml kubeVip: @@ -3853,7 +3853,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: upgrader os: linux - uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-29-7-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-29-9-eks-a-v0.0.0-dev-build.1 vSphere: clusterAPIController: arch: @@ -3947,7 +3947,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-30-1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-30-2-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -4179,10 +4179,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.30.0-rc.0-eks-d-1-30-1-eks-a-v0.0.0-dev-build.1 - kubeVersion: v1.30.0-rc.0 - manifestUrl: https://eks-d-postsubmit-artifacts.s3.us-west-2.amazonaws.com/kubernetes-1-30/kubernetes-1-30-eks-1.yaml - name: kubernetes-1-30-eks-1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.30.0-eks-d-1-30-2-eks-a-v0.0.0-dev-build.1 + kubeVersion: v1.30.0 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-30/kubernetes-1-30-eks-2.yaml + name: kubernetes-1-30-eks-2 ova: bottlerocket: {} raw: @@ -4397,7 +4397,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap-snow os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-30-1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-30-2-eks-a-v0.0.0-dev-build.1 components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml kubeVip: @@ -4631,7 +4631,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: upgrader os: linux - uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-30-1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-30-2-eks-a-v0.0.0-dev-build.1 vSphere: clusterAPIController: arch: From 1f3de757d20876b92f687343932f9cb0d88373a0 Mon Sep 17 00:00:00 2001 From: Saurabh Parekh Date: Thu, 25 Apr 2024 17:59:38 -0700 Subject: [PATCH 096/193] Update validation for the cluster name length (#8046) --- pkg/api/v1alpha1/cluster.go | 9 ++++++--- pkg/api/v1alpha1/cluster_test.go | 6 +++--- pkg/validations/input_test.go | 6 +++--- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/pkg/api/v1alpha1/cluster.go b/pkg/api/v1alpha1/cluster.go index 0d5d0d1ca6e4..c36230b0808b 100644 --- a/pkg/api/v1alpha1/cluster.go +++ b/pkg/api/v1alpha1/cluster.go @@ -396,9 +396,12 @@ func ValidateClusterName(clusterName string) error { } func ValidateClusterNameLength(clusterName string) error { - // vSphere has the maximum length for clusters to be 80 chars - if len(clusterName) > 80 { - return fmt.Errorf("number of characters in %v should be 
less than 81", clusterName) + // docker container hostname can have a maximum length of 64 characters. we append "-eks-a-cluster" + // to get the KinD cluster's name and on top of this, KinD also adds a "-control-plane suffix" to + // the cluster name to arrive at the name for the control plane node (container), which makes the + // control plane node name 64 characters in length. + if len(clusterName) > 35 { + return fmt.Errorf("number of characters in %v should be less than 36", clusterName) } return nil } diff --git a/pkg/api/v1alpha1/cluster_test.go b/pkg/api/v1alpha1/cluster_test.go index 39146ef1e1ee..5a7c5e8b097f 100644 --- a/pkg/api/v1alpha1/cluster_test.go +++ b/pkg/api/v1alpha1/cluster_test.go @@ -79,13 +79,13 @@ func TestClusterNameLength(t *testing.T) { }{ { name: "SuccessClusterNameLength", - clusterName: "qwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnm", + clusterName: "cluster-name-less-than-36-chars", wantErr: nil, }, { name: "FailureClusterNameLength", - clusterName: "qwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnm12345", - wantErr: errors.New("number of characters in qwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnm12345 should be less than 81"), + clusterName: "cluster-name-equals-to-36-characters", + wantErr: errors.New("number of characters in cluster-name-equals-to-36-characters should be less than 36"), }, } diff --git a/pkg/validations/input_test.go b/pkg/validations/input_test.go index 38fc103afb25..53ad3c2d3a5f 100644 --- a/pkg/validations/input_test.go +++ b/pkg/validations/input_test.go @@ -94,9 +94,9 @@ func TestValidateClusterNameArg(t *testing.T) { }, { name: "Failure Cluster Length", - args: []string{"qwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnm12345"}, - expectedError: errors.New("number of characters in qwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnm12345 should be less than 81"), - expectedArg: "qwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnm12345", + args: []string{"cluster-name-equals-to-36-characters"}, + expectedError: errors.New("number of characters in cluster-name-equals-to-36-characters should be less than 36"), + expectedArg: "cluster-name-equals-to-36-characters", }, } From 0f7c5117560b6d4e808fe86e11b3e36e040e9f5c Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Fri, 26 Apr 2024 12:46:39 -0700 Subject: [PATCH 097/193] [PR BOT] Generate release testdata files (#8048) --- .../testdata/main-bundle-release.yaml | 60 +++++++++---------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/release/cli/pkg/operations/testdata/main-bundle-release.yaml b/release/cli/pkg/operations/testdata/main-bundle-release.yaml index 3569d00f0ff7..3ad2b1ab3896 100644 --- a/release/cli/pkg/operations/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/operations/testdata/main-bundle-release.yaml @@ -447,11 +447,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-nutanix os: linux - uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.3-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/infrastructure-components.yaml kubeVip: arch: - amd64 @@ -462,8 +462,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/metadata.yaml - version: v1.3.2+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/metadata.yaml + version: v1.3.3+abcdef1 packageController: credentialProviderPackage: arch: @@ -1225,11 +1225,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-nutanix os: linux - uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.3-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/infrastructure-components.yaml kubeVip: arch: - amd64 @@ -1240,8 +1240,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/metadata.yaml - version: v1.3.2+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/metadata.yaml + version: v1.3.3+abcdef1 packageController: credentialProviderPackage: arch: @@ -2003,11 +2003,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-nutanix os: linux - uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.3-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/cluster-template.yaml components: - 
uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/infrastructure-components.yaml kubeVip: arch: - amd64 @@ -2018,8 +2018,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/metadata.yaml - version: v1.3.2+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/metadata.yaml + version: v1.3.3+abcdef1 packageController: credentialProviderPackage: arch: @@ -2781,11 +2781,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-nutanix os: linux - uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.3-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/infrastructure-components.yaml kubeVip: arch: - amd64 @@ -2796,8 +2796,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/metadata.yaml - version: v1.3.2+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/metadata.yaml + version: v1.3.3+abcdef1 packageController: credentialProviderPackage: arch: @@ -3559,11 +3559,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-nutanix os: linux - uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.3-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/infrastructure-components.yaml + uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/infrastructure-components.yaml kubeVip: arch: - amd64 @@ -3574,8 +3574,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/metadata.yaml - version: v1.3.2+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/metadata.yaml + version: v1.3.3+abcdef1 packageController: credentialProviderPackage: arch: @@ -4337,11 +4337,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-nutanix os: linux - uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.3-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/infrastructure-components.yaml kubeVip: arch: - amd64 @@ -4352,8 +4352,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.2/metadata.yaml - version: v1.3.2+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/metadata.yaml + version: v1.3.3+abcdef1 packageController: credentialProviderPackage: arch: From bb38a2ed2e5e19ee8078a46822b0f78c15eeaaf2 Mon Sep 17 00:00:00 2001 From: Rahul Ganesh <31204974+rahulbabu95@users.noreply.github.com> Date: Fri, 26 Apr 2024 15:03:39 -0700 Subject: [PATCH 098/193] Add changelog for patch 19.4 (#8052) Signed-off-by: Rahul Ganesh Co-authored-by: Rahul Ganesh --- docs/content/en/docs/whatsnew/changelog.md | 26 ++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/docs/content/en/docs/whatsnew/changelog.md b/docs/content/en/docs/whatsnew/changelog.md index f168e6374780..9e39cdd58794 100644 --- a/docs/content/en/docs/whatsnew/changelog.md +++ b/docs/content/en/docs/whatsnew/changelog.md @@ -30,6 +30,32 @@ description: > * When upgrading to a new minor version, a new OS image must be created using the new image-builder CLI pertaining to that release. 
 {{% /alert %}}
+
+## [v0.19.4](https://github.com/aws/eks-anywhere/releases/tag/v0.19.4)
+### Supported OS version details
+| | vSphere | Bare Metal | Nutanix | CloudStack | Snow |
+|:-------------------:|:-------:|:----------:|:-------:|:----------:|:----:|
+| Ubuntu 20.04 | ✔ | ✔ | ✔ | — | ✔ |
+| Ubuntu 22.04 | ✔ | ✔ | ✔ | — | — |
+| Bottlerocket 1.19.2 | ✔ | ✔ | — | — | — |
+| RHEL 8.x | ✔ | ✔ | ✔ | ✔ | — |
+| RHEL 9.x | — | — | ✔ | ✔ | — |
+
+### Changed
+- Support Docs site for penultimate EKS-A version [#8010](https://github.com/aws/eks-anywhere/pull/8010)
+- Update Ubuntu 22.04 ISO URLs to latest stable release [#3114](https://github.com/aws/eks-anywhere-build-tooling/pull/3114)
+- Upgraded EKS-D:
+  - `v1-25-eks-35` to [`v1-25-eks-37`](https://distro.eks.amazonaws.com/releases/1-25/37/)
+  - `v1-26-eks-31` to [`v1-26-eks-33`](https://distro.eks.amazonaws.com/releases/1-26/33/)
+  - `v1-27-eks-25` to [`v1-27-eks-27`](https://distro.eks.amazonaws.com/releases/1-27/27/)
+  - `v1-28-eks-18` to [`v1-28-eks-20`](https://distro.eks.amazonaws.com/releases/1-28/20/)
+  - `v1-29-eks-7` to [`v1-29-eks-9`](https://distro.eks.amazonaws.com/releases/1-29/9/)
+
+### Fixed
+- Added processor for Tinkerbell Template Config [#7816](https://github.com/aws/eks-anywhere/issues/7816)
+- Added nil check for eksa-version when setting etcd url [#8018](https://github.com/aws/eks-anywhere/pull/8018)
+- Fixed registry mirror secret credentials set to empty [#7933](https://github.com/aws/eks-anywhere/pull/7933)
+
 ## [v0.19.3](https://github.com/aws/eks-anywhere/releases/tag/v0.19.3)
 ### Supported OS version details

From b0f98bdc266e73f9d7d9831d8ea0794fe87fad08 Mon Sep 17 00:00:00 2001
From: Saurabh Parekh
Date: Fri, 26 Apr 2024 17:01:39 -0700
Subject: [PATCH 099/193] Update gitops hyperlink in the docs (#8055)

---
 docs/content/en/docs/clustermgmt/cluster-flux.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/content/en/docs/clustermgmt/cluster-flux.md b/docs/content/en/docs/clustermgmt/cluster-flux.md
index 3b95286f15da..01f6635248fc 100755
--- a/docs/content/en/docs/clustermgmt/cluster-flux.md
+++ b/docs/content/en/docs/clustermgmt/cluster-flux.md
@@ -11,13 +11,13 @@ description: >
 ## GitOps Support (optional)

-EKS Anywhere supports a [GitOps](https://www.weave.works/technologies/gitops/) workflow for the management of your cluster.
+EKS Anywhere supports a [GitOps](https://www.gitops.tech/#what-is-gitops) workflow for the management of your cluster.

 When you create a cluster with GitOps enabled, EKS Anywhere will automatically commit your cluster configuration to the provided GitHub repository and install a GitOps toolkit on your cluster which watches that committed configuration file.
 You can then manage the scale of the cluster by making changes to the version controlled cluster configuration file and committing the changes.
 Once a change has been detected by the GitOps controller running in your cluster, the scale of the cluster will be adjusted to match the committed configuration file.

-If you'd like to learn more about GitOps, and the associated best practices, [check out this introduction from Weaveworks](https://www.weave.works/technologies/gitops/).
+If you'd like to learn more about GitOps, and the associated best practices, [check out this introduction from Weaveworks](https://www.gitops.tech/#what-is-gitops).

 >**_NOTE:_** Installing a GitOps controller can be done during cluster creation or through upgrade.
 In the event that GitOps installation fails, EKS Anywhere cluster creation will continue.
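The cluster name length validation a few patches above (#8046) is easiest to follow by working the arithmetic in its code comment. The sketch below is a hypothetical reconstruction, not the repo's actual function: it assumes only the three facts the patch comment states (the "-eks-a-cluster" suffix on the KinD bootstrap cluster name, the "-control-plane" suffix KinD adds for the control plane container, and Docker's 64-character hostname cap), and reuses the test fixtures from the patch to show both outcomes.

```go
package main

import "fmt"

// validateNameLength is an illustrative stand-in for the check in #8046.
// The bootstrap cluster is named "<cluster>-eks-a-cluster", and KinD appends
// "-control-plane" for the control plane container, whose hostname Docker
// caps at 64 characters.
func validateNameLength(clusterName string) error {
	const dockerHostnameMax = 64
	suffixes := len("-eks-a-cluster") + len("-control-plane") // 14 + 14 = 28
	if len(clusterName)+suffixes >= dockerHostnameMax {       // equivalent to len > 35
		return fmt.Errorf("number of characters in %v should be less than 36", clusterName)
	}
	return nil
}

func main() {
	fmt.Println(validateNameLength("cluster-name-equals-to-36-characters")) // 36 + 28 = 64: rejected
	fmt.Println(validateNameLength("cluster-name-less-than-36-chars"))      // 31 + 28 = 59: <nil>
}
```

In other words, the 35-character ceiling is just the 64-character Docker hostname budget minus the two fixed 14-character suffixes, rejecting the boundary case where the control plane container name reaches exactly 64 characters.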
In the event that GitOps installation fails, EKS Anywhere cluster creation will continue. From 6193c375873cf3814eae2d95fdbe81b823edae11 Mon Sep 17 00:00:00 2001 From: Saurabh Parekh Date: Fri, 26 Apr 2024 23:48:39 -0700 Subject: [PATCH 100/193] Fix typo in cluster-terraform.md (#8059) --- docs/content/en/docs/clustermgmt/cluster-terraform.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/content/en/docs/clustermgmt/cluster-terraform.md b/docs/content/en/docs/clustermgmt/cluster-terraform.md index 35dbf7932b50..dc3b286285c5 100644 --- a/docs/content/en/docs/clustermgmt/cluster-terraform.md +++ b/docs/content/en/docs/clustermgmt/cluster-terraform.md @@ -67,7 +67,7 @@ how to scale your EKS Anywhere worker nodes using the Terraform Kubernetes provi 3. Configure the Terraform cluster resource definition generated in step 2 - Set `metadata.generation` as a [computed field](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/manifest#computed-fields). Add the following to your cluster resource configuration ```bash - computed_fields = ["metadata.generated"] + computed_fields = ["metadata.generation"] ``` - Configure the field manager to [force reconcile managed resources](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/manifest#field_manager). Add the following configuration block to your cluster resource: ```bash @@ -79,7 +79,7 @@ how to scale your EKS Anywhere worker nodes using the Terraform Kubernetes provi - Remove the `generation` field from the `metadata` of the cluster - Your Terraform cluster resource should look similar to this: ```bash - computed_fields = ["metadata.generated"] + computed_fields = ["metadata.generation"] field_manager { force_conflicts = true } From 585c5c321a8fe71d02de39c604089969b5787fe7 Mon Sep 17 00:00:00 2001 From: Saurabh Parekh Date: Sat, 27 Apr 2024 10:37:40 -0700 Subject: [PATCH 101/193] Update operating system management overview in the docs (#8064) --- docs/content/en/docs/osmgmt/overview.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/content/en/docs/osmgmt/overview.md b/docs/content/en/docs/osmgmt/overview.md index 3d09a04734d3..d6645f4be6d4 100644 --- a/docs/content/en/docs/osmgmt/overview.md +++ b/docs/content/en/docs/osmgmt/overview.md @@ -22,7 +22,7 @@ Reference the table below for the operating systems supported per deployment opt | Ubuntu | 20.04.x, 22.04.x | | RHEL | 8.x, 9.x* | -*Nutanix only +*Nutanix and CloudStack only With the vSphere, bare metal, Snow, CloudStack and Nutanix deployment options, EKS Anywhere provisions the operating system when new machines are deployed during cluster creation, upgrade, and scaling operations. You can configure the operating system to use through the EKS Anywhere cluster spec, which varies by deployment option. See the deployment option sections below for an overview of how the operating system configuration works per deployment option. 
From 0a9a49574de987b352d95349e4fd98af2dc83b39 Mon Sep 17 00:00:00 2001 From: Saurabh Parekh Date: Sat, 27 Apr 2024 12:31:39 -0700 Subject: [PATCH 102/193] Update operating system management section in the docs (#8063) --- docs/content/en/docs/osmgmt/artifacts.md | 2 +- docs/content/en/docs/osmgmt/overview.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/content/en/docs/osmgmt/artifacts.md b/docs/content/en/docs/osmgmt/artifacts.md index bcc562c73901..f7429b72d6d4 100644 --- a/docs/content/en/docs/osmgmt/artifacts.md +++ b/docs/content/en/docs/osmgmt/artifacts.md @@ -12,7 +12,7 @@ EKS Anywhere supports three different node operating systems: * Bottlerocket: For vSphere and Bare Metal providers * Ubuntu: For vSphere, Bare Metal, Nutanix, and Snow providers -* Red Hat Enterprise Linux (RHEL): For vSphere, CloudStack, and Bare Metal providers +* Red Hat Enterprise Linux (RHEL): For vSphere, CloudStack, Nutanix, and Bare Metal providers Bottlerocket OVAs and images are distributed by the EKS Anywhere project. To build your own Ubuntu-based or RHEL-based EKS Anywhere node, see [Building node images]({{< relref "#building-node-images">}}). diff --git a/docs/content/en/docs/osmgmt/overview.md b/docs/content/en/docs/osmgmt/overview.md index d6645f4be6d4..1c35f72aa736 100644 --- a/docs/content/en/docs/osmgmt/overview.md +++ b/docs/content/en/docs/osmgmt/overview.md @@ -39,4 +39,4 @@ To configure the operating to use for EKS Anywhere clusters on Snow, use the [`S To configure the operating system to use for EKS Anywhere clusters on CloudStack, use the [`CloudStackMachineConfig` `spec.template.name` field]({{< ref "/docs/getting-started/cloudstack/cloud-spec#templateidname-required" >}}). At this time, only RHEL is supported for use with EKS Anywhere clusters on CloudStack. Changing the template name field after cluster creation will result in the deployment of new machines. ## Nutanix -To configure the operating system to use for EKS Anywhere clusters on Nutanix, use the [`NutanixMachineConfig` `spec.image.name` field]({{< ref "/docs/getting-started/nutanix/nutanix-spec#imagename-name-or-uuid-required" >}}) or the image uuid field. At this time, only Ubuntu is supported for use with EKS Anywhere clusters on Nutanix. Changing the image name or uuid field after cluster creation will result in the deployment of new machines. +To configure the operating system to use for EKS Anywhere clusters on Nutanix, use the [`NutanixMachineConfig` `spec.image.name` field]({{< ref "/docs/getting-started/nutanix/nutanix-spec#imagename-name-or-uuid-required" >}}) or the image uuid field. At this time, only Ubuntu and RHEL are supported for use with EKS Anywhere clusters on Nutanix. Changing the image name or uuid field after cluster creation will result in the deployment of new machines. 
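The docs updates above (#8063, #8064) repeat one behavior for every provider: the OS image reference on a machine config is effectively immutable per machine, so changing `spec.image.name` (Nutanix) or a template name (vSphere, CloudStack) triggers replacement machines rather than in-place mutation. A toy illustration of that decision, with hypothetical types and image names that are not taken from the repo:

```go
package main

import "fmt"

// MachineConfig is a toy stand-in for the provider machine configs named in
// the docs; only the image/template field matters for this illustration.
type MachineConfig struct {
	Image string
}

// needsReplacement mirrors the documented semantics: an image or template
// change always means deploying new machines.
func needsReplacement(current, desired MachineConfig) bool {
	return current.Image != desired.Image
}

func main() {
	cur := MachineConfig{Image: "ubuntu-2204-kube-v1.29"} // hypothetical names
	des := MachineConfig{Image: "ubuntu-2204-kube-v1.30"}
	fmt.Println(needsReplacement(cur, des)) // true: roll out new machines
}
```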
From bafe53a8341253b4742bf09bb225e183c6992c04 Mon Sep 17 00:00:00 2001 From: Saurabh Parekh Date: Mon, 29 Apr 2024 08:37:42 -0700 Subject: [PATCH 103/193] Fix word repetitions in tinkerbell docs (#8068) --- .../docs/getting-started/baremetal/tinkerbell-overview.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/content/en/docs/getting-started/baremetal/tinkerbell-overview.md b/docs/content/en/docs/getting-started/baremetal/tinkerbell-overview.md index f871c152e172..c24c070b9160 100644 --- a/docs/content/en/docs/getting-started/baremetal/tinkerbell-overview.md +++ b/docs/content/en/docs/getting-started/baremetal/tinkerbell-overview.md @@ -101,7 +101,7 @@ eksa-system mycluster-md-0-1656099863422-vxh2-provision-task-2 47s The following shows snippets from the `tasks.bmc` output that represent the three tasks: Power Off, enable network boot, and Power On. ```bash -kubectl describe tasks.bmc -n eksa-system eksa-system mycluster-md-0-1656099863422-vxh2-provision-task-0 +kubectl describe tasks.bmc -n eksa-system mycluster-md-0-1656099863422-vxh2-provision-task-0 ``` ``` ... @@ -115,7 +115,7 @@ Status: ``` ```bash -kubectl describe tasks.bmc -n eksa-system eksa-system mycluster-md-0-1656099863422-vxh2-provision-task-1 +kubectl describe tasks.bmc -n eksa-system mycluster-md-0-1656099863422-vxh2-provision-task-1 ``` ``` ... @@ -132,7 +132,7 @@ Status: ``` ```bash -kubectl describe tasks.bmc -n eksa-system eksa-system mycluster-md-0-1656099863422-vxh2-provision-task-2 +kubectl describe tasks.bmc -n eksa-system mycluster-md-0-1656099863422-vxh2-provision-task-2 ``` ``` Task: From 7cffe231e0928fc86e1df0cd46204f3c4baff4e7 Mon Sep 17 00:00:00 2001 From: Saurabh Parekh Date: Mon, 29 Apr 2024 09:02:43 -0700 Subject: [PATCH 104/193] Update security best practices in the docs (#8062) --- docs/content/en/docs/clustermgmt/security/best-practices.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/content/en/docs/clustermgmt/security/best-practices.md b/docs/content/en/docs/clustermgmt/security/best-practices.md index 277f99306cc2..2b36ddb461a9 100644 --- a/docs/content/en/docs/clustermgmt/security/best-practices.md +++ b/docs/content/en/docs/clustermgmt/security/best-practices.md @@ -98,8 +98,7 @@ EKS Anywhere stores sensitive information, like the vSphere credentials and GitH These secret objects are namespaced, for example in the `eksa-system` and `flux-system` namespace, and limiting access to the sensitive namespaces will ensure that these secrets will not be exposed. Additionally, limit access to the underlying node. Access to the node could allow access to the secret content. -EKS Anywhere does not currently support encryption-at-rest for Kubernetes secrets. -EKS Anywhere support for [Key Management Services (KMS)](https://kubernetes.io/docs/tasks/administer-cluster/kms-provider/) is planned. +EKS Anywhere also supports encryption-at-rest for Kubernetes secrets. See [etcd encryption]({{< relref "../../getting-started/optional/etcdencryption" >}}) for more details. 
### The EKS Anywhere `kubeconfig` file From 6c7fc9e58dc146727b21289fea5a7b5867bd5836 Mon Sep 17 00:00:00 2001 From: Rahul Ganesh <31204974+rahulbabu95@users.noreply.github.com> Date: Mon, 29 Apr 2024 20:42:41 -0700 Subject: [PATCH 105/193] Add callout for packages not working with latest patch (#8077) Signed-off-by: Rahul Ganesh Co-authored-by: Rahul Ganesh --- docs/content/en/docs/whatsnew/changelog.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/content/en/docs/whatsnew/changelog.md b/docs/content/en/docs/whatsnew/changelog.md index 9e39cdd58794..8bae2c1fda47 100644 --- a/docs/content/en/docs/whatsnew/changelog.md +++ b/docs/content/en/docs/whatsnew/changelog.md @@ -56,6 +56,8 @@ description: > - Added nil check for eksa-version when setting etcd url [#8018](https://github.com/aws/eks-anywhere/pull/8018) - Fixed registry mirror secret credentials set to empty [#7933](https://github.com/aws/eks-anywhere/pull/7933) +**Note:** The EKS Anywhere Packages workflow doesn't work with the current version because of a bug identified in the associated package controller. The team is actively working on a fix and will release a follow-up patch version shortly. + ## [v0.19.3](https://github.com/aws/eks-anywhere/releases/tag/v0.19.3) ### Supported OS version details From c2e775bad19ee5639ba16a91261a297eab2fba27 Mon Sep 17 00:00:00 2001 From: Rahul Ganesh <31204974+rahulbabu95@users.noreply.github.com> Date: Mon, 29 Apr 2024 23:45:42 -0700 Subject: [PATCH 106/193] Add warning to call out packages workflow for patch v0.19.4 (#8078) * Add warning for packages workflow for patch v0.19.4 Signed-off-by: Rahul Ganesh * improve warning message Signed-off-by: Rahul Ganesh --------- Signed-off-by: Rahul Ganesh --- docs/content/en/docs/whatsnew/changelog.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/content/en/docs/whatsnew/changelog.md b/docs/content/en/docs/whatsnew/changelog.md index 8bae2c1fda47..9b1bce5cc918 100644 --- a/docs/content/en/docs/whatsnew/changelog.md +++ b/docs/content/en/docs/whatsnew/changelog.md @@ -24,6 +24,7 @@ description: > * [Building Ubuntu and Red Hat node images]({{< relref "../osmgmt/artifacts/#building-node-images" >}}) * [Downloading Bottlerocket node images]({{< relref "../osmgmt/artifacts/#download-bottlerocket-node-images" >}}) * [Upgrading an EKS Anywhere cluster]({{< relref "../clustermgmt/cluster-upgrades" >}}) +* EKS Anywhere version `v0.19.4` introduced a regression in the Curated Packages workflow due to a bug in the associated Packages controller version (`v0.4.2`). This will be fixed in the next patch release. {{% /alert %}} {{% alert title="General Information" color="info" %}} @@ -56,8 +57,6 @@ description: > - Added nil check for eksa-version when setting etcd url [#8018](https://github.com/aws/eks-anywhere/pull/8018) - Fixed registry mirror secret credentials set to empty [#7933](https://github.com/aws/eks-anywhere/pull/7933) -**Note:** The EKS Anywhere Packages workflow doesn't work with the current version because of a bug identified in the associated package controller. The team is actively working on a fix and will release a follow-up patch version shortly.
- ## [v0.19.3](https://github.com/aws/eks-anywhere/releases/tag/v0.19.3) ### Supported OS version details From ff52ad2f5cbc0103ec43b4a5d04c41eb111b87ff Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 Apr 2024 10:54:43 -0700 Subject: [PATCH 107/193] Bump golangci/golangci-lint-action from 4 to 5 (#8074) Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 4 to 5. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/v4...v5) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/golangci-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index a52e5288faad..e350fde85912 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -23,7 +23,7 @@ jobs: check-latest: true cache: true - name: golangci-lint - uses: golangci/golangci-lint-action@v4 + uses: golangci/golangci-lint-action@v5 with: version: v1.56.2 only-new-issues: true From 1e31f7d263153f77498ab75de5b69a0dfa19ce70 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 Apr 2024 13:06:47 -0700 Subject: [PATCH 108/193] Bump github.com/onsi/gomega from 1.33.0 to 1.33.1 in /release/cli (#8080) Bumps [github.com/onsi/gomega](https://github.com/onsi/gomega) from 1.33.0 to 1.33.1. - [Release notes](https://github.com/onsi/gomega/releases) - [Changelog](https://github.com/onsi/gomega/blob/master/CHANGELOG.md) - [Commits](https://github.com/onsi/gomega/compare/v1.33.0...v1.33.1) --- updated-dependencies: - dependency-name: github.com/onsi/gomega dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- release/cli/go.mod | 14 +++++++------- release/cli/go.sum | 39 ++++++++++++++++++++------------------- 2 files changed, 27 insertions(+), 26 deletions(-) diff --git a/release/cli/go.mod b/release/cli/go.mod index 1c5198b33ec6..a534fe25d29c 100644 --- a/release/cli/go.mod +++ b/release/cli/go.mod @@ -11,7 +11,7 @@ require ( github.com/ghodss/yaml v1.0.0 github.com/go-logr/logr v1.4.1 github.com/mitchellh/go-homedir v1.1.0 - github.com/onsi/gomega v1.33.0 + github.com/onsi/gomega v1.33.1 github.com/pkg/errors v0.9.1 github.com/spf13/cobra v1.8.0 github.com/spf13/viper v1.18.2 @@ -145,16 +145,16 @@ require ( go.opentelemetry.io/otel/trace v1.20.0 // indirect go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.21.0 // indirect + golang.org/x/crypto v0.22.0 // indirect golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect - golang.org/x/mod v0.14.0 // indirect - golang.org/x/net v0.23.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.24.0 // indirect golang.org/x/oauth2 v0.15.0 // indirect - golang.org/x/sys v0.18.0 // indirect - golang.org/x/term v0.18.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/term v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.17.0 // indirect + golang.org/x/tools v0.20.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect diff --git a/release/cli/go.sum b/release/cli/go.sum index a7354a8a93ce..60560503fd80 100644 --- a/release/cli/go.sum +++ b/release/cli/go.sum @@ -252,7 +252,8 @@ github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfC github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gobuffalo/logger v1.0.6 h1:nnZNpxYo0zx+Aj9RfMPBm+x9zAU2OayFh/xrAWi34HU= github.com/gobuffalo/logger v1.0.6/go.mod h1:J31TBEHR1QLV2683OXTAItYIg8pv2JMHnF/quuAbMjs= github.com/gobuffalo/packd v1.0.1 h1:U2wXfRr4E9DH8IdsDLlRFwTZTK7hLfq9qT/QHXGVe/0= @@ -308,8 +309,8 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 
h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -490,13 +491,13 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= -github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= +github.com/onsi/ginkgo/v2 v2.17.2 h1:7eMhcy3GimbsA3hEnVKdw/PQM9XN9krpKVXsZdph0/g= +github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= -github.com/onsi/gomega v1.33.0 h1:snPCflnZrpMsy94p4lXVEkHo12lmPnc3vY5XBbreexE= -github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxjX3wY= +github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= +github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= @@ -669,8 +670,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -685,8 +686,8 @@ golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.14.0 
h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -711,8 +712,8 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -753,14 +754,14 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -794,8 +795,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY= +golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From a6f327824d4407d0532ce9a580620404477e4962 Mon Sep 17 00:00:00 2001 From: Saurabh Parekh Date: Tue, 30 Apr 2024 21:08:47 -0700 Subject: [PATCH 109/193] Update configuration section in docs (#8076) --- .../cluster-upgrades/airgapped-upgrades.md | 2 +- .../cluster-upgrades/baremetal-upgrades.md | 2 +- .../cluster_clusterNetwork.html | 12 ++-- .../docs/getting-started/airgapped/_index.md | 2 +- .../getting-started/baremetal/bare-spec.md | 60 +++++++++---------- .../baremetal/baremetal-getstarted.md | 2 +- .../getting-started/cloudstack/cloud-spec.md | 22 +++---- .../getting-started/nutanix/nutanix-spec.md | 40 ++++++------- .../en/docs/getting-started/snow/snow-spec.md | 38 ++++++------ .../getting-started/vsphere/vsphere-spec.md | 38 ++++++------ docs/content/en/docs/osmgmt/artifacts.md | 4 +- docs/content/en/docs/osmgmt/overview.md | 2 +- docs/content/en/docs/overview/faq/_index.md | 2 +- 13 files changed, 113 insertions(+), 113 deletions(-) diff --git a/docs/content/en/docs/clustermgmt/cluster-upgrades/airgapped-upgrades.md b/docs/content/en/docs/clustermgmt/cluster-upgrades/airgapped-upgrades.md index b9852a228dbe..207ee1f60001 100644 --- a/docs/content/en/docs/clustermgmt/cluster-upgrades/airgapped-upgrades.md +++ b/docs/content/en/docs/clustermgmt/cluster-upgrades/airgapped-upgrades.md @@ -22,7 +22,7 @@ The procedure to upgrade EKS Anywhere clusters in airgapped environments is simi If the previous steps succeeded, all of the required EKS Anywhere dependencies are now present in your local registry. Before you upgrade your EKS Anywhere cluster, configure `registryMirrorConfiguration` in your EKS Anywhere cluster specification with the information for your local registry. For details see the [Registry Mirror Configuration documentation.]({{< relref "../../getting-started/optional/registrymirror/#registry-mirror-cluster-spec" >}}) ->**_NOTE:_** If you are running EKS Anywhere on bare metal, you must configure `osImageURL` and `hookImagesURLPath` in your EKS Anywhere cluster specification with the location of the upgraded node operating system image and hook OS image. For details, reference the [bare metal configuration documentation.]({{< relref "../../getting-started/baremetal/bare-spec/#osimageurl" >}}) +>**_NOTE:_** If you are running EKS Anywhere on bare metal, you must configure `osImageURL` and `hookImagesURLPath` in your EKS Anywhere cluster specification with the location of the upgraded node operating system image and hook OS image. 
For details, reference the [bare metal configuration documentation.]({{< relref "../../getting-started/baremetal/bare-spec/#osimageurl-optional" >}}) ### Next Steps - [Build upgraded node operating system images for your cluster]({{< relref "../../osmgmt/artifacts/#building-images-for-a-specific-eks-anywhere-version" >}}) diff --git a/docs/content/en/docs/clustermgmt/cluster-upgrades/baremetal-upgrades.md b/docs/content/en/docs/clustermgmt/cluster-upgrades/baremetal-upgrades.md index aed83dd67c67..d2551f2f4a42 100755 --- a/docs/content/en/docs/clustermgmt/cluster-upgrades/baremetal-upgrades.md +++ b/docs/content/en/docs/clustermgmt/cluster-upgrades/baremetal-upgrades.md @@ -108,7 +108,7 @@ spec: ... ``` ->**_NOTE:_** If you have a custom machine image for your nodes in your cluster config yaml or to upgrade a node or group of nodes to a new operating system version (ie. RHEL 8.7 to RHEL 8.8), you may also need to update your [`TinkerbellDatacenterConfig`]({{< relref "../../getting-started/baremetal/bare-spec/#tinkerbelldatacenterconfig-fields" >}}) or [`TinkerbellMachineConfig`]({{< relref "../../getting-started/baremetal/bare-spec/#tinkerbellmachineconfig-fields" >}}) with the new operating system image URL [`osImageURL`]({{< relref "../../getting-started/baremetal/bare-spec/#osimageurl" >}}). +>**_NOTE:_** If you have a custom machine image for your nodes in your cluster config yaml or to upgrade a node or group of nodes to a new operating system version (ie. RHEL 8.7 to RHEL 8.8), you may also need to update your [`TinkerbellDatacenterConfig`]({{< relref "../../getting-started/baremetal/bare-spec/#tinkerbelldatacenterconfig-fields" >}}) or [`TinkerbellMachineConfig`]({{< relref "../../getting-started/baremetal/bare-spec/#tinkerbellmachineconfig-fields" >}}) with the new operating system image URL [`osImageURL`]({{< relref "../../getting-started/baremetal/bare-spec/#osimageurl-optional" >}}). and then you will run the [upgrade cluster command]({{< relref "baremetal-upgrades/#upgrade-cluster-command" >}}). diff --git a/docs/content/en/docs/getting-started/_configuration/cluster_clusterNetwork.html b/docs/content/en/docs/getting-started/_configuration/cluster_clusterNetwork.html index a3f29b337ea7..6e287ea908a5 100644 --- a/docs/content/en/docs/getting-started/_configuration/cluster_clusterNetwork.html +++ b/docs/content/en/docs/getting-started/_configuration/cluster_clusterNetwork.html @@ -8,29 +8,29 @@ ### clusterNetwork.cniConfig (required) CNI plugin configuration. Supports `cilium`. -### clusterNetwork.cniConfig.cilium.policyEnforcementMode +### clusterNetwork.cniConfig.cilium.policyEnforcementMode (optional) Optionally specify a policyEnforcementMode of `default`, `always` or `never`. -### clusterNetwork.cniConfig.cilium.egressMasqueradeInterfaces +### clusterNetwork.cniConfig.cilium.egressMasqueradeInterfaces (optional) Optionally specify a network interface name or interface prefix used for masquerading. See EgressMasqueradeInterfaces option. -### clusterNetwork.cniConfig.cilium.skipUpgrade +### clusterNetwork.cniConfig.cilium.skipUpgrade (optional) When true, skip Cilium maintenance during upgrades. Also see Use a custom CNI. -### clusterNetwork.cniConfig.cilium.routingMode +### clusterNetwork.cniConfig.cilium.routingMode (optional) Optionally specify the routing mode. Accepts `default` and `direct`. Also see RoutingMode option. 
-### clusterNetwork.cniConfig.cilium.ipv4NativeRoutingCIDR +### clusterNetwork.cniConfig.cilium.ipv4NativeRoutingCIDR (optional) Optionally specify the CIDR to use when RoutingMode is set to direct. When specified, Cilium assumes networking for this CIDR is preconfigured and hands traffic destined for that range to the Linux network stack without applying any SNAT. -### clusterNetwork.cniConfig.cilium.ipv6NativeRoutingCIDR +### clusterNetwork.cniConfig.cilium.ipv6NativeRoutingCIDR (optional) Optionally specify the IPv6 CIDR to use when RoutingMode is set to direct. When specified, Cilium assumes networking for this CIDR is preconfigured and hands traffic destined for that range to the Linux network stack without diff --git a/docs/content/en/docs/getting-started/airgapped/_index.md b/docs/content/en/docs/getting-started/airgapped/_index.md index e2de6d3a5926..b11a375786f7 100644 --- a/docs/content/en/docs/getting-started/airgapped/_index.md +++ b/docs/content/en/docs/getting-started/airgapped/_index.md @@ -39,7 +39,7 @@ The process for preparing your airgapped environment for EKS Anywhere is summari If the previous steps succeeded, all of the required EKS Anywhere dependencies are now present in your local registry. Before you create your EKS Anywhere cluster, configure `registryMirrorConfiguration` in your EKS Anywhere cluster specification with the information for your local registry. For details see the [Registry Mirror Configuration documentation.]({{< relref "../../getting-started/optional/registrymirror/#registry-mirror-cluster-spec" >}}) ->**_NOTE:_** If you are running EKS Anywhere on bare metal, you must configure `osImageURL` and `hookImagesURLPath` in your EKS Anywhere cluster specification with the location of your node operating system image and the hook OS image. For details, reference the [bare metal configuration documentation.]({{< relref "../baremetal/bare-spec/#osimageurl" >}}) +>**_NOTE:_** If you are running EKS Anywhere on bare metal, you must configure `osImageURL` and `hookImagesURLPath` in your EKS Anywhere cluster specification with the location of your node operating system image and the hook OS image. For details, reference the [bare metal configuration documentation.]({{< relref "../baremetal/bare-spec/#osimageurl-optional" >}}) ### Next Steps - Review EKS Anywhere [cluster networking requirements]({{< relref "../ports" >}}) diff --git a/docs/content/en/docs/getting-started/baremetal/bare-spec.md b/docs/content/en/docs/getting-started/baremetal/bare-spec.md index 6b256cab3a2a..f8ae2746ad31 100644 --- a/docs/content/en/docs/getting-started/baremetal/bare-spec.md +++ b/docs/content/en/docs/getting-started/baremetal/bare-spec.md @@ -122,7 +122,7 @@ the control plane nodes for kube-apiserver loadbalancing. ### controlPlaneConfiguration.machineGroupRef (required) Refers to the Kubernetes object with Tinkerbell-specific configuration for your nodes. See `TinkerbellMachineConfig Fields` below. -### controlPlaneConfiguration.taints +### controlPlaneConfiguration.taints (optional) A list of taints to apply to the control plane nodes of the cluster. Replaces the default control plane taint (For k8s versions prior to 1.24, `node-role.kubernetes.io/master`. For k8s versions 1.24+, `node-role.kubernetes.io/control-plane`). The default control plane components will tolerate the provided taints. 
@@ -133,29 +133,29 @@ Modifying the taints associated with the control plane configuration will cause Any pods that you run on the control plane nodes must tolerate the taints you provide in the control plane configuration. > -### controlPlaneConfiguration.labels +### controlPlaneConfiguration.labels (optional) A list of labels to apply to the control plane nodes of the cluster. This is in addition to the labels that EKS Anywhere will add by default. Modifying the labels associated with the control plane configuration will cause new nodes to be rolled out, replacing the existing nodes. -#### controlPlaneConfiguration.upgradeRolloutStrategy +#### controlPlaneConfiguration.upgradeRolloutStrategy (optional) Configuration parameters for upgrade strategy. -#### controlPlaneConfiguration.upgradeRolloutStrategy.type +#### controlPlaneConfiguration.upgradeRolloutStrategy.type (optional) Default: `RollingUpdate` Type of rollout strategy. Supported values: `RollingUpdate`,`InPlace`. >**_NOTE:_** The upgrade rollout strategy type must be the same for all control plane and worker nodes. -#### controlPlaneConfiguration.upgradeRolloutStrategy.rollingUpdate +#### controlPlaneConfiguration.upgradeRolloutStrategy.rollingUpdate (optional) Configuration parameters for customizing rolling upgrade behavior. >**_NOTE:_** The rolling update parameters can only be configured if `upgradeRolloutStrategy.type` is `RollingUpdate`. -#### controlPlaneConfiguration.upgradeRolloutStrategy.rollingUpdate.maxSurge +#### controlPlaneConfiguration.upgradeRolloutStrategy.rollingUpdate.maxSurge (optional) Default: 1 This can not be 0 if maxUnavailable is 0. @@ -164,27 +164,27 @@ The maximum number of machines that can be scheduled above the desired number of Example: When this is set to n, the new worker node group can be scaled up immediately by n when the rolling upgrade starts. Total number of machines in the cluster (old + new) never exceeds (desired number of machines + n). Once scale down happens and old machines are brought down, the new worker node group can be scaled up further ensuring that the total number of machines running at any time does not exceed the desired number of machines + n. -### controlPlaneConfiguration.skipLoadBalancerDeployment +### controlPlaneConfiguration.skipLoadBalancerDeployment (optional) Optional field to skip deploying the control plane load balancer. Make sure your infrastructure can handle control plane load balancing when you set this field to true. In most cases, you should not set this field to true. -### datacenterRef +### datacenterRef (required) Refers to the Kubernetes object with Tinkerbell-specific configuration. See `TinkerbellDatacenterConfig Fields` below. ### kubernetesVersion (required) The Kubernetes version you want to use for your cluster. Supported values: `1.28`, `1.27`, `1.26`, `1.25`, `1.24` -### managementCluster +### managementCluster (required) Identifies the name of the management cluster. If your cluster spec is for a standalone or management cluster, this value is the same as the cluster name. -### workerNodeGroupConfigurations +### workerNodeGroupConfigurations (optional) This takes in a list of node groups that you can define for your workers. You can omit `workerNodeGroupConfigurations` when creating Bare Metal clusters. If you omit `workerNodeGroupConfigurations`, control plane nodes will not be tainted and all pods will run on the control plane nodes. This mechanism can be used to deploy Bare Metal clusters on a single server. 
You can also run multi-node Bare Metal clusters without `workerNodeGroupConfigurations`. >**_NOTE:_** Empty `workerNodeGroupConfigurations` is not supported when Kubernetes version <= 1.21. -### workerNodeGroupConfigurations.count +### workerNodeGroupConfigurations.count (optional) Number of worker nodes. Optional if autoscalingConfiguration is used, in which case count will default to `autoscalingConfiguration.minCount`. Refers to [troubleshooting machine health check remediation not allowed]({{< relref "../../troubleshooting/troubleshooting/#machine-health-check-shows-remediation-is-not-allowed" >}}) and choose a sufficient number to allow machine health check remediation. @@ -195,52 +195,52 @@ Refers to the Kubernetes object with Tinkerbell-specific configuration for your ### workerNodeGroupConfigurations.name (required) Name of the worker node group (default: md-0) -### workerNodeGroupConfigurations.autoscalingConfiguration +### workerNodeGroupConfigurations.autoscalingConfiguration (optional) Configuration parameters for Cluster Autoscaler. >**_NOTE:_** Autoscaling configuration is not supported when using the `InPlace` upgrade rollout strategy. -### workerNodeGroupConfigurations.autoscalingConfiguration.minCount +### workerNodeGroupConfigurations.autoscalingConfiguration.minCount (optional) Minimum number of nodes for this node group's autoscaling configuration. -### workerNodeGroupConfigurations.autoscalingConfiguration.maxCount +### workerNodeGroupConfigurations.autoscalingConfiguration.maxCount (optional) Maximum number of nodes for this node group's autoscaling configuration. -### workerNodeGroupConfigurations.taints +### workerNodeGroupConfigurations.taints (optional) A list of taints to apply to the nodes in the worker node group. Modifying the taints associated with a worker node group configuration will cause new nodes to be rolled-out, replacing the existing nodes associated with the configuration. At least one node group must not have `NoSchedule` or `NoExecute` taints applied to it. -### workerNodeGroupConfigurations.labels +### workerNodeGroupConfigurations.labels (optional) A list of labels to apply to the nodes in the worker node group. This is in addition to the labels that EKS Anywhere will add by default. Modifying the labels associated with a worker node group configuration will cause new nodes to be rolled out, replacing the existing nodes associated with the configuration. -### workerNodeGroupConfigurations.kubernetesVersion +### workerNodeGroupConfigurations.kubernetesVersion (optional) The Kubernetes version you want to use for this worker node group. [Supported values]({{< relref "../../concepts/support-versions/#kubernetes-versions" >}}): `1.28`, `1.27`, `1.26`, `1.25`, `1.24` Must be less than or equal to the cluster `kubernetesVersion` defined at the root level of the cluster spec. The worker node kubernetesVersion must be no more than two minor Kubernetes versions lower than the cluster control plane's Kubernetes version. Removing `workerNodeGroupConfiguration.kubernetesVersion` will trigger an upgrade of the node group to the `kubernetesVersion` defined at the root level of the cluster spec. -#### workerNodeGroupConfigurations.upgradeRolloutStrategy +#### workerNodeGroupConfigurations.upgradeRolloutStrategy (optional) Configuration parameters for upgrade strategy. -#### workerNodeGroupConfigurations.upgradeRolloutStrategy.type +#### workerNodeGroupConfigurations.upgradeRolloutStrategy.type (optional) Default: `RollingUpdate` Type of rollout strategy. 
Supported values: `RollingUpdate`,`InPlace`. >**_NOTE:_** The upgrade rollout strategy type must be the same for all control plane and worker nodes. -#### workerNodeGroupConfigurations.upgradeRolloutStrategy.rollingUpdate +#### workerNodeGroupConfigurations.upgradeRolloutStrategy.rollingUpdate (optional) Configuration parameters for customizing rolling upgrade behavior. >**_NOTE:_** The rolling update parameters can only be configured if `upgradeRolloutStrategy.type` is `RollingUpdate`. -#### workerNodeGroupConfigurations.upgradeRolloutStrategy.rollingUpdate.maxSurge +#### workerNodeGroupConfigurations.upgradeRolloutStrategy.rollingUpdate.maxSurge (optional) Default: 1 This can not be 0 if maxUnavailable is 0. @@ -249,7 +249,7 @@ The maximum number of machines that can be scheduled above the desired number of Example: When this is set to n, the new worker node group can be scaled up immediately by n when the rolling upgrade starts. Total number of machines in the cluster (old + new) never exceeds (desired number of machines + n). Once scale down happens and old machines are brought down, the new worker node group can be scaled up further ensuring that the total number of machines running at any time does not exceed the desired number of machines + n. -#### workerNodeGroupConfigurations.upgradeRolloutStrategy.rollingUpdate.maxUnavailable +#### workerNodeGroupConfigurations.upgradeRolloutStrategy.rollingUpdate.maxUnavailable (optional) Default: 0 This can not be 0 if MaxSurge is 0. @@ -260,17 +260,17 @@ Example: When this is set to n, the old worker node group can be scaled down by ## TinkerbellDatacenterConfig Fields -### tinkerbellIP +### tinkerbellIP (required) Required field to identify the IP address of the Tinkerbell service. This IP address must be a unique IP in the network range that does not conflict with other IPs. Once the Tinkerbell services move from the Admin machine to run on the target cluster, this IP address makes it possible for the stack to be used for future provisioning needs. When separate management and workload clusters are supported in Bare Metal, the IP address becomes a necessity. -### osImageURL +### osImageURL (optional) Optional field to replace the default Bottlerocket operating system. EKS Anywhere can only auto-import Bottlerocket. In order to use Ubuntu or RHEL see [building baremetal node images]({{< relref "../../osmgmt/artifacts/#build-bare-metal-node-images" >}}). This field is also useful if you want to provide a customized operating system image or simply host the standard image locally. To upgrade a node or group of nodes to a new operating system version (ie. RHEL 8.7 to RHEL 8.8), modify this field to point to the new operating system image URL and run [upgrade cluster command]({{< relref "../../clustermgmt/cluster-upgrades/baremetal-upgrades/#upgrade-cluster-command" >}}). The `osImageURL` must contain the `Cluster.Spec.KubernetesVersion` or `Cluster.Spec.WorkerNodeGroupConfiguration[].KubernetesVersion` version (in case of modular upgrade). For example, if the Kubernetes version is 1.24, the `osImageURL` name should include 1.24, 1_24, 1-24 or 124. -### hookImagesURLPath +### hookImagesURLPath (optional) Optional field to replace the HookOS image. This field is useful if you want to provide a customized HookOS image or simply host the standard image locally. See [Artifacts]({{< relref "../../osmgmt/artifacts/#hookos-kernel-and-initial-ramdisk-for-bare-metal" >}}) for details. 
@@ -291,19 +291,19 @@ my-web-server └── ubuntu-v1.23.7-eks-a-12-amd64.gz ``` -### skipLoadBalancerDeployment +### skipLoadBalancerDeployment (optional) Optional field to skip deploying the default load balancer for Tinkerbell stack. EKS Anywhere for Bare Metal uses `kube-vip` load balancer by default to expose the Tinkerbell stack externally. You can disable this feature by setting this field to `true`. ->**_NOTE:_** If you skip load balancer deployment, you will have to ensure that the Tinkerbell stack is available at [tinkerbellIP]({{< relref "#tinkerbellip" >}}) once the cluster creation is finished. One way to achieve this is by using the [MetalLB]({{< relref "../../packages/metallb" >}}) package. +>**_NOTE:_** If you skip load balancer deployment, you will have to ensure that the Tinkerbell stack is available at [tinkerbellIP]({{< relref "#tinkerbellip-required" >}}) once the cluster creation is finished. One way to achieve this is by using the [MetalLB]({{< relref "../../packages/metallb" >}}) package. ## TinkerbellMachineConfig Fields In the example, there are `TinkerbellMachineConfig` sections for control plane (`my-cluster-name-cp`) and worker (`my-cluster-name`) machine groups. The following fields identify information needed to configure the nodes in each of those groups. >**_NOTE:_** Currently, you can only have one machine group for all machines in the control plane, although you can have multiple machine groups for the workers. > -### hardwareSelector +### hardwareSelector (optional) Use fields under `hardwareSelector` to add key/value pair labels to match particular machines that you identified in the CSV file where you defined the machines in your cluster. Choose any label name you like. For example, if you had added the label `node=cp-machine` to the machines listed in your CSV file that you want to be control plane nodes, the following `hardwareSelector` field would cause those machines to be added to the control plane: @@ -332,7 +332,7 @@ See TinkerbellTemplateConfig fields below. EKS Anywhere will generate default templates based on `osFamily` during the `create` command. You can override this default template by providing your own template here. -### users +### users (optional) The name of the user you want to configure to access your virtual machines through SSH. The default is `ec2-user`. @@ -472,7 +472,7 @@ spec: Pay special attention to the `BOOTCONFIG_CONTENTS` environment section below if you wish to set up console redirection for the kernel and systemd. If you are only using a direct attached monitor as your primary display device, no additional configuration is needed here. -However, if you need all boot output to be shown via a server’s serial console for example, extra configuration should be provided inside `BOOTCONFIG_CONTENTS`. +However, if you need all boot output to be shown via a server's serial console for example, extra configuration should be provided inside `BOOTCONFIG_CONTENTS`. An empty `kernel {}` key is provided below in the example; inside this key is where you will specify your console devices. You may specify multiple comma delimited console devices in quotes to a console key as such: `console = "tty0", "ttyS0,115200n8"`. 
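To make the serial-console guidance in the `bare-spec.md` hunk above concrete, here is a hedged sketch of the `BOOTCONFIG_CONTENTS` environment entry as it might appear inside a `TinkerbellTemplateConfig` action. Only the `kernel {}` key and the quoted console values come from the docs; the surrounding structure is assumed and the remaining action fields are omitted.

```yaml
environment:
  BOOTCONFIG_CONTENTS: |
    kernel {
      # first console is the direct-attached display; second enables serial redirection
      console = "tty0", "ttyS0,115200n8"
    }
```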
diff --git a/docs/content/en/docs/getting-started/baremetal/baremetal-getstarted.md b/docs/content/en/docs/getting-started/baremetal/baremetal-getstarted.md index 1728608f9cb0..e2c45c3ef580 100644 --- a/docs/content/en/docs/getting-started/baremetal/baremetal-getstarted.md +++ b/docs/content/en/docs/getting-started/baremetal/baremetal-getstarted.md @@ -213,7 +213,7 @@ Follow these steps if you want to use your initial cluster to create and manage > ``` > * For creating multiple workload clusters, it is essential that the hardware labels and selectors defined for a given workload cluster are unique to that workload cluster. For instance, for an EKS Anywhere cluster named `eksa-workload1`, the hardware that is assigned for this cluster should have labels that are only going to be used for this cluster like `type=eksa-workload1-cp` and `type=eksa-workload1-worker`. Another workload cluster named `eksa-workload2` can have labels like `type=eksa-workload2-cp` and `type=eksa-workload2-worker`. Please note that even though labels can be arbitrary, they need to be unique for each workload cluster. Not specifying unique cluster labels can cause cluster creations to behave in unexpected ways which may lead to unsuccessful creations and unstable clusters. - See the [hardware selectors]({{< relref "./bare-spec/#hardwareselector" >}}) section for more information + See the [hardware selectors]({{< relref "./bare-spec/#hardwareselector-optional" >}}) section for more information 1. Check the workload cluster: diff --git a/docs/content/en/docs/getting-started/cloudstack/cloud-spec.md b/docs/content/en/docs/getting-started/cloudstack/cloud-spec.md index 375d30fdfb4e..94235215647c 100644 --- a/docs/content/en/docs/getting-started/cloudstack/cloud-spec.md +++ b/docs/content/en/docs/getting-started/cloudstack/cloud-spec.md @@ -190,7 +190,7 @@ creation process are [here]({{< relref "./cloudstack-prereq/." >}}) ### controlPlaneConfiguration.machineGroupRef (required) Refers to the Kubernetes object with CloudStack specific configuration for your nodes. See `CloudStackMachineConfig Fields` below. -### controlPlaneConfiguration.taints +### controlPlaneConfiguration.taints (optional) A list of taints to apply to the control plane nodes of the cluster. Replaces the default control plane taint, `node-role.kubernetes.io/master`. The default control plane components will tolerate the provided taints. @@ -201,7 +201,7 @@ Modifying the taints associated with the control plane configuration will cause Any pods that you run on the control plane nodes must tolerate the taints you provide in the control plane configuration. > -### controlPlaneConfiguration.labels +### controlPlaneConfiguration.labels (optional) A list of labels to apply to the control plane nodes of the cluster. This is in addition to the labels that EKS Anywhere will add by default. @@ -215,13 +215,13 @@ The `ds.meta_data.failuredomain` value will be replaced with a failuredomain nam Modifying the labels associated with the control plane configuration will cause new nodes to be rolled out, replacing the existing nodes. -### datacenterRef +### datacenterRef (required) Refers to the Kubernetes object with CloudStack environment specific configuration. See `CloudStackDatacenterConfig Fields` below. 
-### externalEtcdConfiguration.count +### externalEtcdConfiguration.count (optional) Number of etcd members -### externalEtcdConfiguration.machineGroupRef +### externalEtcdConfiguration.machineGroupRef (optional) Refers to the Kubernetes object with CloudStack specific configuration for your etcd members. See `CloudStackMachineConfig Fields` below. ### kubernetesVersion (required) @@ -235,7 +235,7 @@ If this is a standalone cluster or if it were serving as the management cluster This takes in a list of node groups that you can define for your workers. You may define one or more worker node groups. -### workerNodeGroupConfigurations.count +### workerNodeGroupConfigurations.count (required) Number of worker nodes. Optional if autoscalingConfiguration is used, in which case count will default to `autoscalingConfiguration.minCount`. Refers to [troubleshooting machine health check remediation not allowed]({{< relref "../../troubleshooting/troubleshooting/#machine-health-check-shows-remediation-is-not-allowed" >}}) and choose a sufficient number to allow machine health check remediation. @@ -246,20 +246,20 @@ Refers to the Kubernetes object with CloudStack specific configuration for your ### workerNodeGroupConfigurations.name (required) Name of the worker node group (default: md-0) -### workerNodeGroupConfigurations.autoscalingConfiguration.minCount +### workerNodeGroupConfigurations.autoscalingConfiguration.minCount (optional) Minimum number of nodes for this node group's autoscaling configuration. -### workerNodeGroupConfigurations.autoscalingConfiguration.maxCount +### workerNodeGroupConfigurations.autoscalingConfiguration.maxCount (optional) Maximum number of nodes for this node group's autoscaling configuration. -### workerNodeGroupConfigurations.taints +### workerNodeGroupConfigurations.taints (optional) A list of taints to apply to the nodes in the worker node group. Modifying the taints associated with a worker node group configuration will cause new nodes to be rolled-out, replacing the existing nodes associated with the configuration. At least one node group must not have `NoSchedule` or `NoExecute` taints applied to it. -### workerNodeGroupConfigurations.labels +### workerNodeGroupConfigurations.labels (optional) A list of labels to apply to the nodes in the worker node group. This is in addition to the labels that EKS Anywhere will add by default. A special label value is supported by the CAPC provider: @@ -273,7 +273,7 @@ The `ds.meta_data.failuredomain` value will be replaced with a failuredomain nam Modifying the labels associated with a worker node group configuration will cause new nodes to be rolled out, replacing the existing nodes associated with the configuration. -### workerNodeGroupConfigurations.kubernetesVersion +### workerNodeGroupConfigurations.kubernetesVersion (optional) The Kubernetes version you want to use for this worker node group. Supported values: 1.28, 1.27, 1.26, 1.25, 1.24 ## CloudStackDatacenterConfig diff --git a/docs/content/en/docs/getting-started/nutanix/nutanix-spec.md b/docs/content/en/docs/getting-started/nutanix/nutanix-spec.md index fe01d944aee5..41f969da39a0 100644 --- a/docs/content/en/docs/getting-started/nutanix/nutanix-spec.md +++ b/docs/content/en/docs/getting-started/nutanix/nutanix-spec.md @@ -190,7 +190,7 @@ creation process are [here]({{< relref "./nutanix-prereq/#prepare-a-nutanix-envi ### workerNodeGroupConfigurations (required) This takes in a list of node groups that you can define for your workers. 
You may define one or more worker node groups. -### workerNodeGroupConfigurations.count +### workerNodeGroupConfigurations.count (required) Number of worker nodes. Optional if `autoscalingConfiguration` is used, in which case count will default to `autoscalingConfiguration.minCount`. Refers to [troubleshooting machine health check remediation not allowed]({{< relref "../../troubleshooting/troubleshooting/#machine-health-check-shows-remediation-is-not-allowed" >}}) and choose a sufficient number to allow machine health check remediation. @@ -201,22 +201,22 @@ Refers to the Kubernetes object with Nutanix specific configuration for your nod ### workerNodeGroupConfigurations.name (required) Name of the worker node group (default: `md-0`) -### workerNodeGroupConfigurations.autoscalingConfiguration.minCount +### workerNodeGroupConfigurations.autoscalingConfiguration.minCount (optional) Minimum number of nodes for this node group’s autoscaling configuration. -### workerNodeGroupConfigurations.autoscalingConfiguration.maxCount +### workerNodeGroupConfigurations.autoscalingConfiguration.maxCount (optional) Maximum number of nodes for this node group’s autoscaling configuration. -### workerNodeGroupConfigurations.kubernetesVersion +### workerNodeGroupConfigurations.kubernetesVersion (optional) The Kubernetes version you want to use for this worker node group. Supported values: 1.28, 1.27, 1.26, 1.25, 1.24 -### externalEtcdConfiguration.count +### externalEtcdConfiguration.count (optional) Number of etcd members -### externalEtcdConfiguration.machineGroupRef +### externalEtcdConfiguration.machineGroupRef (optional) Refers to the Kubernetes object with Nutanix specific configuration for your etcd members. See `NutanixMachineConfig` fields below. -### datacenterRef +### datacenterRef (required) Refers to the Kubernetes object with Nutanix environment specific configuration. See `NutanixDatacenterConfig` fields below. ### kubernetesVersion (required) @@ -254,22 +254,22 @@ __Example__:
## NutanixMachineConfig Fields -### cluster +### cluster (required) Reference to the Prism Element cluster. -### cluster.type +### cluster.type (required) Type to identify the Prism Element cluster. (Permitted values: `name` or `uuid`) -### cluster.name +### cluster.name (required) Name of the Prism Element cluster. -### cluster.uuid +### cluster.uuid (required) UUID of the Prism Element cluster. -### image +### image (required) Reference to the OS image used for the system disk. -### image.type +### image.type (required) Type to identify the OS image. (Permitted values: `name` or `uuid`) ### image.name (`name` or `UUID` required) @@ -280,37 +280,37 @@ The `image.name` must contain the `Cluster.Spec.KubernetesVersion` or `Cluster.S UUID of the image The name of the image associated with the `uuid` must contain the `Cluster.Spec.KubernetesVersion` or `Cluster.Spec.WorkerNodeGroupConfiguration[].KubernetesVersion` version (in case of modular upgrade). For example, if the Kubernetes version is 1.24, the name associated with `image.uuid` field must include 1.24, 1_24, 1-24 or 124. -### memorySize +### memorySize (optional) Size of RAM on virtual machines (Default: `4Gi`) ### osFamily (optional) Operating System on virtual machines. Permitted values: `ubuntu` and `redhat`. (Default: `ubuntu`) -### subnet +### subnet (required) Reference to the subnet to be assigned to the VMs. ### subnet.name (`name` or `UUID` required) Name of the subnet. -### subnet.type +### subnet.type (required) Type to identify the subnet. (Permitted values: `name` or `uuid`) ### subnet.uuid (`name` or `UUID` required) UUID of the subnet. -### systemDiskSize +### systemDiskSize (optional) Amount of storage assigned to the system disk. (Default: `40Gi`) -### vcpuSockets +### vcpuSockets (optional) Amount of vCPU sockets. (Default: `2`) -### vcpusPerSocket +### vcpusPerSocket (optional) Amount of vCPUs per socket. (Default: `1`) ### project (optional) Reference to an existing project used for the virtual machines. -### project.type +### project.type (required) Type to identify the project. (Permitted values: `name` or `uuid`) ### project.name (`name` or `UUID` required) diff --git a/docs/content/en/docs/getting-started/snow/snow-spec.md b/docs/content/en/docs/getting-started/snow/snow-spec.md index 31f81aab96df..0290afde4de2 100644 --- a/docs/content/en/docs/getting-started/snow/snow-spec.md +++ b/docs/content/en/docs/getting-started/snow/snow-spec.md @@ -125,7 +125,7 @@ range that does not conflict with other devices. >**_NOTE:_** This IP should be outside the network DHCP range as it is a floating IP that gets assigned to one of the control plane nodes for kube-apiserver loadbalancing. -### controlPlaneConfiguration.taints +### controlPlaneConfiguration.taints (optional) A list of taints to apply to the control plane nodes of the cluster. Replaces the default control plane taint. For k8s versions prior to 1.24, it replaces `node-role.kubernetes.io/master`. For k8s versions 1.24+, it replaces `node-role.kubernetes.io/control-plane`. The default control plane components will tolerate the provided taints. @@ -136,7 +136,7 @@ Modifying the taints associated with the control plane configuration will cause Any pods that you run on the control plane nodes must tolerate the taints you provide in the control plane configuration. > -### controlPlaneConfiguration.labels +### controlPlaneConfiguration.labels (optional) A list of labels to apply to the control plane nodes of the cluster. 
This is in addition to the labels that EKS Anywhere will add by default. @@ -147,7 +147,7 @@ the existing nodes. This takes in a list of node groups that you can define for your workers. You may define one or more worker node groups. -### workerNodeGroupConfigurations.count +### workerNodeGroupConfigurations.count (required) Number of worker nodes. Optional if autoscalingConfiguration is used, in which case count will default to `autoscalingConfiguration.minCount`. Refers to [troubleshooting machine health check remediation not allowed]({{< relref "../../troubleshooting/troubleshooting/#machine-health-check-shows-remediation-is-not-allowed" >}}) and choose a sufficient number to allow machine health check remediation. @@ -158,36 +158,36 @@ Refers to the Kubernetes object with Snow specific configuration for your nodes. ### workerNodeGroupConfigurations.name (required) Name of the worker node group (default: md-0) -### workerNodeGroupConfigurations.autoscalingConfiguration.minCount +### workerNodeGroupConfigurations.autoscalingConfiguration.minCount (optional) Minimum number of nodes for this node group's autoscaling configuration. -### workerNodeGroupConfigurations.autoscalingConfiguration.maxCount +### workerNodeGroupConfigurations.autoscalingConfiguration.maxCount (optional) Maximum number of nodes for this node group's autoscaling configuration. -### workerNodeGroupConfigurations.taints +### workerNodeGroupConfigurations.taints (optional) A list of taints to apply to the nodes in the worker node group. Modifying the taints associated with a worker node group configuration will cause new nodes to be rolled-out, replacing the existing nodes associated with the configuration. At least one node group must not have `NoSchedule` or `NoExecute` taints applied to it. -### workerNodeGroupConfigurations.labels +### workerNodeGroupConfigurations.labels (optional) A list of labels to apply to the nodes in the worker node group. This is in addition to the labels that EKS Anywhere will add by default. Modifying the labels associated with a worker node group configuration will cause new nodes to be rolled out, replacing the existing nodes associated with the configuration. -### workerNodeGroupConfigurations.kubernetesVersion +### workerNodeGroupConfigurations.kubernetesVersion (optional) The Kubernetes version you want to use for this worker node group. Supported values: 1.28, 1.27, 1.26, 1.25, 1.24 -### externalEtcdConfiguration.count +### externalEtcdConfiguration.count (optional) Number of etcd members. -### externalEtcdConfiguration.machineGroupRef +### externalEtcdConfiguration.machineGroupRef (optional) Refers to the Kubernetes object with Snow specific configuration for your etcd members. See `SnowMachineConfig Fields` below. -### datacenterRef +### datacenterRef (required) Refers to the Kubernetes object with Snow environment specific configuration. See `SnowDatacenterConfig Fields` below. ### kubernetesVersion (required) @@ -195,7 +195,7 @@ The Kubernetes version you want to use for your cluster. Supported values: `1.28 ## SnowDatacenterConfig Fields -### identityRef +### identityRef (required) Refers to the Kubernetes secret object with Snow devices credentials used to reconcile the cluster. ## SnowMachineConfig Fields @@ -241,7 +241,7 @@ Refers to a `SnowIPPool` object which provides a range of ip addresses. When spe ### containersVolume (optional) Configuration option for customizing containers data storage volume. 
-### containersVolume.size +### containersVolume.size (optional) Size of the storage for containerd runtime in Gi. The field is optional for Ubuntu and if specified, the size must be no smaller than 8 Gi. @@ -257,10 +257,10 @@ Type of the containers volume. Permitted values: `sbp1`, `sbg1`. (Default: `sbp1 ### nonRootVolumes (optional) Configuration options for the non root storage volumes. -### nonRootVolumes[0].deviceName +### nonRootVolumes[0].deviceName (optional) Non root volume device name. Must be specified and cannot have prefix "/dev/sda" as it is reserved for root volume and containers volume. -### nonRootVolumes[0].size +### nonRootVolumes[0].size (optional) Size of the storage device for the non root volume. Must be no smaller than 8 Gi. ### nonRootVolumes[0].type (optional) @@ -270,14 +270,14 @@ Type of the non root volume. Permitted values: `sbp1`, `sbg1`. (Default: `sbp1`) ## SnowIPPool Fields -### pools[0].ipStart +### pools[0].ipStart (optional) Start address of an IP range. -### pools[0].ipEnd +### pools[0].ipEnd (optional) End address of an IP range. -### pools[0].subnet +### pools[0].subnet (optional) An IP subnet for determining whether an IP is within the subnet. -### pools[0].gateway +### pools[0].gateway (optional) Gateway of the subnet for routing purpose. diff --git a/docs/content/en/docs/getting-started/vsphere/vsphere-spec.md b/docs/content/en/docs/getting-started/vsphere/vsphere-spec.md index 84d758b66de4..7e4e6f55cdd7 100644 --- a/docs/content/en/docs/getting-started/vsphere/vsphere-spec.md +++ b/docs/content/en/docs/getting-started/vsphere/vsphere-spec.md @@ -34,33 +34,33 @@ spec: machineGroupRef: # vSphere-specific Kubernetes node config (required) kind: VSphereMachineConfig name: my-cluster-machines - taints: # Taints applied to control plane nodes + taints: # Taints applied to control plane nodes - key: "key1" value: "value1" effect: "NoSchedule" - labels: # Labels applied to control plane nodes + labels: # Labels applied to control plane nodes "key1": "value1" "key2": "value2" - datacenterRef: # Kubernetes object with vSphere-specific config + datacenterRef: # Kubernetes object with vSphere-specific config kind: VSphereDatacenterConfig name: my-cluster-datacenter externalEtcdConfiguration: - count: 3 # Number of etcd members - machineGroupRef: # vSphere-specific Kubernetes etcd config + count: 3 # Number of etcd members + machineGroupRef: # vSphere-specific Kubernetes etcd config kind: VSphereMachineConfig name: my-cluster-machines kubernetesVersion: "1.25" # Kubernetes version to use for the cluster (required) workerNodeGroupConfigurations: # List of node groups you can define for workers (required) - - count: 2 # Number of worker nodes + - count: 2 # Number of worker nodes machineGroupRef: # vSphere-specific Kubernetes node objects (required) kind: VSphereMachineConfig name: my-cluster-machines name: md-0 # Name of the worker nodegroup (required) - taints: # Taints to apply to worker node group nodes + taints: # Taints to apply to worker node group nodes - key: "key1" value: "value1" effect: "NoSchedule" - labels: # Labels to apply to worker node group nodes + labels: # Labels to apply to worker node group nodes "key1": "value1" "key2": "value2" --- @@ -137,7 +137,7 @@ range that does not conflict with other VMs. the control plane nodes for kube-apiserver loadbalancing. 
Suggestions on how to ensure this IP does not cause issues during cluster creation process are [here]({{< relref "../vsphere/vsphere-prereq/#prepare-a-vmware-vsphere-environment" >}})
-### controlPlaneConfiguration.taints
+### controlPlaneConfiguration.taints (optional)
A list of taints to apply to the control plane nodes of the cluster. Replaces the default control plane taint. For k8s versions prior to 1.24, it replaces `node-role.kubernetes.io/master`. For k8s versions 1.24+, it replaces `node-role.kubernetes.io/control-plane`. The default control plane components will tolerate the provided taints.
@@ -148,7 +148,7 @@ Modifying the taints associated with the control plane configuration will cause
Any pods that you run on the control plane nodes must tolerate the taints you provide in the control plane configuration.
>
-### controlPlaneConfiguration.labels
+### controlPlaneConfiguration.labels (optional)
A list of labels to apply to the control plane nodes of the cluster. This is in addition to the labels that EKS Anywhere will add by default.
@@ -159,7 +159,7 @@ the existing nodes.
This takes in a list of node groups that you can define for your workers. You may define one or more worker node groups.
-### workerNodeGroupConfigurations.count
+### workerNodeGroupConfigurations.count (required)
Number of worker nodes. Optional if the [cluster autoscaler curated package]({{< relref "../../packages/cluster-autoscaler/addclauto" >}}) is installed and autoscalingConfiguration is used, in which case count will default to `autoscalingConfiguration.minCount`.
Refer to [troubleshooting machine health check remediation not allowed]({{< relref "../../troubleshooting/troubleshooting/#machine-health-check-shows-remediation-is-not-allowed" >}}) and choose a sufficient number to allow machine health check remediation.
@@ -170,38 +170,38 @@ Refers to the Kubernetes object with vsphere specific configuration for your nod
### workerNodeGroupConfigurations.name (required)
Name of the worker node group (default: md-0)
-### workerNodeGroupConfigurations.autoscalingConfiguration.minCount
+### workerNodeGroupConfigurations.autoscalingConfiguration.minCount (optional)
Minimum number of nodes for this node group's autoscaling configuration.
-### workerNodeGroupConfigurations.autoscalingConfiguration.maxCount
+### workerNodeGroupConfigurations.autoscalingConfiguration.maxCount (optional)
Maximum number of nodes for this node group's autoscaling configuration.
-### workerNodeGroupConfigurations.taints
+### workerNodeGroupConfigurations.taints (optional)
A list of taints to apply to the nodes in the worker node group. Modifying the taints associated with a worker node group configuration will cause new nodes to be rolled-out, replacing the existing nodes associated with the configuration.
At least one node group must **NOT** have `NoSchedule` or `NoExecute` taints applied to it.
-### workerNodeGroupConfigurations.labels
+### workerNodeGroupConfigurations.labels (optional)
A list of labels to apply to the nodes in the worker node group. This is in addition to the labels that EKS Anywhere will add by default. Modifying the labels associated with a worker node group configuration will cause new nodes to be rolled out, replacing the existing nodes associated with the configuration.
-### workerNodeGroupConfigurations.kubernetesVersion
+### workerNodeGroupConfigurations.kubernetesVersion (optional)
The Kubernetes version you want to use for this worker node group.
[Supported values]({{< relref "../../concepts/support-versions/#kubernetes-versions" >}}): `1.28`, `1.27`, `1.26`, `1.25`, `1.24`
Must be less than or equal to the cluster `kubernetesVersion` defined at the root level of the cluster spec. The worker node kubernetesVersion must be no more than two minor Kubernetes versions lower than the cluster control plane's Kubernetes version. Removing `workerNodeGroupConfiguration.kubernetesVersion` will trigger an upgrade of the node group to the `kubernetesVersion` defined at the root level of the cluster spec.
-### externalEtcdConfiguration.count
+### externalEtcdConfiguration.count (optional)
Number of etcd members
-### externalEtcdConfiguration.machineGroupRef
+### externalEtcdConfiguration.machineGroupRef (optional)
Refers to the Kubernetes object with vsphere specific configuration for your etcd members. See [VSphereMachineConfig Fields](#vspheremachineconfig-fields) below.
-### datacenterRef
+### datacenterRef (required)
Refers to the Kubernetes object with vsphere environment specific configuration. See [VSphereDatacenterConfig Fields](#vspheredatacenterconfig-fields) below.
### kubernetesVersion (required)
diff --git a/docs/content/en/docs/osmgmt/artifacts.md b/docs/content/en/docs/osmgmt/artifacts.md
index f7429b72d6d4..648fa41f2a20 100644
--- a/docs/content/en/docs/osmgmt/artifacts.md
+++ b/docs/content/en/docs/osmgmt/artifacts.md
@@ -25,7 +25,7 @@ Several code snippets on this page use `curl` and `yq` commands. Refer to the [T
Artifacts for EKS Anywhere Bare Metal clusters are listed below.
If you like, you can download these images and serve them locally to speed up cluster creation.
-See descriptions of the [osImageURL]({{< relref "../getting-started/baremetal/bare-spec/#osimageurl" >}}) and [`hookImagesURLPath`]({{< relref "../getting-started/baremetal/bare-spec#hookimagesurlpath" >}}) fields for details.
+See descriptions of the [`osImageURL`]({{< relref "../getting-started/baremetal/bare-spec/#osimageurl-optional" >}}) and [`hookImagesURLPath`]({{< relref "../getting-started/baremetal/bare-spec#hookimagesurlpath-optional" >}}) fields for details.
### Ubuntu or RHEL OS images for Bare Metal
@@ -627,7 +627,7 @@ These steps use `image-builder` to create an Ubuntu-based or RHEL-based image fo
osImageURL: "http:///my-ubuntu-v1.23.9-eks-a-17-amd64.gz"
```
- See descriptions of [osImageURL]({{< relref "../getting-started/baremetal/bare-spec/#osimageurl" >}}) for further information.
+ See descriptions of [`osImageURL`]({{< relref "../getting-started/baremetal/bare-spec/#osimageurl-optional" >}}) for further information.
### Build CloudStack node images
diff --git a/docs/content/en/docs/osmgmt/overview.md b/docs/content/en/docs/osmgmt/overview.md
index 1c35f72aa736..16af2b435e1d 100644
--- a/docs/content/en/docs/osmgmt/overview.md
+++ b/docs/content/en/docs/osmgmt/overview.md
@@ -30,7 +30,7 @@ With the vSphere, bare metal, Snow, CloudStack and Nutanix deployment options, E
To configure the operating system to use for EKS Anywhere clusters on vSphere, use the [`VSphereMachineConfig` `spec.template` field]({{< ref "/docs/getting-started/vsphere/vsphere-spec#template-optional" >}}). The template name corresponds to the template you imported into your vSphere environment. See the [Customize OVAs]({{< ref "/docs/getting-started/vsphere/customize/customize-ovas" >}}) and [Import OVAs]({{< ref "/docs/getting-started/vsphere/customize/vsphere-ovas" >}}) documentation pages for more information.
Changing the template after cluster creation will result in the deployment of new machines.
## Bare metal
-To configure the operating system to use for EKS Anywhere clusters on bare metal, use the [`TinkerbellDatacenterConfig` `spec.osImageURL` field]({{< ref "/docs/getting-started/baremetal/bare-spec#osimageurl" >}}). This field can be used to stream the operating system from a custom location and is required to use Ubuntu or RHEL. You cannot change the `osImageURL` after creating your cluster. To upgrade the operating system, you must replace the image at the existing `osImageURL` location with a new image. Operating system changes are only deployed when an action that triggers a deployment of new machines is triggered, which includes Kubernetes version upgrades only at this time.
+To configure the operating system to use for EKS Anywhere clusters on bare metal, use the [`TinkerbellDatacenterConfig` `spec.osImageURL` field]({{< ref "/docs/getting-started/baremetal/bare-spec#osimageurl-optional" >}}). This field can be used to stream the operating system from a custom location and is required to use Ubuntu or RHEL. You cannot change the `osImageURL` after creating your cluster. To upgrade the operating system, you must replace the image at the existing `osImageURL` location with a new image. Operating system changes are only deployed when an action that triggers a deployment of new machines is triggered, which includes Kubernetes version upgrades only at this time.
## Snow
To configure the operating system to use for EKS Anywhere clusters on Snow, use the [`SnowMachineConfig` `spec.osFamily` field]({{< ref "/docs/getting-started/snow/snow-spec#osfamily" >}}). At this time, only Ubuntu is supported for use with EKS Anywhere clusters on Snow. You can customize the instance image with the [`SnowMachineConfig` `spec.amiID` field]({{< ref "/docs/getting-started/snow/snow-spec#amiid-optional" >}}) and the instance type with the [`SnowMachineConfig` `spec.instanceType` field]({{< ref "/docs/getting-started/snow/snow-spec#instancetype-optional" >}}). Changes to these fields after cluster creation will result in the deployment of new machines.
diff --git a/docs/content/en/docs/overview/faq/_index.md b/docs/content/en/docs/overview/faq/_index.md
index a13b4691493c..222d99206ebe 100644
--- a/docs/content/en/docs/overview/faq/_index.md
+++ b/docs/content/en/docs/overview/faq/_index.md
@@ -103,4 +103,4 @@ There would need to be a change to the upstream project to support ESXi.
### Can I deploy EKS Anywhere on a single node?
-Yes. Single node cluster deployment is supported for Bare Metal. See [workerNodeGroupConfigurations]({{< relref "../../getting-started/baremetal/bare-spec/#workernodegroupconfigurations">}})
+Yes. Single node cluster deployment is supported for Bare Metal.
See [workerNodeGroupConfigurations]({{< relref "../../getting-started/baremetal/bare-spec/#workernodegroupconfigurations-optional">}}) From fcac575af19a42b8388286d57d002f801a9cc7e8 Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Tue, 30 Apr 2024 23:02:56 -0700 Subject: [PATCH 110/193] [PR BOT] Generate release testdata files (#8082) --- .../testdata/main-bundle-release.yaml | 60 +++++++++---------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/release/cli/pkg/operations/testdata/main-bundle-release.yaml b/release/cli/pkg/operations/testdata/main-bundle-release.yaml index 3ad2b1ab3896..c767fa6b566d 100644 --- a/release/cli/pkg/operations/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/operations/testdata/main-bundle-release.yaml @@ -473,12 +473,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: credential-provider-package os: linux - uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.3-eks-a-v0.0.0-dev-build.1 helmChart: description: Helm chart for eks-anywhere-packages imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.3-eks-a-v0.0.0-dev-build.1 packageController: arch: - amd64 @@ -487,7 +487,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.3-eks-a-v0.0.0-dev-build.1 tokenRefresher: arch: - amd64 @@ -496,8 +496,8 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: ecr-token-refresher os: linux - uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.2-eks-a-v0.0.0-dev-build.1 - version: v0.4.2+abcdef1 + uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.3-eks-a-v0.0.0-dev-build.1 + version: v0.4.3+abcdef1 snow: bottlerocketBootstrapSnow: arch: @@ -1251,12 +1251,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: credential-provider-package os: linux - uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.3-eks-a-v0.0.0-dev-build.1 helmChart: description: Helm chart for eks-anywhere-packages imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.3-eks-a-v0.0.0-dev-build.1 packageController: arch: - amd64 @@ -1265,7 +1265,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.2-eks-a-v0.0.0-dev-build.1 + uri: 
public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.3-eks-a-v0.0.0-dev-build.1 tokenRefresher: arch: - amd64 @@ -1274,8 +1274,8 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: ecr-token-refresher os: linux - uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.2-eks-a-v0.0.0-dev-build.1 - version: v0.4.2+abcdef1 + uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.3-eks-a-v0.0.0-dev-build.1 + version: v0.4.3+abcdef1 snow: bottlerocketBootstrapSnow: arch: @@ -2029,12 +2029,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: credential-provider-package os: linux - uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.3-eks-a-v0.0.0-dev-build.1 helmChart: description: Helm chart for eks-anywhere-packages imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.3-eks-a-v0.0.0-dev-build.1 packageController: arch: - amd64 @@ -2043,7 +2043,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.3-eks-a-v0.0.0-dev-build.1 tokenRefresher: arch: - amd64 @@ -2052,8 +2052,8 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: ecr-token-refresher os: linux - uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.2-eks-a-v0.0.0-dev-build.1 - version: v0.4.2+abcdef1 + uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.3-eks-a-v0.0.0-dev-build.1 + version: v0.4.3+abcdef1 snow: bottlerocketBootstrapSnow: arch: @@ -2807,12 +2807,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: credential-provider-package os: linux - uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.3-eks-a-v0.0.0-dev-build.1 helmChart: description: Helm chart for eks-anywhere-packages imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.3-eks-a-v0.0.0-dev-build.1 packageController: arch: - amd64 @@ -2821,7 +2821,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.3-eks-a-v0.0.0-dev-build.1 tokenRefresher: arch: - amd64 @@ -2830,8 +2830,8 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: ecr-token-refresher os: linux - uri: 
public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.2-eks-a-v0.0.0-dev-build.1 - version: v0.4.2+abcdef1 + uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.3-eks-a-v0.0.0-dev-build.1 + version: v0.4.3+abcdef1 snow: bottlerocketBootstrapSnow: arch: @@ -3585,12 +3585,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: credential-provider-package os: linux - uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.3-eks-a-v0.0.0-dev-build.1 helmChart: description: Helm chart for eks-anywhere-packages imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.3-eks-a-v0.0.0-dev-build.1 packageController: arch: - amd64 @@ -3599,7 +3599,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.3-eks-a-v0.0.0-dev-build.1 tokenRefresher: arch: - amd64 @@ -3608,8 +3608,8 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: ecr-token-refresher os: linux - uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.2-eks-a-v0.0.0-dev-build.1 - version: v0.4.2+abcdef1 + uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.3-eks-a-v0.0.0-dev-build.1 + version: v0.4.3+abcdef1 snow: bottlerocketBootstrapSnow: arch: @@ -4363,12 +4363,12 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: credential-provider-package os: linux - uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/credential-provider-package:v0.4.3-eks-a-v0.0.0-dev-build.1 helmChart: description: Helm chart for eks-anywhere-packages imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.4.3-eks-a-v0.0.0-dev-build.1 packageController: arch: - amd64 @@ -4377,7 +4377,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-packages os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.4.3-eks-a-v0.0.0-dev-build.1 tokenRefresher: arch: - amd64 @@ -4386,8 +4386,8 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: ecr-token-refresher os: linux - uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.2-eks-a-v0.0.0-dev-build.1 - version: v0.4.2+abcdef1 + uri: public.ecr.aws/release-container-registry/ecr-token-refresher:v0.4.3-eks-a-v0.0.0-dev-build.1 + version: v0.4.3+abcdef1 snow: bottlerocketBootstrapSnow: arch: From 
1a833ec0617355bd93c414be7d7a2050990090b0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 Apr 2024 23:14:43 -0700 Subject: [PATCH 111/193] Bump github.com/aws/aws-sdk-go from 1.51.27 to 1.51.31 in /release/cli (#8081) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.51.27 to 1.51.31. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.51.27...v1.51.31) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- release/cli/go.mod | 2 +- release/cli/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/release/cli/go.mod b/release/cli/go.mod index a534fe25d29c..8cbf6df901df 100644 --- a/release/cli/go.mod +++ b/release/cli/go.mod @@ -3,7 +3,7 @@ module github.com/aws/eks-anywhere/release/cli go 1.21 require ( - github.com/aws/aws-sdk-go v1.51.27 + github.com/aws/aws-sdk-go v1.51.31 github.com/aws/aws-sdk-go-v2 v1.26.1 github.com/aws/eks-anywhere v0.18.0 github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e diff --git a/release/cli/go.sum b/release/cli/go.sum index 60560503fd80..ee624224165d 100644 --- a/release/cli/go.sum +++ b/release/cli/go.sum @@ -56,8 +56,8 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:l github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY= github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/aws/aws-sdk-go v1.38.40/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.51.27 h1:ZprksHovT4rFfNBHB+Bc/0p4PTntAnTlZP39DMA/Qp8= -github.com/aws/aws-sdk-go v1.51.27/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.51.31 h1:4TM+sNc+Dzs7wY1sJ0+J8i60c6rkgnKP1pvPx8ghsSY= +github.com/aws/aws-sdk-go v1.51.31/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA= github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e h1:GB6Cn9yKEt31mDF7RrVWyM9WoppNkGYth8zBPIJGJ+w= From 4119858ebd3bb2d1420b98558859145b15034934 Mon Sep 17 00:00:00 2001 From: Saurabh Parekh Date: Wed, 1 May 2024 14:37:59 -0700 Subject: [PATCH 112/193] Fix typo in registry mirror config comment (#8090) --- config/crd/bases/anywhere.eks.amazonaws.com_clusters.yaml | 3 ++- config/manifest/eksa-components.yaml | 3 ++- pkg/api/v1alpha1/cluster_types.go | 2 +- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/config/crd/bases/anywhere.eks.amazonaws.com_clusters.yaml b/config/crd/bases/anywhere.eks.amazonaws.com_clusters.yaml index 0f323ebf4a00..492d5b4c0664 100644 --- a/config/crd/bases/anywhere.eks.amazonaws.com_clusters.yaml +++ b/config/crd/bases/anywhere.eks.amazonaws.com_clusters.yaml @@ -539,7 +539,8 @@ spec: in the local registry type: string registry: - description: Name refers to the name of the upstream registry + description: Registry refers to the name of the upstream + registry type: string required: - namespace diff --git 
a/config/manifest/eksa-components.yaml b/config/manifest/eksa-components.yaml
index 6eb5176e7194..76ff8d1e65da 100644
--- a/config/manifest/eksa-components.yaml
+++ b/config/manifest/eksa-components.yaml
@@ -4242,7 +4242,8 @@ spec:
in the local registry
type: string
registry:
- description: Name refers to the name of the upstream registry
+ description: Registry refers to the name of the upstream
+ registry
type: string
required:
- namespace
diff --git a/pkg/api/v1alpha1/cluster_types.go b/pkg/api/v1alpha1/cluster_types.go
index fbc7f79d04aa..54697ebac7ee 100644
--- a/pkg/api/v1alpha1/cluster_types.go
+++ b/pkg/api/v1alpha1/cluster_types.go
@@ -241,7 +241,7 @@ type RegistryMirrorConfiguration struct {
// OCINamespace represents an entity in a local registry to group related images.
type OCINamespace struct {
- // Name refers to the name of the upstream registry
+ // Registry refers to the name of the upstream registry
Registry string `json:"registry"`
// Namespace refers to the name of a namespace in the local registry
Namespace string `json:"namespace"`
From 054f0284ae2fa45d0a42010b2b6ae5d6973de3ae Mon Sep 17 00:00:00 2001
From: Rahul Ganesh <31204974+rahulbabu95@users.noreply.github.com>
Date: Wed, 1 May 2024 17:27:59 -0700
Subject: [PATCH 113/193] Add changelog for v0.19.5 (#8096)
* Add changelog for v0.19.5
Signed-off-by: Rahul Ganesh
* Add warning for BR for bare metal
Signed-off-by: Rahul Ganesh
---------
Signed-off-by: Rahul Ganesh
Co-authored-by: Rahul Ganesh
---
docs/content/en/docs/whatsnew/changelog.md | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/docs/content/en/docs/whatsnew/changelog.md b/docs/content/en/docs/whatsnew/changelog.md
index 9b1bce5cc918..0ebd4dc914fa 100644
--- a/docs/content/en/docs/whatsnew/changelog.md
+++ b/docs/content/en/docs/whatsnew/changelog.md
@@ -31,6 +31,22 @@ description: >
* When upgrading to a new minor version, a new OS image must be created using the new image-builder CLI pertaining to that release.
{{% /alert %}} +## [v0.19.5](https://github.com/aws/eks-anywhere/releases/tag/v0.19.5) +### Supported OS version details +| | vSphere | Bare Metal | Nutanix | CloudStack | Snow | +|:-------------------:|:-------:|:----------:|:-------:|:----------:|:----:| +| Ubuntu 20.04 | ✔ | ✔ | ✔ | — | ✔ | +| Ubuntu 22.04 | ✔ | ✔ | ✔ | — | — | +| Bottlerocket 1.19.2 | ✔ | ✔ | — | — | — | +| RHEL 8.x | ✔ | ✔ | ✔ | ✔ | — | +| RHEL 9.x | — | — | ✔ | ✔ | — | +* [EKS Anywhere issue regarding deprecation of Bottlerocket bare metal variants](https://github.com/aws/eks-anywhere/issues/7754) + +### Changed +- Upgraded EKS-Anywhere Packages from `v0.4.2` to [`v0.4.3`](https://github.com/aws/eks-anywhere-packages/releases/tag/v0.4.3) + +### Fixed +- Fixed registry mirror with authentication for EKS Anywhere packages ## [v0.19.4](https://github.com/aws/eks-anywhere/releases/tag/v0.19.4) ### Supported OS version details From 53af650f85bb9570788d4c67cd6cb49877c498ef Mon Sep 17 00:00:00 2001 From: Xu Deng Date: Thu, 2 May 2024 13:02:00 -0400 Subject: [PATCH 114/193] Add emitting of e2e test instance metrics (#8089) --- internal/test/e2e/cloudwatch.go | 87 +++++++++++++++++++++++++++++++++ internal/test/e2e/run.go | 1 + 2 files changed, 88 insertions(+) create mode 100644 internal/test/e2e/cloudwatch.go diff --git a/internal/test/e2e/cloudwatch.go b/internal/test/e2e/cloudwatch.go new file mode 100644 index 000000000000..fe0a7e65c54a --- /dev/null +++ b/internal/test/e2e/cloudwatch.go @@ -0,0 +1,87 @@ +package e2e + +import ( + "fmt" + "regexp" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudwatch" + + "github.com/aws/eks-anywhere/pkg/logger" +) + +var svc *cloudwatch.CloudWatch + +func init() { + if s, err := session.NewSession(); err == nil { + svc = cloudwatch.New(s) + } else { + fmt.Println("Cannot create CloudWatch service", err) + } +} + +func putInstanceTestResultMetrics(r instanceTestsResults) { + if svc == nil { + logger.Info("Cannot publish metrics as cloudwatch service was not initialized") + return + } + + logger.Info("Publishing instance test result metrics") + // Note 0 metrics are emitted for the purpose of aggregation. For example, when the succeededCount metrics are [0, 1, 0, 1], we can calculate the success rate as 2 / 4 = 50%. However, when 0 are excluded, the metrics becomes [1, 1], and you would not be able to calculate the success rate from that series. 
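	// As a minimal sketch of the aggregation described in the note above (an
	// editorial illustration, not part of this change; "successRate" is a
	// hypothetical helper): averaging the full sample series reproduces what
	// CloudWatch computes from the SUM and SampleCount statistics.
	//
	//	successRate := func(samples []float64) float64 {
	//		sum := 0.0
	//		for _, s := range samples {
	//			sum += s
	//		}
	//		return sum / float64(len(samples))
	//	}
	//	successRate([]float64{0, 1, 0, 1}) // 0.5, the 50% rate above
	//	successRate([]float64{1, 1})       // 1.0 if the zero samples were dropped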
+ erroredCount, failedCount, succeededCount := 0, 0, 0 + if r.err != nil { + erroredCount = 1 + } else if !r.testCommandResult.Successful() { + failedCount = 1 + } else { + succeededCount = 1 + } + + data := &cloudwatch.MetricDatum{ + Unit: aws.String("Count"), + Dimensions: []*cloudwatch.Dimension{ + { + Name: aws.String("Provider"), + Value: aws.String(getProviderName(r.conf.Regex)), + }, + { + Name: aws.String("BranchName"), + Value: aws.String(r.conf.BranchName), + }, + }, + Timestamp: aws.Time(time.Now()), + } + putMetric(data, "ErroredInstanceTests", erroredCount) + putMetric(data, "FailedInstanceTests", failedCount) + putMetric(data, "SucceededInstanceTests", succeededCount) + + // TODO: publish time metrics + logger.Info("Test instance metrics published") +} + +func getProviderName(testRe string) string { + providerRe := regexp.MustCompile(`Test((?i:vsphere)|(?i:cloudstack)|(?i:snow)|(?i:docker)|(?i:nutanix)|(?i:tinkerbell))`) + provider := []byte("Unknown") + t := providerRe.FindSubmatch([]byte(testRe)) + if len(t) > 1 { + provider = t[1] + } + return strings.ToLower(string(provider)) +} + +func putMetric(data *cloudwatch.MetricDatum, metricName string, value int) { + data.MetricName = aws.String(metricName) + data.Value = aws.Float64(float64(value)) + + if _, err := svc.PutMetricData(&cloudwatch.PutMetricDataInput{ + Namespace: aws.String("EksaE2ETests"), + MetricData: []*cloudwatch.MetricDatum{data}, + }); err != nil { + logger.Error(err, "Cannot put metrics to cloudwatch") + } else { + logger.Info("Instance test result metrics published") + } +} diff --git a/internal/test/e2e/run.go b/internal/test/e2e/run.go index d1417f5c1b69..595fae1db8bb 100644 --- a/internal/test/e2e/run.go +++ b/internal/test/e2e/run.go @@ -169,6 +169,7 @@ func RunTestsInParallel(conf ParallelRunConf) error { "completedInstances", completedInstances, "totalInstances", totalInstances, ) + putInstanceTestResultMetrics(r) } if failedInstances > 0 { From 71dea4b29f18e1a69ab67d577e28b81b38b62194 Mon Sep 17 00:00:00 2001 From: Aravind Ramalingam <60027164+pokearu@users.noreply.github.com> Date: Thu, 2 May 2024 12:02:21 -0700 Subject: [PATCH 115/193] CLI commands support for packages in airgapped admin (#8026) --- cmd/eksctl-anywhere/cmd/common.go | 21 ++++++++++++++++--- cmd/eksctl-anywhere/cmd/generatepackage.go | 21 ++++++++++++++++++- .../cmd/installpackagecontroller.go | 7 ++++++- go.mod | 2 +- 4 files changed, 45 insertions(+), 6 deletions(-) diff --git a/cmd/eksctl-anywhere/cmd/common.go b/cmd/eksctl-anywhere/cmd/common.go index f24cb8a41fd9..9fc05856dc43 100644 --- a/cmd/eksctl-anywhere/cmd/common.go +++ b/cmd/eksctl-anywhere/cmd/common.go @@ -3,12 +3,14 @@ package cmd import ( "context" + anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/dependencies" "github.com/aws/eks-anywhere/pkg/files" "github.com/aws/eks-anywhere/pkg/helm" "github.com/aws/eks-anywhere/pkg/kubeconfig" "github.com/aws/eks-anywhere/pkg/manifests/bundles" + "github.com/aws/eks-anywhere/pkg/registrymirror" "github.com/aws/eks-anywhere/pkg/version" "github.com/aws/eks-anywhere/release/api/v1alpha1" ) @@ -50,7 +52,7 @@ func getKubeconfigPath(clusterName, override string) string { func NewDependenciesForPackages(ctx context.Context, opts ...PackageOpt) (*dependencies.Dependencies, error) { config := New(opts...) - return dependencies.NewFactory(). + f := dependencies.NewFactory(). WithExecutableMountDirs(config.mountPaths...). 
WithCustomBundles(config.bundlesOverride).
WithExecutableBuilder().
@@ -59,8 +61,13 @@ func NewDependenciesForPackages(ctx context.Context, opts ...PackageOpt) (*depen
WithHelm(helm.WithInsecure()).
WithCuratedPackagesRegistry(config.registryName, config.kubeVersion, version.Get()).
WithPackageControllerClient(config.spec, config.kubeConfig).
- WithLogger().
- Build(ctx)
+ WithLogger()
+
+ if config.cluster != nil && config.cluster.Spec.RegistryMirrorConfiguration != nil {
+ f.WithRegistryMirror(registrymirror.FromCluster(config.cluster))
+ }
+
+ return f.Build(ctx)
}
type PackageOpt func(*PackageConfig)
@@ -72,6 +79,7 @@ type PackageConfig struct {
mountPaths []string
spec *cluster.Spec
bundlesOverride string
+ cluster *anywherev1.Cluster
}
func New(options ...PackageOpt) *PackageConfig {
@@ -118,3 +126,10 @@ func WithBundlesOverride(bundlesOverride string) func(*PackageConfig) {
config.bundlesOverride = bundlesOverride
}
}
+
+// WithCluster sets cluster in the config with incoming value.
+func WithCluster(cluster *anywherev1.Cluster) func(config *PackageConfig) {
+ return func(config *PackageConfig) {
+ config.cluster = cluster
+ }
+}
diff --git a/cmd/eksctl-anywhere/cmd/generatepackage.go b/cmd/eksctl-anywhere/cmd/generatepackage.go
index e75896f3f3eb..37a5501e3fc7 100644
--- a/cmd/eksctl-anywhere/cmd/generatepackage.go
+++ b/cmd/eksctl-anywhere/cmd/generatepackage.go
@@ -6,7 +6,11 @@ import (
"log"
"github.com/spf13/cobra"
+ "k8s.io/apimachinery/pkg/types"
+ anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
+ "github.com/aws/eks-anywhere/pkg/clients/kubernetes"
+ "github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/curatedpackages"
"github.com/aws/eks-anywhere/pkg/kubeconfig"
)
@@ -70,7 +74,22 @@ func generatePackages(ctx context.Context, args []string) error {
return err
}
- deps, err := NewDependenciesForPackages(ctx, WithRegistryName(gpOptions.registry), WithKubeVersion(gpOptions.kubeVersion), WithMountPaths(kubeConfig), WithBundlesOverride(gpOptions.bundlesOverride))
+ k8sClient, err := kubernetes.NewRuntimeClientFromFileName(kubeConfig)
+ if err != nil {
+ return fmt.Errorf("unable to initialize k8s client: %v", err)
+ }
+
+ cluster := &anywherev1.Cluster{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{Name: gpOptions.clusterName, Namespace: constants.DefaultNamespace}, cluster); err != nil {
+ return fmt.Errorf("unable to get cluster %s: %v", gpOptions.clusterName, err)
+ }
+
+ deps, err := NewDependenciesForPackages(ctx,
+ WithRegistryName(gpOptions.registry),
+ WithKubeVersion(gpOptions.kubeVersion),
+ WithMountPaths(kubeConfig),
+ WithBundlesOverride(gpOptions.bundlesOverride),
+ WithCluster(cluster))
if err != nil {
return fmt.Errorf("unable to initialize executables: %v", err)
}
diff --git a/cmd/eksctl-anywhere/cmd/installpackagecontroller.go b/cmd/eksctl-anywhere/cmd/installpackagecontroller.go
index 28cb2dd421c2..17e9196f491a 100644
--- a/cmd/eksctl-anywhere/cmd/installpackagecontroller.go
+++ b/cmd/eksctl-anywhere/cmd/installpackagecontroller.go
@@ -59,7 +59,12 @@ func installPackageController(ctx context.Context) error {
return fmt.Errorf("the cluster config file provided is invalid: %v", err)
}
- deps, err := NewDependenciesForPackages(ctx, WithMountPaths(kubeConfig), WithClusterSpec(clusterSpec), WithKubeConfig(ico.kubeConfig), WithBundlesOverride(ico.bundlesOverride))
+ deps, err := NewDependenciesForPackages(ctx,
+ WithMountPaths(kubeConfig),
+ WithClusterSpec(clusterSpec),
+ WithKubeConfig(ico.kubeConfig),
WithBundlesOverride(ico.bundlesOverride), + WithCluster(clusterSpec.Cluster)) if err != nil { return fmt.Errorf("unable to initialize executables: %v", err) } diff --git a/go.mod b/go.mod index ec279d55d694..93041cddb905 100644 --- a/go.mod +++ b/go.mod @@ -46,7 +46,6 @@ require ( golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa golang.org/x/net v0.23.0 golang.org/x/oauth2 v0.15.0 - golang.org/x/sys v0.18.0 golang.org/x/text v0.14.0 gopkg.in/ini.v1 v1.67.0 gopkg.in/yaml.v2 v2.4.0 @@ -183,6 +182,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect golang.org/x/mod v0.14.0 // indirect golang.org/x/sync v0.6.0 // indirect + golang.org/x/sys v0.18.0 // indirect golang.org/x/term v0.18.0 // indirect golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.17.0 // indirect From 49e3c77eeceb60bdb4b9c1d61a1ef0f187355ebf Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Fri, 3 May 2024 00:51:00 -0700 Subject: [PATCH 116/193] [PR BOT] Generate release testdata files (#8095) --- .../testdata/main-bundle-release.yaml | 156 +++++++++--------- 1 file changed, 78 insertions(+), 78 deletions(-) diff --git a/release/cli/pkg/operations/testdata/main-bundle-release.yaml b/release/cli/pkg/operations/testdata/main-bundle-release.yaml index c767fa6b566d..a8c0d5ec17e9 100644 --- a/release/cli/pkg/operations/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/operations/testdata/main-bundle-release.yaml @@ -112,26 +112,26 @@ spec: arch: - amd64 description: Container image for cilium image - imageDigest: sha256:638f2f8f998b7f94afac1a2a2e6b65595fd98fe2cfa379d59ce6d577799c0b79 + imageDigest: sha256:0d071c308d85b33e7c69d14a70ea746d732bc2c557506b0ae2ef388e27b9c443 name: cilium os: linux - uri: public.ecr.aws/isovalent/cilium:v1.13.13-eksa.1 + uri: public.ecr.aws/isovalent/cilium:v1.13.15-eksa.1 helmChart: description: Helm chart for cilium-chart - imageDigest: sha256:6c964c5320db446d25b4badd38b9b128b00c713865488cd54c53098a6c313252 + imageDigest: sha256:3712aa78c13fddd4f96b4a451ed1082b52f5089e70949b0e592e26f076b647de name: cilium-chart - uri: public.ecr.aws/isovalent/cilium:1.13.13-eksa.1 + uri: public.ecr.aws/isovalent/cilium:1.13.15-eksa.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.13-eksa.1/cilium.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.15-eksa.1/cilium.yaml operator: arch: - amd64 description: Container image for operator-generic image - imageDigest: sha256:dad5ec573a36ed1c973b4007c616bdfbcad3c010a3fa361823bf03941c81030d + imageDigest: sha256:872add676d7bc0cc6b26f43e082a172885919e9d96646538744124b3bd4baa06 name: operator-generic os: linux - uri: public.ecr.aws/isovalent/operator-generic:v1.13.13-eksa.1 - version: v1.13.13-eksa.1 + uri: public.ecr.aws/isovalent/operator-generic:v1.13.15-eksa.1 + version: v1.13.15-eksa.1 cloudStack: clusterAPIController: arch: @@ -252,7 +252,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.15/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.16/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz crictl: arch: - amd64 @@ -306,7 +306,7 @@ spec: imageDigest: 
sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cli-tools os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.5-eks-a-v0.0.0-dev-build.1 clusterController: arch: - amd64 @@ -315,9 +315,9 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cluster-controller os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.5-eks-a-v0.0.0-dev-build.1 components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.3/eksa-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.5/eksa-components.yaml diagnosticCollector: arch: - amd64 @@ -326,7 +326,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-diagnostic-collector os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.5-eks-a-v0.0.0-dev-build.1 version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: @@ -890,26 +890,26 @@ spec: arch: - amd64 description: Container image for cilium image - imageDigest: sha256:638f2f8f998b7f94afac1a2a2e6b65595fd98fe2cfa379d59ce6d577799c0b79 + imageDigest: sha256:0d071c308d85b33e7c69d14a70ea746d732bc2c557506b0ae2ef388e27b9c443 name: cilium os: linux - uri: public.ecr.aws/isovalent/cilium:v1.13.13-eksa.1 + uri: public.ecr.aws/isovalent/cilium:v1.13.15-eksa.1 helmChart: description: Helm chart for cilium-chart - imageDigest: sha256:6c964c5320db446d25b4badd38b9b128b00c713865488cd54c53098a6c313252 + imageDigest: sha256:3712aa78c13fddd4f96b4a451ed1082b52f5089e70949b0e592e26f076b647de name: cilium-chart - uri: public.ecr.aws/isovalent/cilium:1.13.13-eksa.1 + uri: public.ecr.aws/isovalent/cilium:1.13.15-eksa.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.13-eksa.1/cilium.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.15-eksa.1/cilium.yaml operator: arch: - amd64 description: Container image for operator-generic image - imageDigest: sha256:dad5ec573a36ed1c973b4007c616bdfbcad3c010a3fa361823bf03941c81030d + imageDigest: sha256:872add676d7bc0cc6b26f43e082a172885919e9d96646538744124b3bd4baa06 name: operator-generic os: linux - uri: public.ecr.aws/isovalent/operator-generic:v1.13.13-eksa.1 - version: v1.13.13-eksa.1 + uri: public.ecr.aws/isovalent/operator-generic:v1.13.15-eksa.1 + version: v1.13.15-eksa.1 cloudStack: clusterAPIController: arch: @@ -1030,7 +1030,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.15/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.16/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz crictl: arch: - amd64 @@ -1084,7 +1084,7 @@ spec: 
imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cli-tools os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.5-eks-a-v0.0.0-dev-build.1 clusterController: arch: - amd64 @@ -1093,9 +1093,9 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cluster-controller os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.5-eks-a-v0.0.0-dev-build.1 components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.3/eksa-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.5/eksa-components.yaml diagnosticCollector: arch: - amd64 @@ -1104,7 +1104,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-diagnostic-collector os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.5-eks-a-v0.0.0-dev-build.1 version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: @@ -1668,26 +1668,26 @@ spec: arch: - amd64 description: Container image for cilium image - imageDigest: sha256:638f2f8f998b7f94afac1a2a2e6b65595fd98fe2cfa379d59ce6d577799c0b79 + imageDigest: sha256:0d071c308d85b33e7c69d14a70ea746d732bc2c557506b0ae2ef388e27b9c443 name: cilium os: linux - uri: public.ecr.aws/isovalent/cilium:v1.13.13-eksa.1 + uri: public.ecr.aws/isovalent/cilium:v1.13.15-eksa.1 helmChart: description: Helm chart for cilium-chart - imageDigest: sha256:6c964c5320db446d25b4badd38b9b128b00c713865488cd54c53098a6c313252 + imageDigest: sha256:3712aa78c13fddd4f96b4a451ed1082b52f5089e70949b0e592e26f076b647de name: cilium-chart - uri: public.ecr.aws/isovalent/cilium:1.13.13-eksa.1 + uri: public.ecr.aws/isovalent/cilium:1.13.15-eksa.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.13-eksa.1/cilium.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.15-eksa.1/cilium.yaml operator: arch: - amd64 description: Container image for operator-generic image - imageDigest: sha256:dad5ec573a36ed1c973b4007c616bdfbcad3c010a3fa361823bf03941c81030d + imageDigest: sha256:872add676d7bc0cc6b26f43e082a172885919e9d96646538744124b3bd4baa06 name: operator-generic os: linux - uri: public.ecr.aws/isovalent/operator-generic:v1.13.13-eksa.1 - version: v1.13.13-eksa.1 + uri: public.ecr.aws/isovalent/operator-generic:v1.13.15-eksa.1 + version: v1.13.15-eksa.1 cloudStack: clusterAPIController: arch: @@ -1808,7 +1808,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.15/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.16/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz crictl: arch: - amd64 @@ -1862,7 
+1862,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cli-tools os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.5-eks-a-v0.0.0-dev-build.1 clusterController: arch: - amd64 @@ -1871,9 +1871,9 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cluster-controller os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.5-eks-a-v0.0.0-dev-build.1 components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.3/eksa-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.5/eksa-components.yaml diagnosticCollector: arch: - amd64 @@ -1882,7 +1882,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-diagnostic-collector os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.5-eks-a-v0.0.0-dev-build.1 version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: @@ -2446,26 +2446,26 @@ spec: arch: - amd64 description: Container image for cilium image - imageDigest: sha256:638f2f8f998b7f94afac1a2a2e6b65595fd98fe2cfa379d59ce6d577799c0b79 + imageDigest: sha256:0d071c308d85b33e7c69d14a70ea746d732bc2c557506b0ae2ef388e27b9c443 name: cilium os: linux - uri: public.ecr.aws/isovalent/cilium:v1.13.13-eksa.1 + uri: public.ecr.aws/isovalent/cilium:v1.13.15-eksa.1 helmChart: description: Helm chart for cilium-chart - imageDigest: sha256:6c964c5320db446d25b4badd38b9b128b00c713865488cd54c53098a6c313252 + imageDigest: sha256:3712aa78c13fddd4f96b4a451ed1082b52f5089e70949b0e592e26f076b647de name: cilium-chart - uri: public.ecr.aws/isovalent/cilium:1.13.13-eksa.1 + uri: public.ecr.aws/isovalent/cilium:1.13.15-eksa.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.13-eksa.1/cilium.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.15-eksa.1/cilium.yaml operator: arch: - amd64 description: Container image for operator-generic image - imageDigest: sha256:dad5ec573a36ed1c973b4007c616bdfbcad3c010a3fa361823bf03941c81030d + imageDigest: sha256:872add676d7bc0cc6b26f43e082a172885919e9d96646538744124b3bd4baa06 name: operator-generic os: linux - uri: public.ecr.aws/isovalent/operator-generic:v1.13.13-eksa.1 - version: v1.13.13-eksa.1 + uri: public.ecr.aws/isovalent/operator-generic:v1.13.15-eksa.1 + version: v1.13.15-eksa.1 cloudStack: clusterAPIController: arch: @@ -2586,7 +2586,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.15/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.16/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz crictl: arch: - 
amd64 @@ -2640,7 +2640,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cli-tools os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.5-eks-a-v0.0.0-dev-build.1 clusterController: arch: - amd64 @@ -2649,9 +2649,9 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cluster-controller os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.5-eks-a-v0.0.0-dev-build.1 components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.3/eksa-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.5/eksa-components.yaml diagnosticCollector: arch: - amd64 @@ -2660,7 +2660,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-diagnostic-collector os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.5-eks-a-v0.0.0-dev-build.1 version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: @@ -3224,26 +3224,26 @@ spec: arch: - amd64 description: Container image for cilium image - imageDigest: sha256:638f2f8f998b7f94afac1a2a2e6b65595fd98fe2cfa379d59ce6d577799c0b79 + imageDigest: sha256:0d071c308d85b33e7c69d14a70ea746d732bc2c557506b0ae2ef388e27b9c443 name: cilium os: linux - uri: public.ecr.aws/isovalent/cilium:v1.13.13-eksa.1 + uri: public.ecr.aws/isovalent/cilium:v1.13.15-eksa.1 helmChart: description: Helm chart for cilium-chart - imageDigest: sha256:6c964c5320db446d25b4badd38b9b128b00c713865488cd54c53098a6c313252 + imageDigest: sha256:3712aa78c13fddd4f96b4a451ed1082b52f5089e70949b0e592e26f076b647de name: cilium-chart - uri: public.ecr.aws/isovalent/cilium:1.13.13-eksa.1 + uri: public.ecr.aws/isovalent/cilium:1.13.15-eksa.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.13-eksa.1/cilium.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.15-eksa.1/cilium.yaml operator: arch: - amd64 description: Container image for operator-generic image - imageDigest: sha256:dad5ec573a36ed1c973b4007c616bdfbcad3c010a3fa361823bf03941c81030d + imageDigest: sha256:872add676d7bc0cc6b26f43e082a172885919e9d96646538744124b3bd4baa06 name: operator-generic os: linux - uri: public.ecr.aws/isovalent/operator-generic:v1.13.13-eksa.1 - version: v1.13.13-eksa.1 + uri: public.ecr.aws/isovalent/operator-generic:v1.13.15-eksa.1 + version: v1.13.15-eksa.1 cloudStack: clusterAPIController: arch: @@ -3364,7 +3364,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.15/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.16/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz 
crictl: arch: - amd64 @@ -3418,7 +3418,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cli-tools os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.5-eks-a-v0.0.0-dev-build.1 clusterController: arch: - amd64 @@ -3427,9 +3427,9 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cluster-controller os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.5-eks-a-v0.0.0-dev-build.1 components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.3/eksa-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.5/eksa-components.yaml diagnosticCollector: arch: - amd64 @@ -3438,7 +3438,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-diagnostic-collector os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.5-eks-a-v0.0.0-dev-build.1 version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: @@ -4002,26 +4002,26 @@ spec: arch: - amd64 description: Container image for cilium image - imageDigest: sha256:638f2f8f998b7f94afac1a2a2e6b65595fd98fe2cfa379d59ce6d577799c0b79 + imageDigest: sha256:0d071c308d85b33e7c69d14a70ea746d732bc2c557506b0ae2ef388e27b9c443 name: cilium os: linux - uri: public.ecr.aws/isovalent/cilium:v1.13.13-eksa.1 + uri: public.ecr.aws/isovalent/cilium:v1.13.15-eksa.1 helmChart: description: Helm chart for cilium-chart - imageDigest: sha256:6c964c5320db446d25b4badd38b9b128b00c713865488cd54c53098a6c313252 + imageDigest: sha256:3712aa78c13fddd4f96b4a451ed1082b52f5089e70949b0e592e26f076b647de name: cilium-chart - uri: public.ecr.aws/isovalent/cilium:1.13.13-eksa.1 + uri: public.ecr.aws/isovalent/cilium:1.13.15-eksa.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.13-eksa.1/cilium.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.15-eksa.1/cilium.yaml operator: arch: - amd64 description: Container image for operator-generic image - imageDigest: sha256:dad5ec573a36ed1c973b4007c616bdfbcad3c010a3fa361823bf03941c81030d + imageDigest: sha256:872add676d7bc0cc6b26f43e082a172885919e9d96646538744124b3bd4baa06 name: operator-generic os: linux - uri: public.ecr.aws/isovalent/operator-generic:v1.13.13-eksa.1 - version: v1.13.13-eksa.1 + uri: public.ecr.aws/isovalent/operator-generic:v1.13.15-eksa.1 + version: v1.13.15-eksa.1 cloudStack: clusterAPIController: arch: @@ -4142,7 +4142,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.15/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.16/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz crictl: arch: - amd64 @@ -4196,7 +4196,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cli-tools os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.5-eks-a-v0.0.0-dev-build.1 clusterController: arch: - amd64 @@ -4205,9 +4205,9 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cluster-controller os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.5-eks-a-v0.0.0-dev-build.1 components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.3/eksa-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.5/eksa-components.yaml diagnosticCollector: arch: - amd64 @@ -4216,7 +4216,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-diagnostic-collector os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.5-eks-a-v0.0.0-dev-build.1 version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: From 5f6ce7e27e3cb85aa5bee46d70093dc8778d116c Mon Sep 17 00:00:00 2001 From: Xu Deng Date: Fri, 3 May 2024 13:32:01 -0400 Subject: [PATCH 117/193] Change the cloudwatch namespace of quick e2e test metrics (#8101) --- .../build/buildspecs/quick-test-eks-a-cli.yml | 1 + internal/test/e2e/cloudwatch.go | 15 ++++++++++++--- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml index 2c519ed25e66..d17f786e86e6 100644 --- a/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml +++ b/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml @@ -2,6 +2,7 @@ version: 0.2 env: variables: + INTEGRATION_TEST_CLOUDWATCH_NAMESPACE_OVERRIDE: EksaQuickE2ETests INTEGRATION_TEST_MAX_EC2_COUNT: 180 INTEGRATION_TEST_MAX_CONCURRENT_TEST_COUNT: 180 EKSA_GIT_KNOWN_HOSTS: "/tmp/known_hosts" diff --git a/internal/test/e2e/cloudwatch.go b/internal/test/e2e/cloudwatch.go index fe0a7e65c54a..461e8ab2f972 100644 --- a/internal/test/e2e/cloudwatch.go +++ b/internal/test/e2e/cloudwatch.go @@ -2,6 +2,7 @@ package e2e import ( "fmt" + "os" "regexp" "strings" "time" @@ -15,6 +16,8 @@ import ( var svc *cloudwatch.CloudWatch +const integrationTestCloudWatchNamespaceOverrideEnvVar = "INTEGRATION_TEST_CLOUDWATCH_NAMESPACE_OVERRIDE" + func init() { if s, err := session.NewSession(); err == nil { svc = cloudwatch.New(s) @@ -76,12 +79,18 @@ func putMetric(data *cloudwatch.MetricDatum, metricName string, value int) { data.MetricName = aws.String(metricName) data.Value = aws.Float64(float64(value)) + namespace := "EksaE2ETests" + namespaceOverride := os.Getenv(integrationTestCloudWatchNamespaceOverrideEnvVar) + if namespaceOverride != "" { + namespace = namespaceOverride + } + if _, err := 
svc.PutMetricData(&cloudwatch.PutMetricDataInput{
-		Namespace:  aws.String("EksaE2ETests"),
+		Namespace:  aws.String(namespace),
 		MetricData: []*cloudwatch.MetricDatum{data},
 	}); err != nil {
-		logger.Error(err, "Cannot put metrics to cloudwatch")
+		logger.Error(err, "Cannot put metrics to cloudwatch", "metricName", metricName, "value", value)
 	} else {
-		logger.Info("Instance test result metrics published")
+		logger.Info("Instance test result metrics published", "metricName", metricName, "value", value)
 	}
 }
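The namespace fallback this patch adds to `putMetric` is small enough to exercise on its own. The following standalone sketch is illustrative only — the `main` package and hard-coded values are not part of the repository — but it mirrors the resolution logic and the override the buildspec above exports:

```go
package main

import (
	"fmt"
	"os"
)

// resolveNamespace mirrors the fallback added to putMetric: prefer the
// override environment variable when set, otherwise use the default
// namespace for E2E test metrics.
func resolveNamespace() string {
	namespace := "EksaE2ETests"
	if v := os.Getenv("INTEGRATION_TEST_CLOUDWATCH_NAMESPACE_OVERRIDE"); v != "" {
		namespace = v
	}
	return namespace
}

func main() {
	fmt.Println(resolveNamespace()) // "EksaE2ETests" when no override is set

	// The quick-test buildspec exports the override, so quick E2E runs
	// publish their metrics under a separate CloudWatch namespace.
	os.Setenv("INTEGRATION_TEST_CLOUDWATCH_NAMESPACE_OVERRIDE", "EksaQuickE2ETests")
	fmt.Println(resolveNamespace()) // "EksaQuickE2ETests"
}
```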
From 058237ca2e59d60921967453bfad39296f57f889 Mon Sep 17 00:00:00 2001
From: Rahul Ganesh <31204974+rahulbabu95@users.noreply.github.com>
Date: Fri, 3 May 2024 11:21:01 -0700
Subject: [PATCH 118/193] Combine multiple upgrades for tinkerbell tests into single test (#8102)

* Combine multiple upgrades for tinkerbell tests into single test

Signed-off-by: Rahul Ganesh

* Generate cluster config for test

Signed-off-by: Rahul Ganesh

* Add the test to hardware count file and skip single version upgrade tests

Signed-off-by: Rahul Ganesh

---------

Signed-off-by: Rahul Ganesh
Co-authored-by: Rahul Ganesh
---
 test/e2e/SKIPPED_TESTS.yaml             |  5 +++
 test/e2e/TINKERBELL_HARDWARE_COUNT.yaml |  3 +-
 test/e2e/tinkerbell_test.go             | 58 +++++++++++++++++++++++++
 test/e2e/upgrade.go                     | 13 ++++++
 4 files changed, 78 insertions(+), 1 deletion(-)

diff --git a/test/e2e/SKIPPED_TESTS.yaml b/test/e2e/SKIPPED_TESTS.yaml
index cf183bd73ba5..75d398e9b0d5 100644
--- a/test/e2e/SKIPPED_TESTS.yaml
+++ b/test/e2e/SKIPPED_TESTS.yaml
@@ -81,6 +81,11 @@ skipped_tests:
 - TestTinkerbellKubernetes125UbuntuWorkerNodeScaleUpWithAPI
 - TestTinkerbellKubernetes125UbuntuAddWorkerNodeGroupWithAPI
 - TestTinkerbellSingleNode125ManagementScaleupWorkloadWithAPI
+# Skip single K8s version upgrade tests, as they are covered by the multiple K8s version upgrade test from 1.25 to 1.29; this saves hardware resources and running time
+- TestTinkerbellKubernetes125UbuntuTo126Upgrade
+- TestTinkerbellKubernetes126UbuntuTo127Upgrade
+- TestTinkerbellKubernetes127UbuntuTo128Upgrade
+- TestTinkerbellKubernetes128UbuntuTo129Upgrade
 # Tinkerbell Packages
 # Skip test cases for packages other than hello-eks-anywhere and not for K 1.28.
 - TestTinkerbellKubernetes126UbuntuSingleNodeCuratedPackagesEmissaryFlow
diff --git a/test/e2e/TINKERBELL_HARDWARE_COUNT.yaml b/test/e2e/TINKERBELL_HARDWARE_COUNT.yaml
index 4cae24499de4..ec5a7b189bdf 100644
--- a/test/e2e/TINKERBELL_HARDWARE_COUNT.yaml
+++ b/test/e2e/TINKERBELL_HARDWARE_COUNT.yaml
@@ -126,4 +126,5 @@ TestTinkerbellKubernetes125UbuntuTo126SingleNodeInPlaceUpgrade: 1
 TestTinkerbellKubernetes126UbuntuTo127SingleNodeInPlaceUpgrade: 1
 TestTinkerbellKubernetes127UbuntuTo128SingleNodeInPlaceUpgrade: 1
 TestTinkerbellKubernetes128UbuntuTo129SingleNodeInPlaceUpgrade: 1
-TestTinkerbellKubernetes128UpgradeManagementComponents: 2
\ No newline at end of file
+TestTinkerbellKubernetes128UpgradeManagementComponents: 2
+TestTinkerbellKubernetes125UbuntuTo129MultipleUpgrade: 4
\ No newline at end of file
diff --git a/test/e2e/tinkerbell_test.go b/test/e2e/tinkerbell_test.go
index ba209136228e..99cc9281942f 100644
--- a/test/e2e/tinkerbell_test.go
+++ b/test/e2e/tinkerbell_test.go
@@ -1649,3 +1649,61 @@ func TestTinkerbellKubernetes128UpgradeManagementComponents(t *testing.T) {
 	test.RunEKSA([]string{"upgrade", "management-components", "-f", test.ClusterConfigLocation, "-v", "99"})
 	test.DeleteCluster()
 }
+
+// TestTinkerbellKubernetes125UbuntuTo129MultipleUpgrade creates a single 1.25 cluster and upgrades it
+// all the way to 1.29. This exercises each K8s version upgrade in one test and saves the hardware
+// that a separate create-and-upgrade test for each version would otherwise require.
+func TestTinkerbellKubernetes125UbuntuTo129MultipleUpgrade(t *testing.T) {
+	var kube126clusterOpts []framework.ClusterE2ETestOpt
+	var kube127clusterOpts []framework.ClusterE2ETestOpt
+	var kube128clusterOpts []framework.ClusterE2ETestOpt
+	var kube129clusterOpts []framework.ClusterE2ETestOpt
+	provider := framework.NewTinkerbell(t, framework.WithUbuntu125Tinkerbell())
+	test := framework.NewClusterE2ETest(
+		t,
+		provider,
+		framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
+		framework.WithClusterFiller(api.WithControlPlaneCount(1)),
+		framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
+		framework.WithControlPlaneHardware(2),
+		framework.WithWorkerHardware(2),
+	).WithClusterConfig(
+		provider.WithKubeVersionAndOS(v1alpha1.Kube125, framework.Ubuntu2004, nil),
+	)
+
+	kube126clusterOpts = append(
+		kube126clusterOpts,
+		framework.WithClusterUpgrade(
+			api.WithKubernetesVersion(v1alpha1.Kube126),
+		),
+		provider.WithProviderUpgrade(framework.Ubuntu126Image()),
+	)
+	kube127clusterOpts = append(
+		kube127clusterOpts,
+		framework.WithClusterUpgrade(
+			api.WithKubernetesVersion(v1alpha1.Kube127),
+		),
+		provider.WithProviderUpgrade(framework.Ubuntu127Image()),
+	)
+	kube128clusterOpts = append(
+		kube128clusterOpts,
+		framework.WithClusterUpgrade(
+			api.WithKubernetesVersion(v1alpha1.Kube128),
+		),
+		provider.WithProviderUpgrade(framework.Ubuntu128Image()),
+	)
+	kube129clusterOpts = append(
+		kube129clusterOpts,
+		framework.WithClusterUpgrade(
+			api.WithKubernetesVersion(v1alpha1.Kube129),
+		),
+		provider.WithProviderUpgrade(framework.Ubuntu129Image()),
+	)
+	runMultipleUpgradesFlowForBareMetal(
+		test,
+		kube126clusterOpts,
+		kube127clusterOpts,
+		kube128clusterOpts,
+		kube129clusterOpts,
+	)
+}
diff --git a/test/e2e/upgrade.go b/test/e2e/upgrade.go
index 4b02142a1ac4..30288d2e6b91 100644
--- a/test/e2e/upgrade.go
+++ b/test/e2e/upgrade.go
@@ -93,6 +93,19 @@ func runInPlaceUpgradeFlowForBareMetal(test *framework.ClusterE2ETest, clusterOp
 	test.ValidateHardwareDecommissioned()
 }
 
+func runMultipleUpgradesFlowForBareMetal(test *framework.ClusterE2ETest, clusterOpts ...[]framework.ClusterE2ETestOpt) {
+	test.GenerateHardwareConfig()
+	test.CreateCluster(framework.WithControlPlaneWaitTimeout("20m"))
+	for _, opts := range clusterOpts {
+		test.UpgradeClusterWithNewConfig(opts)
+		test.GenerateSupportBundleOnCleanupIfTestFailed()
+		test.ValidateClusterState()
+		test.StopIfFailed()
+	}
+	test.DeleteCluster()
+	test.ValidateHardwareDecommissioned()
+}
+
 // runSimpleUpgradeFlowForBaremetalWithoutClusterConfigGeneration runs the Create, Upgrade and Delete cluster flows
 // for Baremetal that use the cluster config generated by the WithClusterConfig method when the test object is created,
 // and avoids regenerating a cluster config with defaults.

From 14dedabe89cdf451ab0f86a070714b27d10d1fa9 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 6 May 2024 09:45:12 -0700
Subject: [PATCH 119/193] Bump github.com/aws/aws-sdk-go-v2/service/ecr from 1.24.7 to 1.27.4 (#7937)

Bumps [github.com/aws/aws-sdk-go-v2/service/ecr](https://github.com/aws/aws-sdk-go-v2) from 1.24.7 to 1.27.4.
- [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/ecr/v1.24.7...config/v1.27.4) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go-v2/service/ecr dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index 93041cddb905..073d72efcfe2 100644 --- a/go.mod +++ b/go.mod @@ -5,18 +5,18 @@ go 1.21 require ( github.com/Masterminds/sprig v2.22.0+incompatible github.com/aws/aws-sdk-go v1.50.36 - github.com/aws/aws-sdk-go-v2 v1.25.3 + github.com/aws/aws-sdk-go-v2 v1.26.1 github.com/aws/aws-sdk-go-v2/config v1.26.6 github.com/aws/aws-sdk-go-v2/credentials v1.17.7 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.3 github.com/aws/aws-sdk-go-v2/service/ec2 v1.146.0 - github.com/aws/aws-sdk-go-v2/service/ecr v1.24.7 + github.com/aws/aws-sdk-go-v2/service/ecr v1.27.4 github.com/aws/eks-anywhere-packages v0.3.9 github.com/aws/eks-anywhere/internal/aws-sdk-go-v2/service/snowballdevice v0.0.0-00010101000000-000000000000 github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e github.com/aws/etcdadm-bootstrap-provider v1.0.12 github.com/aws/etcdadm-controller v1.0.19 - github.com/aws/smithy-go v1.20.1 + github.com/aws/smithy-go v1.20.2 github.com/bmc-toolbox/bmclib/v2 v2.1.1-0.20231206130132-1063371b9ed6 github.com/docker/cli v25.0.3+incompatible github.com/ghodss/yaml v1.0.0 @@ -83,8 +83,8 @@ require ( github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect github.com/VictorLowther/simplexml v0.0.0-20180716164440-0bff93621230 // indirect github.com/VictorLowther/soap v0.0.0-20150314151524-8e36fca84b22 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.3 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.5 // indirect diff --git a/go.sum b/go.sum index 602b3d6c6c84..2d62273a40dd 100644 --- a/go.sum +++ b/go.sum @@ -124,24 +124,24 @@ github.com/aws/aws-sdk-go v1.38.40/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2z github.com/aws/aws-sdk-go v1.42.23/go.mod h1:gyRszuZ/icHmHAVE4gc/r+cfCmhA1AD+vqfWbgI+eHs= github.com/aws/aws-sdk-go v1.50.36 h1:PjWXHwZPuTLMR1NIb8nEjLucZBMzmf84TLoLbD8BZqk= github.com/aws/aws-sdk-go v1.50.36/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= -github.com/aws/aws-sdk-go-v2 v1.25.3 h1:xYiLpZTQs1mzvz5PaI6uR0Wh57ippuEthxS4iK5v0n0= -github.com/aws/aws-sdk-go-v2 v1.25.3/go.mod h1:35hUlJVYd+M++iLI3ALmVwMOyRYMmRqUXpTtRGW+K9I= +github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA= +github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= github.com/aws/aws-sdk-go-v2/config v1.26.6 h1:Z/7w9bUqlRI0FFQpetVuFYEsjzE3h7fpU6HuGmfPL/o= github.com/aws/aws-sdk-go-v2/config v1.26.6/go.mod h1:uKU6cnDmYCvJ+pxO9S4cWDb2yWWIH5hra+32hVh1MI4= github.com/aws/aws-sdk-go-v2/credentials v1.17.7 
h1:WJd+ubWKoBeRh7A5iNMnxEOs982SyVKOJD+K8HIezu4= github.com/aws/aws-sdk-go-v2/credentials v1.17.7/go.mod h1:UQi7LMR0Vhvs+44w5ec8Q+VS+cd10cjwgHwiVkE0YGU= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.3 h1:p+y7FvkK2dxS+FEwRIDHDe//ZX+jDhP8HHE50ppj4iI= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.3/go.mod h1:/fYB+FZbDlwlAiynK9KDXlzZl3ANI9JkD0Uhz5FjNT4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.3 h1:ifbIbHZyGl1alsAhPIYsHOg5MuApgqOvVeI8wIugXfs= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.3/go.mod h1:oQZXg3c6SNeY6OZrDY+xHcF4VGIEoNotX2B4PrDeoJI= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.3 h1:Qvodo9gHG9F3E8SfYOspPeBt0bjSbsevK8WhRAUHcoY= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.3/go.mod h1:vCKrdLXtybdf/uQd/YfVR2r5pcbNuEYKzMQpcxmeSJw= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 h1:aw39xVGeRWlWx9EzGVnhOR4yOjQDHPQ6o6NmBlscyQg= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 h1:PG1F3OD1szkuQPzDw3CIQsRIrtTlUC3lP84taWzHlq0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc= github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 h1:n3GDfwqF2tzEkXlv5cuy4iy7LpKDtqDMcNLfZDu9rls= github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= github.com/aws/aws-sdk-go-v2/service/ec2 v1.146.0 h1:d6pYx/CKADORpxqBINY7DuD4V1fjcj3IoeTPQilCw4Q= github.com/aws/aws-sdk-go-v2/service/ec2 v1.146.0/go.mod h1:hIsHE0PaWAQakLCshKS7VKWMGXaqrAFp4m95s2W9E6c= -github.com/aws/aws-sdk-go-v2/service/ecr v1.24.7 h1:3iaT/LnGV6jNtbBkvHZDlzz7Ky3wMHDJAyFtGd5GUJI= -github.com/aws/aws-sdk-go-v2/service/ecr v1.24.7/go.mod h1:mtzCLxk6M+KZbkJdq3cUH9GCrudw8qCy5C3EHO+5vLc= +github.com/aws/aws-sdk-go-v2/service/ecr v1.27.4 h1:Qr9W21mzWT3RhfYn9iAux7CeRIdbnTAqmiOlASqQgZI= +github.com/aws/aws-sdk-go-v2/service/ecr v1.27.4/go.mod h1:if7ybzzjOmDB8pat9FE35AHTY6ZxlYSy3YviSmFZv8c= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 h1:EyBZibRTVAs6ECHZOw5/wlylS9OcTzwyjeQMudmREjE= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1/go.mod h1:JKpmtYhhPs7D97NL/ltqz7yCkERFW5dOlHyVl66ZYF8= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.5 h1:K/NXvIftOlX+oGgWGIa3jDyYLDNsdVhsjHmsBH2GLAQ= @@ -160,8 +160,8 @@ github.com/aws/etcdadm-bootstrap-provider v1.0.12 h1:jSUKR+2wETNpjmYmtEC2a/SBbul github.com/aws/etcdadm-bootstrap-provider v1.0.12/go.mod h1:6hc4wSlAkioU7EAGCW8fg2F+w42OTgLxjs4/nVzxPQw= github.com/aws/etcdadm-controller v1.0.19 h1:AC6LLHb6hb02Fus3RanUvzJeRoiORGZQ3/d/UjKbsHY= github.com/aws/etcdadm-controller v1.0.19/go.mod h1:L710y0if8mrJhCmOQSUJF+9QcEOiemd4jXkKIc5Oeok= -github.com/aws/smithy-go v1.20.1 h1:4SZlSlMr36UEqC7XOyRVb27XMeZubNcBNN+9IgEPIQw= -github.com/aws/smithy-go v1.20.1/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= +github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q= +github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= From 6fc394565dec6ba3ac90f717a7a7b1324f3ed350 Mon Sep 17 00:00:00 2001 From: Saurabh Parekh Date: Mon, 6 May 
2024 15:05:03 -0700
Subject: [PATCH 120/193] Update airgapped configuration step (#8104)

---
 docs/content/en/docs/getting-started/airgapped/airgap-steps.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/content/en/docs/getting-started/airgapped/airgap-steps.md b/docs/content/en/docs/getting-started/airgapped/airgap-steps.md
index 7b12e53e0c0e..60773faa7fac 100644
--- a/docs/content/en/docs/getting-started/airgapped/airgap-steps.md
+++ b/docs/content/en/docs/getting-started/airgapped/airgap-steps.md
@@ -18,7 +18,7 @@ toc_hide: true
 1. Set up a local registry mirror to host the downloaded EKS Anywhere images and configure your Admin machine with the certificates and authentication information if your registry requires it. For details, refer to the [Registry Mirror Configuration documentation.]({{< relref "../../getting-started/optional/registrymirror/#configure-local-registry-mirror" >}})
 
-1. Import images to the local registry mirror using the following command. Set `REGISTRY_MIRROR_URL` to the url of the local registry mirror you created in the previous step. This command may take several minutes to complete. To monitor the progress of the command, you can run with the `-v 6` command line argument.
+1. Import images to the local registry mirror using the following command. Set `REGISTRY_MIRROR_URL` to the URL of the local registry mirror you created in the previous step. This command may take several minutes to complete. To monitor the progress of the command, you can run with the `-v 6` command line argument. If your registry uses self-signed certificates, run with the `--insecure` command line argument to skip TLS verification when pushing Helm charts and bundles.
 
    ```bash
    export REGISTRY_MIRROR_URL=
   ```

From 6635a1e727c986f3286168bf69731e1366054f02 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 6 May 2024 19:20:08 -0700
Subject: [PATCH 121/193] Bump codecov/codecov-action from 4.3.0 to 4.3.1 (#8108)

Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 4.3.0 to 4.3.1.
- [Release notes](https://github.com/codecov/codecov-action/releases)
- [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md)
- [Commits](https://github.com/codecov/codecov-action/compare/v4.3.0...v4.3.1)

---
updated-dependencies:
- dependency-name: codecov/codecov-action
  dependency-type: direct:production
  update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/go-coverage.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/go-coverage.yml b/.github/workflows/go-coverage.yml index e417d5f79801..d271501d86dd 100644 --- a/.github/workflows/go-coverage.yml +++ b/.github/workflows/go-coverage.yml @@ -22,7 +22,7 @@ jobs: - name: Run go test with coverage run: COVER_PROFILE=coverage.txt make coverage-unit-test - name: Codecov upload - uses: codecov/codecov-action@v4.3.0 + uses: codecov/codecov-action@v4.3.1 with: files: ./coverage.txt token: ${{ secrets.CODECOV_TOKEN }} From d87051a6cceff1a4d65a16a3cc3fdf478870533b Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Mon, 6 May 2024 19:40:04 -0700 Subject: [PATCH 122/193] [PR BOT] Generate release testdata files (#8111) --- .../testdata/main-bundle-release.yaml | 80 +++++++++---------- 1 file changed, 40 insertions(+), 40 deletions(-) diff --git a/release/cli/pkg/operations/testdata/main-bundle-release.yaml b/release/cli/pkg/operations/testdata/main-bundle-release.yaml index a8c0d5ec17e9..ced0fc2150bb 100644 --- a/release/cli/pkg/operations/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/operations/testdata/main-bundle-release.yaml @@ -57,7 +57,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-25-37-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-25-38-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -289,10 +289,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.25.16-eks-d-1-25-37-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.25.16-eks-d-1-25-38-eks-a-v0.0.0-dev-build.1 kubeVersion: v1.25.16 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-25/kubernetes-1-25-eks-37.yaml - name: kubernetes-1-25-eks-37 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-25/kubernetes-1-25-eks-38.yaml + name: kubernetes-1-25-eks-38 ova: bottlerocket: {} raw: @@ -507,7 +507,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap-snow os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-25-37-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-25-38-eks-a-v0.0.0-dev-build.1 components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml kubeVip: @@ -741,7 +741,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: upgrader os: linux - uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-25-37-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-25-38-eks-a-v0.0.0-dev-build.1 vSphere: clusterAPIController: arch: @@ -835,7 +835,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: 
public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-26-33-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-26-34-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -1067,10 +1067,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.26.14-eks-d-1-26-33-eks-a-v0.0.0-dev-build.1 - kubeVersion: v1.26.14 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-26/kubernetes-1-26-eks-33.yaml - name: kubernetes-1-26-eks-33 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.26.15-eks-d-1-26-34-eks-a-v0.0.0-dev-build.1 + kubeVersion: v1.26.15 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-26/kubernetes-1-26-eks-34.yaml + name: kubernetes-1-26-eks-34 ova: bottlerocket: {} raw: @@ -1285,7 +1285,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap-snow os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-26-33-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-26-34-eks-a-v0.0.0-dev-build.1 components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml kubeVip: @@ -1519,7 +1519,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: upgrader os: linux - uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-26-33-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-26-34-eks-a-v0.0.0-dev-build.1 vSphere: clusterAPIController: arch: @@ -1613,7 +1613,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-27-27-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-27-28-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -1845,10 +1845,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.27.11-eks-d-1-27-27-eks-a-v0.0.0-dev-build.1 - kubeVersion: v1.27.11 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-27/kubernetes-1-27-eks-27.yaml - name: kubernetes-1-27-eks-27 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.27.12-eks-d-1-27-28-eks-a-v0.0.0-dev-build.1 + kubeVersion: v1.27.12 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-27/kubernetes-1-27-eks-28.yaml + name: kubernetes-1-27-eks-28 ova: bottlerocket: {} raw: @@ -2063,7 +2063,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap-snow os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-27-27-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-27-28-eks-a-v0.0.0-dev-build.1 components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml kubeVip: @@ -2297,7 +2297,7 @@ spec: 
imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: upgrader os: linux - uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-27-27-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-27-28-eks-a-v0.0.0-dev-build.1 vSphere: clusterAPIController: arch: @@ -2391,7 +2391,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-28-20-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-28-21-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -2623,10 +2623,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.28.7-eks-d-1-28-20-eks-a-v0.0.0-dev-build.1 - kubeVersion: v1.28.7 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-28/kubernetes-1-28-eks-20.yaml - name: kubernetes-1-28-eks-20 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.28.8-eks-d-1-28-21-eks-a-v0.0.0-dev-build.1 + kubeVersion: v1.28.8 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-28/kubernetes-1-28-eks-21.yaml + name: kubernetes-1-28-eks-21 ova: bottlerocket: {} raw: @@ -2841,7 +2841,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap-snow os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-28-20-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-28-21-eks-a-v0.0.0-dev-build.1 components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml kubeVip: @@ -3075,7 +3075,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: upgrader os: linux - uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-28-20-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-28-21-eks-a-v0.0.0-dev-build.1 vSphere: clusterAPIController: arch: @@ -3169,7 +3169,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-29-9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-29-10-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -3401,10 +3401,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.29.1-eks-d-1-29-9-eks-a-v0.0.0-dev-build.1 - kubeVersion: v1.29.1 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-29/kubernetes-1-29-eks-9.yaml - name: kubernetes-1-29-eks-9 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.29.3-eks-d-1-29-10-eks-a-v0.0.0-dev-build.1 + kubeVersion: v1.29.3 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-29/kubernetes-1-29-eks-10.yaml + name: kubernetes-1-29-eks-10 ova: bottlerocket: {} raw: @@ -3619,7 +3619,7 @@ spec: imageDigest: 
sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap-snow os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-29-9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-29-10-eks-a-v0.0.0-dev-build.1 components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml kubeVip: @@ -3853,7 +3853,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: upgrader os: linux - uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-29-9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-29-10-eks-a-v0.0.0-dev-build.1 vSphere: clusterAPIController: arch: @@ -3947,7 +3947,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-30-2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-30-3-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -4179,10 +4179,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.30.0-eks-d-1-30-2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.30.0-eks-d-1-30-3-eks-a-v0.0.0-dev-build.1 kubeVersion: v1.30.0 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-30/kubernetes-1-30-eks-2.yaml - name: kubernetes-1-30-eks-2 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-30/kubernetes-1-30-eks-3.yaml + name: kubernetes-1-30-eks-3 ova: bottlerocket: {} raw: @@ -4397,7 +4397,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap-snow os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-30-2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-30-3-eks-a-v0.0.0-dev-build.1 components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml kubeVip: @@ -4631,7 +4631,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: upgrader os: linux - uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-30-2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-30-3-eks-a-v0.0.0-dev-build.1 vSphere: clusterAPIController: arch: From 3f465858c5e36f5bfb82cee7e65e277d5a0dddb3 Mon Sep 17 00:00:00 2001 From: Abhay Krishna Date: Tue, 7 May 2024 11:53:15 -0700 Subject: [PATCH 123/193] Add docs for granular MHC timeout and `maxUnhealthy` configuration (#8112) --- .../getting-started/baremetal/bare-spec.md | 2 +- .../getting-started/cloudstack/cloud-spec.md | 2 +- .../getting-started/nutanix/nutanix-spec.md | 2 +- .../getting-started/optional/healthchecks.md | 100 +++++++++++++++--- .../en/docs/getting-started/snow/snow-spec.md | 2 +- .../getting-started/vsphere/vsphere-spec.md | 2 +- 6 files changed, 90 insertions(+), 20 deletions(-) diff --git a/docs/content/en/docs/getting-started/baremetal/bare-spec.md 
b/docs/content/en/docs/getting-started/baremetal/bare-spec.md index f8ae2746ad31..52ee32da1a95 100644 --- a/docs/content/en/docs/getting-started/baremetal/bare-spec.md +++ b/docs/content/en/docs/getting-started/baremetal/bare-spec.md @@ -18,7 +18,7 @@ The following additional optional configuration can also be included: * [IAM Authenticator]({{< relref "../optional/iamauth.md" >}}) * [OIDC]({{< relref "../optional/oidc.md" >}}) * [Registry Mirror]({{< relref "../optional/registrymirror.md" >}}) -* [Machine Health Check Timeouts]({{< relref "../optional/healthchecks.md" >}}) +* [Machine Health Checks]({{< relref "../optional/healthchecks.md" >}}) * [API Server Extra Args]({{< relref "../optional/api-server-extra-args.md" >}}) To generate your own cluster configuration, follow instructions from the [Create Bare Metal cluster]({{< relref "./baremetal-getstarted" >}}) section and modify it using descriptions below. diff --git a/docs/content/en/docs/getting-started/cloudstack/cloud-spec.md b/docs/content/en/docs/getting-started/cloudstack/cloud-spec.md index 94235215647c..ac9c064c2fae 100644 --- a/docs/content/en/docs/getting-started/cloudstack/cloud-spec.md +++ b/docs/content/en/docs/getting-started/cloudstack/cloud-spec.md @@ -17,7 +17,7 @@ The following additional optional configuration can also be included: * [GitOps]({{< relref "../optional/gitops.md" >}}) * [Proxy]({{< relref "../optional/proxy.md" >}}) * [Registry Mirror]({{< relref "../optional/registrymirror.md" >}}) -* [Machine Health Check Timeouts]({{< relref "../optional/healthchecks.md" >}}) +* [Machine Health Checks]({{< relref "../optional/healthchecks.md" >}}) * [API Server Extra Args]({{< relref "../optional/api-server-extra-args.md" >}}) diff --git a/docs/content/en/docs/getting-started/nutanix/nutanix-spec.md b/docs/content/en/docs/getting-started/nutanix/nutanix-spec.md index 41f969da39a0..16378782e32c 100644 --- a/docs/content/en/docs/getting-started/nutanix/nutanix-spec.md +++ b/docs/content/en/docs/getting-started/nutanix/nutanix-spec.md @@ -19,7 +19,7 @@ The following additional optional configuration can also be included: * [Registry Mirror]({{< relref "../optional/registrymirror.md" >}}) * [Proxy]({{< relref "../optional/proxy.md" >}}) * [Gitops]({{< relref "../optional/gitops.md" >}}) -* [Machine Health Check Timeouts]({{< relref "../optional/healthchecks.md" >}}) +* [Machine Health Checks]({{< relref "../optional/healthchecks.md" >}}) * [API Server Extra Args]({{< relref "../optional/api-server-extra-args.md" >}}) ```yaml diff --git a/docs/content/en/docs/getting-started/optional/healthchecks.md b/docs/content/en/docs/getting-started/optional/healthchecks.md index 198601350441..23162f08c2ea 100644 --- a/docs/content/en/docs/getting-started/optional/healthchecks.md +++ b/docs/content/en/docs/getting-started/optional/healthchecks.md @@ -1,26 +1,29 @@ --- -title: "Machine Health Checks" -linkTitle: "Machine Health Checks" +title: "MachineHealthCheck" +linkTitle: "MachineHealthCheck" weight: 40 aliases: /docs/reference/clusterspec/optional/healthchecks/ description: > - EKS Anywhere cluster yaml specification for machine health check configuration + EKS Anywhere cluster yaml specification for MachineHealthCheck configuration --- -## Machine Health Checks Support +## MachineHealthCheck Support #### Provider support details | | vSphere | Bare Metal | Nutanix | CloudStack | Snow | |:--------------:|:-------:|:----------:|:-------:|:----------:|:----:| | **Supported?** | ✓ | ✓ | ✓ | ✓ | ✓ | -You can configure EKS Anywhere 
to specify timeouts for machine health checks. -A Machine Health Check is a resource which allows users to define conditions under which Machines within a Cluster should be considered unhealthy. A Machine Health Check is defined on a management cluster and scoped to a particular workload cluster. If not configured in the spec, the default values are used to configure the machine health checks. +You can configure EKS Anywhere to specify timeouts and `maxUnhealthy` values for machine health checks. -Note: Even though the configuration on machine health check timeouts in the EKSA spec is optional, machine health checks are still installed for all clusters using the default timeout values mentioned below. +A MachineHealthCheck (MHC) is a resource in Cluster API which allows users to define conditions under which Machines within a Cluster should be considered unhealthy. A MachineHealthCheck is defined on a management cluster and scoped to a particular workload cluster. -The following cluster spec shows an example of how to configure health check timeouts: +Note: Even though the MachineHealthCheck configuration in the EKS-A spec is optional, MachineHealthChecks are still installed for all clusters using the default values mentioned below. + +EKS Anywhere allows users to have granular control over MachineHealthChecks in their cluster configuration, with default values (derived from Cluster API) being applied if the MHC is not configured in the spec. The top-level `machineHealthCheck` field governs the global MachineHealthCheck settings for all Machines (control-plane and worker). These global settings can be overridden through the nested `machineHealthCheck` field in the control plane configuration and each worker node configuration. If the nested MHC fields are not configured, then the top-level settings are applied to the respective Machines. + +The following cluster spec shows an example of how to configure health check timeouts and `maxUnhealthy`: ```yaml apiVersion: anywhere.eks.amazonaws.com/v1alpha1 kind: Cluster @@ -28,22 +31,89 @@ metadata: name: my-cluster-name spec: ... - machineHealthCheck: + machineHealthCheck: # Top-level MachineHealthCheck configuration + maxUnhealthy: "60%" nodeStartupTimeout: "10m0s" unhealthyMachineTimeout: "5m0s" + ... + controlPlaneConfiguration: # MachineHealthCheck configuration for Control plane + machineHealthCheck: + maxUnhealthy: 100% + nodeStartupTimeout: "15m0s" + unhealthyMachineTimeout: 10m + ... + workerNodeGroupConfigurations: + - count: 1 + name: md-0 + machineHealthCheck: # MachineHealthCheck configuration for Worker Node Group 0 + maxUnhealthy: 100% + nodeStartupTimeout: "10m0s" + unhealthyMachineTimeout: 20m + - count: 1 + name: md-1 + machineHealthCheck: # MachineHealthCheck configuration for Worker Node Group 1 + maxUnhealthy: 100% + nodeStartupTimeout: "10m0s" + unhealthyMachineTimeout: 20m + ... ``` -## Machine Health Check Spec Details +## MachineHealthCheck Spec Details ### __machineHealthCheck__ (optional) -* __Description__: top level key; required to configure machine health check timeouts. +* __Description__: top-level key; required to configure global MachineHealthCheck timeouts and `maxUnhealthy`. * __Type__: object -### __nodeStartupTimeout__ (optional) -* __Description__: determines how long a Machine Health Check should wait for a Node to join the cluster, before considering a Machine unhealthy. 
+### __machineHealthCheck.maxUnhealthy__ (optional)
+* __Description__: determines the maximum permissible number or percentage of unhealthy Machines in a cluster before further remediation is prevented. This ensures that MachineHealthChecks only remediate Machines when the cluster is healthy.
+* __Default__: ```100%``` for control plane machines, ```40%``` for worker nodes (Cluster API defaults).
+* __Type__: integer (count) or string (percentage)
+
+### __machineHealthCheck.nodeStartupTimeout__ (optional)
+* __Description__: determines how long a MachineHealthCheck should wait for a Node to join the cluster, before considering a Machine unhealthy.
 * __Default__: ```20m0s``` for Tinkerbell provider, ```10m0s``` for all other providers.
 * __Minimum Value (If configured)__: ```30s```
 * __Type__: string
 
-### __unhealthyMachineTimeout__ (optional)
-* __Description__: if the unhealthy condition is matched for the duration of this timeout, the Machine is considered unhealthy.
+### __machineHealthCheck.unhealthyMachineTimeout__ (optional)
+* __Description__: determines how long the unhealthy Node conditions (e.g., `Ready=False`, `Ready=Unknown`) should be matched for, before considering a Machine unhealthy.
 * __Default__: ```5m0s```
 * __Type__: string
+
+### __controlPlaneConfiguration.machineHealthCheck__ (optional)
+* __Description__: Control plane level configuration for MachineHealthCheck timeouts and `maxUnhealthy` values.
+* __Type__: object
+
+### __controlPlaneConfiguration.machineHealthCheck.maxUnhealthy__ (optional)
+* __Description__: determines the maximum permissible number or percentage of unhealthy control plane Machines in a cluster before further remediation is prevented. This ensures that MachineHealthChecks only remediate Machines when the cluster is healthy.
+* __Default__: Top-level MHC `maxUnhealthy` if set; otherwise ```100%```.
+* __Type__: integer (count) or string (percentage)
+
+### __controlPlaneConfiguration.machineHealthCheck.nodeStartupTimeout__ (optional)
+* __Description__: determines how long a MachineHealthCheck should wait for a control plane Node to join the cluster, before considering the Machine unhealthy.
+* __Default__: Top-level MHC `nodeStartupTimeout` if set; otherwise ```20m0s``` for the Tinkerbell provider and ```10m0s``` for all other providers.
+* __Minimum Value (if configured)__: ```30s```
+* __Type__: string
+
+### __controlPlaneConfiguration.machineHealthCheck.unhealthyMachineTimeout__ (optional)
+* __Description__: determines how long the unhealthy conditions (e.g., `Ready=False`, `Ready=Unknown`) should be matched for a control plane Node, before considering the Machine unhealthy.
+* __Default__: Top-level MHC `unhealthyMachineTimeout` if set; otherwise ```5m0s```.
+* __Type__: string
+
+### __workerNodeGroupConfigurations.machineHealthCheck__ (optional)
+* __Description__: Worker node level configuration for MachineHealthCheck timeouts and `maxUnhealthy` values.
+* __Type__: object
+
+### __workerNodeGroupConfigurations.machineHealthCheck.maxUnhealthy__ (optional)
+* __Description__: determines the maximum permissible number or percentage of unhealthy worker Machines in a cluster before further remediation is prevented. This ensures that MachineHealthChecks only remediate Machines when the cluster is healthy.
+* __Default__: Top-level MHC `maxUnhealthy` if set; otherwise ```40%```.
+* __Type__: integer (count) or string (percentage)
+
+### __workerNodeGroupConfigurations.machineHealthCheck.nodeStartupTimeout__ (optional)
+* __Description__: determines how long a MachineHealthCheck should wait for a worker Node to join the cluster, before considering the Machine unhealthy.
+* __Default__: Top-level MHC `nodeStartupTimeout` if set; otherwise ```20m0s``` for the Tinkerbell provider and ```10m0s``` for all other providers.
+* __Minimum Value (if configured)__: ```30s```
+* __Type__: string
+
+### __workerNodeGroupConfigurations.machineHealthCheck.unhealthyMachineTimeout__ (optional)
+* __Description__: determines how long the unhealthy conditions (e.g., `Ready=False`, `Ready=Unknown`) should be matched for a worker Node, before considering the Machine unhealthy.
+* __Default__: Top-level MHC `unhealthyMachineTimeout` if set; otherwise ```5m0s```.
+* __Type__: string
diff --git a/docs/content/en/docs/getting-started/snow/snow-spec.md b/docs/content/en/docs/getting-started/snow/snow-spec.md
index 0290afde4de2..91952e68c6eb 100644
--- a/docs/content/en/docs/getting-started/snow/snow-spec.md
+++ b/docs/content/en/docs/getting-started/snow/snow-spec.md
@@ -17,7 +17,7 @@ The following additional optional configuration can also be included:
 * [GitOps]({{< relref "../optional/gitops.md" >}})
 * [Proxy]({{< relref "../optional/proxy.md" >}})
 * [Registry Mirror]({{< relref "../optional/registrymirror.md" >}})
-* [Machine Health Check Timeouts]({{< relref "../optional/healthchecks.md" >}})
+* [Machine Health Checks]({{< relref "../optional/healthchecks.md" >}})
 * [API Server Extra Args]({{< relref "../optional/api-server-extra-args.md" >}})
 
 ```yaml
diff --git a/docs/content/en/docs/getting-started/vsphere/vsphere-spec.md b/docs/content/en/docs/getting-started/vsphere/vsphere-spec.md
index 7e4e6f55cdd7..a0cfa4a257bd 100644
--- a/docs/content/en/docs/getting-started/vsphere/vsphere-spec.md
+++ b/docs/content/en/docs/getting-started/vsphere/vsphere-spec.md
@@ -110,7 +110,7 @@ The following additional optional configuration can also be included:
 * [Proxy]({{< relref "../optional/proxy.md" >}})
 * [Registry Mirror]({{< relref "../optional/registrymirror.md" >}})
 * [Host OS Config]({{< relref "../optional/hostOSConfig.md" >}})
-* [Machine Health Check Timeouts]({{< relref "../optional/healthchecks.md" >}})
+* [Machine Health Checks]({{< relref "../optional/healthchecks.md" >}})
 * [API Server Extra Args]({{< relref "../optional/api-server-extra-args.md" >}})
 
 ## Cluster Fields
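Because `maxUnhealthy` accepts either an absolute count or a percentage, a concrete number helps. The sketch below is an illustrative aside rather than repository code; it assumes only the `intstr` helpers from `k8s.io/apimachinery` (which Cluster API also relies on for percentage fields) and works out what the `40%` worker default means for a hypothetical five-machine node group:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// The worker-node default described above: maxUnhealthy of "40%".
	maxUnhealthy := intstr.FromString("40%")

	// Scale the percentage against 5 machines, rounding down.
	allowed, err := intstr.GetScaledValueFromIntOrPercent(&maxUnhealthy, 5, false)
	if err != nil {
		panic(err)
	}

	// Prints 2: MachineHealthChecks keep remediating while at most 2 machines
	// are unhealthy, and stop once a 3rd machine becomes unhealthy.
	fmt.Printf("remediation allowed while unhealthy machines <= %d\n", allowed)
}
```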
From b5f1dc44a2f80aea4ff60ffcb308f3eba0cab5bc Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 7 May 2024 12:16:17 -0700
Subject: [PATCH 124/193] Bump github.com/aws/aws-sdk-go from 1.51.31 to 1.52.3 in /release/cli (#8113)

Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.51.31 to 1.52.3.
- [Release notes](https://github.com/aws/aws-sdk-go/releases)
- [Commits](https://github.com/aws/aws-sdk-go/compare/v1.51.31...v1.52.3)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 release/cli/go.mod | 2 +-
 release/cli/go.sum | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/release/cli/go.mod b/release/cli/go.mod
index 8cbf6df901df..9d2ad6610e8a 100644
--- a/release/cli/go.mod
+++ b/release/cli/go.mod
@@ -3,7 +3,7 @@ module github.com/aws/eks-anywhere/release/cli
 go 1.21
 
 require (
-	github.com/aws/aws-sdk-go v1.51.31
+	github.com/aws/aws-sdk-go v1.52.3
 	github.com/aws/aws-sdk-go-v2 v1.26.1
 	github.com/aws/eks-anywhere v0.18.0
 	github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e
diff --git a/release/cli/go.sum b/release/cli/go.sum
index ee624224165d..28c195b6c2fa 100644
--- a/release/cli/go.sum
+++ b/release/cli/go.sum
@@ -56,8 +56,8 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:l
 github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY=
 github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
 github.com/aws/aws-sdk-go v1.38.40/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.51.31 h1:4TM+sNc+Dzs7wY1sJ0+J8i60c6rkgnKP1pvPx8ghsSY=
-github.com/aws/aws-sdk-go v1.51.31/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/aws/aws-sdk-go v1.52.3 h1:BNPJmHOXNoM/iBWJKrvaQvJOweRcp3KLpzdb65CfQwU=
+github.com/aws/aws-sdk-go v1.52.3/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
 github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA=
 github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM=
 github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e h1:GB6Cn9yKEt31mDF7RrVWyM9WoppNkGYth8zBPIJGJ+w=

From 9fa332d4f671b31acce4e97608e983ccce845f19 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 7 May 2024 12:16:23 -0700
Subject: [PATCH 125/193] Bump sigs.k8s.io/controller-runtime in /release/cli (#8114)

Bumps [sigs.k8s.io/controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) from 0.16.5 to 0.16.6.
- [Release notes](https://github.com/kubernetes-sigs/controller-runtime/releases)
- [Changelog](https://github.com/kubernetes-sigs/controller-runtime/blob/main/RELEASE.md)
- [Commits](https://github.com/kubernetes-sigs/controller-runtime/compare/v0.16.5...v0.16.6)

---
updated-dependencies:
- dependency-name: sigs.k8s.io/controller-runtime
  dependency-type: direct:production
  update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- release/cli/go.mod | 2 +- release/cli/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/release/cli/go.mod b/release/cli/go.mod index 9d2ad6610e8a..955cf7d78d5e 100644 --- a/release/cli/go.mod +++ b/release/cli/go.mod @@ -19,7 +19,7 @@ require ( helm.sh/helm/v3 v3.14.4 k8s.io/apimachinery v0.29.4 k8s.io/helm v2.17.0+incompatible - sigs.k8s.io/controller-runtime v0.16.5 + sigs.k8s.io/controller-runtime v0.16.6 sigs.k8s.io/yaml v1.4.0 ) diff --git a/release/cli/go.sum b/release/cli/go.sum index 28c195b6c2fa..76781c89fafc 100644 --- a/release/cli/go.sum +++ b/release/cli/go.sum @@ -919,8 +919,8 @@ modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= oras.land/oras-go v1.2.5 h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo= oras.land/oras-go v1.2.5/go.mod h1:PuAwRShRZCsZb7g8Ar3jKKQR/2A/qN+pkYxIOd/FAoo= sigs.k8s.io/controller-runtime v0.5.0/go.mod h1:REiJzC7Y00U+2YkMbT8wxgrsX5USpXKGhb2sCtAXiT8= -sigs.k8s.io/controller-runtime v0.16.5 h1:yr1cEJbX08xsTW6XEIzT13KHHmIyX8Umvme2cULvFZw= -sigs.k8s.io/controller-runtime v0.16.5/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0= +sigs.k8s.io/controller-runtime v0.16.6 h1:FiXwTuFF5ZJKmozfP2Z0j7dh6kmxP4Ou1KLfxgKKC3I= +sigs.k8s.io/controller-runtime v0.16.6/go.mod h1:+dQzkZxnylD0u49e0a+7AR+vlibEBaThmPca7lTyUsI= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0= From 713026d27f8fc4152a360b65366deac62cee6980 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 May 2024 12:54:06 -0700 Subject: [PATCH 126/193] Bump the kubernetes group with 5 updates (#8106) * Bump the kubernetes group with 5 updates Bumps the kubernetes group with 5 updates: | Package | From | To | | --- | --- | --- | | [k8s.io/api](https://github.com/kubernetes/api) | `0.29.3` | `0.29.4` | | [k8s.io/apimachinery](https://github.com/kubernetes/apimachinery) | `0.29.3` | `0.29.4` | | [k8s.io/apiserver](https://github.com/kubernetes/apiserver) | `0.29.3` | `0.29.4` | | [k8s.io/client-go](https://github.com/kubernetes/client-go) | `0.29.3` | `0.29.4` | | [k8s.io/component-base](https://github.com/kubernetes/component-base) | `0.29.3` | `0.29.4` | Updates `k8s.io/api` from 0.29.3 to 0.29.4 - [Commits](https://github.com/kubernetes/api/compare/v0.29.3...v0.29.4) Updates `k8s.io/apimachinery` from 0.29.3 to 0.29.4 - [Commits](https://github.com/kubernetes/apimachinery/compare/v0.29.3...v0.29.4) Updates `k8s.io/apiserver` from 0.29.3 to 0.29.4 - [Commits](https://github.com/kubernetes/apiserver/compare/v0.29.3...v0.29.4) Updates `k8s.io/client-go` from 0.29.3 to 0.29.4 - [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/kubernetes/client-go/compare/v0.29.3...v0.29.4) Updates `k8s.io/component-base` from 0.29.3 to 0.29.4 - [Commits](https://github.com/kubernetes/component-base/compare/v0.29.3...v0.29.4) --- updated-dependencies: - dependency-name: k8s.io/api dependency-type: direct:production update-type: version-update:semver-patch dependency-group: kubernetes - dependency-name: k8s.io/apimachinery dependency-type: direct:production update-type: 
version-update:semver-patch dependency-group: kubernetes - dependency-name: k8s.io/apiserver dependency-type: direct:production update-type: version-update:semver-patch dependency-group: kubernetes - dependency-name: k8s.io/client-go dependency-type: direct:production update-type: version-update:semver-patch dependency-group: kubernetes - dependency-name: k8s.io/component-base dependency-type: direct:production update-type: version-update:semver-patch dependency-group: kubernetes ... Signed-off-by: dependabot[bot] * Update Go modules in release/cli --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Abhay Krishna Arunachalam --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- release/cli/go.mod | 8 ++++---- release/cli/go.sum | 16 ++++++++-------- 4 files changed, 27 insertions(+), 27 deletions(-) diff --git a/go.mod b/go.mod index 073d72efcfe2..4c575a27bd1a 100644 --- a/go.mod +++ b/go.mod @@ -51,11 +51,11 @@ require ( gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 helm.sh/helm/v3 v3.14.2 - k8s.io/api v0.29.3 - k8s.io/apimachinery v0.29.3 - k8s.io/apiserver v0.29.3 - k8s.io/client-go v0.29.3 - k8s.io/component-base v0.29.3 + k8s.io/api v0.29.4 + k8s.io/apimachinery v0.29.4 + k8s.io/apiserver v0.29.4 + k8s.io/client-go v0.29.4 + k8s.io/component-base v0.29.4 k8s.io/klog/v2 v2.110.1 k8s.io/utils v0.0.0-20240102154912-e7106e64919e oras.land/oras-go v1.2.5 diff --git a/go.sum b/go.sum index 2d62273a40dd..d7026a334169 100644 --- a/go.sum +++ b/go.sum @@ -1468,29 +1468,29 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.17.2/go.mod h1:BS9fjjLc4CMuqfSO8vgbHPKMt5+SF0ET6u/RVDihTo4= k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg= -k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= -k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= +k8s.io/api v0.29.4 h1:WEnF/XdxuCxdG3ayHNRR8yH3cI1B/llkWBma6bq4R3w= +k8s.io/api v0.29.4/go.mod h1:DetSv0t4FBTcEpfA84NJV3g9a7+rSzlUHk5ADAYHUv0= k8s.io/apiextensions-apiserver v0.17.2/go.mod h1:4KdMpjkEjjDI2pPfBA15OscyNldHWdBCfsWMDWAmSTs= k8s.io/apiextensions-apiserver v0.29.1 h1:S9xOtyk9M3Sk1tIpQMu9wXHm5O2MX6Y1kIpPMimZBZw= k8s.io/apiextensions-apiserver v0.29.1/go.mod h1:zZECpujY5yTW58co8V2EQR4BD6A9pktVgHhvc0uLfeU= k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= k8s.io/apimachinery v0.24.3/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= -k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= -k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= +k8s.io/apimachinery v0.29.4 h1:RaFdJiDmuKs/8cm1M6Dh1Kvyh59YQFDcFuFTSmXes6Q= +k8s.io/apimachinery v0.29.4/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y= k8s.io/apiserver v0.17.2/go.mod h1:lBmw/TtQdtxvrTk0e2cgtOxHizXI+d0mmGQURIHQZlo= -k8s.io/apiserver v0.29.3 h1:xR7ELlJ/BZSr2n4CnD3lfA4gzFivh0wwfNfz9L0WZcE= -k8s.io/apiserver v0.29.3/go.mod h1:hrvXlwfRulbMbBgmWRQlFru2b/JySDpmzvQwwk4GUOs= +k8s.io/apiserver v0.29.4 h1:wPwGOO58GQOpRiZu59P5eRoDcB7QtV+QBglkRiXwCiM= +k8s.io/apiserver v0.29.4/go.mod h1:VqTF9t98HVfhKZVRohCPezsdUt9u2g3bHKftxGcXoRo= k8s.io/client-go v0.17.2/go.mod h1:QAzRgsa0C2xl4/eVpeVAZMvikCn8Nm81yqVx3Kk9XYI= k8s.io/client-go v0.24.2/go.mod 
h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30= -k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= -k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= +k8s.io/client-go v0.29.4 h1:79ytIedxVfyXV8rpH3jCBW0u+un0fxHDwX5F9K8dPR8= +k8s.io/client-go v0.29.4/go.mod h1:kC1thZQ4zQWYwldsfI088BbK6RkxK+aF5ebV8y9Q4tk= k8s.io/cluster-bootstrap v0.28.5 h1:KyFY6l5xK5oxRjjGotgivlbQ0AReRctMMoNpxSJaJxM= k8s.io/cluster-bootstrap v0.28.5/go.mod h1:nJzrDb8AWtUm1RSoXx+lDb2f7i54Ndfx4v8x3s4kZ2Y= k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= k8s.io/component-base v0.17.2/go.mod h1:zMPW3g5aH7cHJpKYQ/ZsGMcgbsA/VyhEugF3QT1awLs= -k8s.io/component-base v0.29.3 h1:Oq9/nddUxlnrCuuR2K/jp6aflVvc0uDvxMzAWxnGzAo= -k8s.io/component-base v0.29.3/go.mod h1:Yuj33XXjuOk2BAaHsIGHhCKZQAgYKhqIxIjIr2UXYio= +k8s.io/component-base v0.29.4 h1:xeKzuuHI/1tjleu5jycDAcYbhAxeGHCQBZUY2eRIkOo= +k8s.io/component-base v0.29.4/go.mod h1:pYjt+oEZP9gtmwSikwAJgfSBikqKX2gOqRat0QjmQt0= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= diff --git a/release/cli/go.mod b/release/cli/go.mod index 955cf7d78d5e..6859c6fc4eff 100644 --- a/release/cli/go.mod +++ b/release/cli/go.mod @@ -164,12 +164,12 @@ require ( gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.29.3 // indirect + k8s.io/api v0.29.4 // indirect k8s.io/apiextensions-apiserver v0.29.1 // indirect - k8s.io/apiserver v0.29.3 // indirect + k8s.io/apiserver v0.29.4 // indirect k8s.io/cli-runtime v0.29.0 // indirect - k8s.io/client-go v0.29.3 // indirect - k8s.io/component-base v0.29.3 // indirect + k8s.io/client-go v0.29.4 // indirect + k8s.io/component-base v0.29.4 // indirect k8s.io/klog/v2 v2.110.1 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect k8s.io/kubectl v0.29.0 // indirect diff --git a/release/cli/go.sum b/release/cli/go.sum index 76781c89fafc..9f5a29adf6c0 100644 --- a/release/cli/go.sum +++ b/release/cli/go.sum @@ -874,8 +874,8 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.17.2/go.mod h1:BS9fjjLc4CMuqfSO8vgbHPKMt5+SF0ET6u/RVDihTo4= -k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= -k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= +k8s.io/api v0.29.4 h1:WEnF/XdxuCxdG3ayHNRR8yH3cI1B/llkWBma6bq4R3w= +k8s.io/api v0.29.4/go.mod h1:DetSv0t4FBTcEpfA84NJV3g9a7+rSzlUHk5ADAYHUv0= k8s.io/apiextensions-apiserver v0.17.2/go.mod h1:4KdMpjkEjjDI2pPfBA15OscyNldHWdBCfsWMDWAmSTs= k8s.io/apiextensions-apiserver v0.29.1 h1:S9xOtyk9M3Sk1tIpQMu9wXHm5O2MX6Y1kIpPMimZBZw= k8s.io/apiextensions-apiserver v0.29.1/go.mod h1:zZECpujY5yTW58co8V2EQR4BD6A9pktVgHhvc0uLfeU= @@ -883,17 +883,17 @@ k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZ k8s.io/apimachinery v0.29.4 h1:RaFdJiDmuKs/8cm1M6Dh1Kvyh59YQFDcFuFTSmXes6Q= k8s.io/apimachinery v0.29.4/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y= k8s.io/apiserver v0.17.2/go.mod 
h1:lBmw/TtQdtxvrTk0e2cgtOxHizXI+d0mmGQURIHQZlo= -k8s.io/apiserver v0.29.3 h1:xR7ELlJ/BZSr2n4CnD3lfA4gzFivh0wwfNfz9L0WZcE= -k8s.io/apiserver v0.29.3/go.mod h1:hrvXlwfRulbMbBgmWRQlFru2b/JySDpmzvQwwk4GUOs= +k8s.io/apiserver v0.29.4 h1:wPwGOO58GQOpRiZu59P5eRoDcB7QtV+QBglkRiXwCiM= +k8s.io/apiserver v0.29.4/go.mod h1:VqTF9t98HVfhKZVRohCPezsdUt9u2g3bHKftxGcXoRo= k8s.io/cli-runtime v0.29.0 h1:q2kC3cex4rOBLfPOnMSzV2BIrrQlx97gxHJs21KxKS4= k8s.io/cli-runtime v0.29.0/go.mod h1:VKudXp3X7wR45L+nER85YUzOQIru28HQpXr0mTdeCrk= k8s.io/client-go v0.17.2/go.mod h1:QAzRgsa0C2xl4/eVpeVAZMvikCn8Nm81yqVx3Kk9XYI= -k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= -k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= +k8s.io/client-go v0.29.4 h1:79ytIedxVfyXV8rpH3jCBW0u+un0fxHDwX5F9K8dPR8= +k8s.io/client-go v0.29.4/go.mod h1:kC1thZQ4zQWYwldsfI088BbK6RkxK+aF5ebV8y9Q4tk= k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= k8s.io/component-base v0.17.2/go.mod h1:zMPW3g5aH7cHJpKYQ/ZsGMcgbsA/VyhEugF3QT1awLs= -k8s.io/component-base v0.29.3 h1:Oq9/nddUxlnrCuuR2K/jp6aflVvc0uDvxMzAWxnGzAo= -k8s.io/component-base v0.29.3/go.mod h1:Yuj33XXjuOk2BAaHsIGHhCKZQAgYKhqIxIjIr2UXYio= +k8s.io/component-base v0.29.4 h1:xeKzuuHI/1tjleu5jycDAcYbhAxeGHCQBZUY2eRIkOo= +k8s.io/component-base v0.29.4/go.mod h1:pYjt+oEZP9gtmwSikwAJgfSBikqKX2gOqRat0QjmQt0= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/helm v2.17.0+incompatible h1:Bpn6o1wKLYqKM3+Osh8e+1/K2g/GsQJ4F4yNF2+deao= From 8a64e64767364eb0ed8fe8b60b90a595f2c3a4d8 Mon Sep 17 00:00:00 2001 From: Cavaughn Browne <113555337+cxbrowne1207@users.noreply.github.com> Date: Wed, 8 May 2024 12:00:05 -0500 Subject: [PATCH 127/193] Bump helm from 3.14.2 to 3.14.4 (#8119) --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 4c575a27bd1a..c25471dac216 100644 --- a/go.mod +++ b/go.mod @@ -50,7 +50,7 @@ require ( gopkg.in/ini.v1 v1.67.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 - helm.sh/helm/v3 v3.14.2 + helm.sh/helm/v3 v3.14.4 k8s.io/api v0.29.4 k8s.io/apimachinery v0.29.4 k8s.io/apiserver v0.29.4 k8s.io/client-go v0.29.4 k8s.io/component-base v0.29.4 @@ -98,7 +98,7 @@ require ( github.com/bmc-toolbox/common v0.0.0-20230717121556-5eb9915a8a5a // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cloudflare/circl v1.3.7 // indirect - github.com/containerd/containerd v1.7.11 // indirect + github.com/containerd/containerd v1.7.12 // indirect github.com/containerd/log v0.1.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect diff --git a/go.sum b/go.sum index d7026a334169..8d800a6755b2 100644 --- a/go.sum +++ b/go.sum @@ -209,8 +209,8 @@ github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWH github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= -github.com/containerd/containerd v1.7.11 h1:lfGKw3eU35sjV0aG2eYZTiwFEY1pCzxdzicHP3SZILw= -github.com/containerd/containerd v1.7.11/go.mod h1:5UluHxHTX2rdvYuZ5OJTC5m/KJNs0Zs9wVoJm9zf5ZE= github.com/containerd/containerd v1.7.12 
h1:+KQsnv4VnzyxWcfO9mlxxELaoztsDEjOuCMPAuPqgU0= +github.com/containerd/containerd v1.7.12/go.mod h1:/5OMpE1p0ylxtEUGY8kuCYkDRzJm9NO1TFMWjUpdevk= github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM= github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= @@ -1457,8 +1457,8 @@ gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= -helm.sh/helm/v3 v3.14.2 h1:V71fv+NGZv0icBlr+in1MJXuUIHCiPG1hW9gEBISTIA= -helm.sh/helm/v3 v3.14.2/go.mod h1:2itvvDv2WSZXTllknfQo6j7u3VVgMAvm8POCDgYH424= +helm.sh/helm/v3 v3.14.4 h1:6FSpEfqyDalHq3kUr4gOMThhgY55kXUEjdQoyODYnrM= +helm.sh/helm/v3 v3.14.4/go.mod h1:Tje7LL4gprZpuBNTbG34d1Xn5NmRT3OWfBRwpOSer9I= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From cb4cda041766f9003fb7586410716879a2bbfa3b Mon Sep 17 00:00:00 2001 From: Chris Doherty Date: Wed, 8 May 2024 17:09:05 -0500 Subject: [PATCH 128/193] Fix cluster directory being created with root ownership (#8120) When hostpaths used for bind mounting in Docker don't exist at the time the container is created, dockerd creates the directory. dockerd runs as root on most systems, hence the directory is created with root ownership. This ensures the directory exists before we attempt to launch the tools container that bind mounts the cluster directory. --- pkg/dependencies/factory.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pkg/dependencies/factory.go b/pkg/dependencies/factory.go index 3f56d8f64f02..312090557e99 100644 --- a/pkg/dependencies/factory.go +++ b/pkg/dependencies/factory.go @@ -341,6 +341,12 @@ func (f *Factory) WithDockerLogin() *Factory { } func (f *Factory) WithExecutableBuilder() *Factory { + // Ensure the file writer is created before the tools container is launched. This is necessary + // because we bind mount the cluster directory into the tools container. If the directory + // doesn't exist, dockerd (running as root) creates the hostpath for the bind mount with root + // ownership. This prevents further files from being written to the cluster directory. 
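+	//
+	// Illustration only (not part of the original change): the underlying Docker behavior can
+	// be reproduced on most hosts with a root dockerd, e.g.
+	//   docker run --rm -v "$PWD/does-not-exist:/data" alpine true
+	// which leaves ./does-not-exist on the host owned by root:root, so an unprivileged CLI
+	// can no longer write into it.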
+ f.WithWriter() + if f.executablesConfig.useDockerContainer { f.WithExecutableImage().WithDocker() if f.registryMirror != nil && f.registryMirror.Auth { From e9569f6fe6c49f9941372d4d63a0d0f12a6b1756 Mon Sep 17 00:00:00 2001 From: Saurabh Parekh Date: Thu, 9 May 2024 09:28:06 -0700 Subject: [PATCH 129/193] Fix typos in general troubleshooting docs (#8122) --- docs/content/en/docs/troubleshooting/troubleshooting.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/content/en/docs/troubleshooting/troubleshooting.md b/docs/content/en/docs/troubleshooting/troubleshooting.md index 54f07ce0ad73..83cb909e6a2d 100755 --- a/docs/content/en/docs/troubleshooting/troubleshooting.md +++ b/docs/content/en/docs/troubleshooting/troubleshooting.md @@ -371,10 +371,10 @@ If the bootstrap log indicates that the etcadm join operation fail, this can mean #### New etcd machine cannot find the existing etcd cluster members -The edcdadm log shows error that the new etcd machine cannot connect to the existing etcd cluster memebers. This means the `etcdadm-init` secret is outdated. To update it, run +The etcdadm log shows an error that the new etcd machine cannot connect to the existing etcd cluster members. This means the `etcdadm-init` secret is outdated. To update it, run ```sh -kubectl edit -etcd-init -n eksa-system +kubectl edit secrets -etcd-init -n eksa-system ``` and make sure the new etcd machine IP is included in the secret. From 43160bf06d881910a09c97ea74591c913213eab8 Mon Sep 17 00:00:00 2001 From: Tanvir Tatla Date: Thu, 9 May 2024 10:46:06 -0700 Subject: [PATCH 130/193] Update Cert Renewal Docs (#8100) * Update Cert Renewal Docs * Include kubelet directions * address pr comments * address more comments --- .../security/manually-renew-certs.md | 42 ++++++++++++++++--- .../docs/troubleshooting/troubleshooting.md | 39 +++++++++++++++++ 2 files changed, 75 insertions(+), 6 deletions(-) diff --git a/docs/content/en/docs/clustermgmt/security/manually-renew-certs.md b/docs/content/en/docs/clustermgmt/security/manually-renew-certs.md index 001155f83c26..c1b403c2c9d0 100644 --- a/docs/content/en/docs/clustermgmt/security/manually-renew-certs.md +++ b/docs/content/en/docs/clustermgmt/security/manually-renew-certs.md @@ -94,15 +94,17 @@ ctr -n k8s.io t exec -t --exec-id etcd ${ETCD_CONTAINER_ID} etcdctl \ {{< /tab >}} {{< /tabpane >}} +- If the above command fails due to multiple etcd containers existing, then navigate to `/var/log/containers/etcd` and confirm which container was running during the issue timeframe (this container would be the 'stale' container). Delete this older etcd once you have renewed the certs and the new etcd container will be able to enter a functioning state. If you don’t do this, the two etcd containers will stay indefinitely and the etcd will not recover. + 3. Repeat the above steps for all etcd nodes. -4. Save the `api-server-etcd-client` `crt` and `key` file as a Secret from one of the etcd nodes, so the `key` can be picked up by new control plane nodes. You will also need them when renewing the certificates on control plane nodes. See the [Kubernetes documentation](https://kubernetes.io/docs/tasks/configmap-secret/managing-secret-using-config-file/#edit-secret) for details on editing Secrets. +4. Save the `apiserver-etcd-client` `crt` and `key` file as a Secret from one of the etcd nodes, so the `key` can be picked up by new control plane nodes. You will also need them when renewing the certificates on control plane nodes. 
See the [Kubernetes documentation](https://kubernetes.io/docs/tasks/configmap-secret/managing-secret-using-config-file/#edit-secret) for details on editing Secrets. ```bash -kubectl edit secret ${cluster-name}-api-server-etcd-client -n eksa-system +kubectl edit secret ${cluster-name}-apiserver-etcd-client -n eksa-system ``` {{% alert title="Note" color="primary" %}} -For Bottlerocket nodes, the `key` of `api-server-etcd-client` is `server-etcd.client.crt` instead of `api-server-etcd-client.crt`. +For Bottlerocket nodes, the `key` of `apiserver-etcd-client` is `server-etcd.client.crt` instead of `apiserver-etcd-client.crt`. {{% /alert %}} #### Control plane nodes @@ -151,7 +153,7 @@ ${IMAGE_ID} tmp-cert-renew \ {{< /tab >}} {{< /tabpane >}} -3. If you have external etcd nodes, manually replace the `api-server-etcd-client.crt` and `api-server-etcd-client.key` file in `/etc/kubernetes/pki` (or `/var/lib/kubeadm/pki` in Bottlerocket) folder with the files you saved from any etcd node. +3. If you have external etcd nodes, manually replace the `apiserver-etcd-client.crt` and `apiserver-etcd-client.key` file in `/etc/kubernetes/pki` (or `/var/lib/kubeadm/pki` in Bottlerocket) folder with the files you saved from any etcd node. 4. Restart static control plane pods. @@ -159,8 +161,8 @@ ${IMAGE_ID} tmp-cert-renew \ - **For Bottlerocket**: re-enable the static pods: ``` - apiclient get | jq -r '.settings.kubernetes["static-pods"] | keys[]' | xargs -n 1 -I {} apiclient set settings.kubernetes.static-pods.{}.enabled=false - apiclient get | jq -r '.settings.kubernetes["static-pods"] | keys[]' | xargs -n 1 -I {} apiclient set settings.kubernetes.static-pods.{}.enabled=true` + apiclient get | apiclient exec admin jq -r '.settings.kubernetes["static-pods"] | keys[]' | xargs -n 1 -I {} apiclient set settings.kubernetes.static-pods.{}.enabled=false + apiclient get | apiclient exec admin jq -r '.settings.kubernetes["static-pods"] | keys[]' | xargs -n 1 -I {} apiclient set settings.kubernetes.static-pods.{}.enabled=true ``` You can verify Pods restarting by running `kubectl` from your Admin machine. @@ -168,3 +170,31 @@ ${IMAGE_ID} tmp-cert-renew \ 5. Repeat the above steps for all control plane nodes. You can similarly use the above steps to rotate a single certificate instead of all certificates. + +### Kubelet +If `kubeadm certs check-expiration` is happy, but kubectl commands against the cluster fail with `x509: certificate has expired or is not yet valid`, then it's likely that the kubelet certs did not rotate. To rotate them, SSH back into one of the control plane nodes and do the following. + +``` +# backup certs +cd /var/lib/kubelet +cp -r pki pki.bak +rm pki/* + +systemctl restart kubelet +``` + +### Post Renewal +Once all the certificates are valid, verify the kcp object on the affected cluster(s) is not paused. If it is paused, then this usually indicates an issue with the etcd cluster. Check the logs for pods under the `etcdadm-controller-system` namespace for any errors. 
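+
+A quick way to pull those controller logs is shown below (a sketch; the deployment name is the usual kubebuilder default and may differ in your installation):
+
+```bash
+kubectl logs -n etcdadm-controller-system deploy/etcdadm-controller-controller-manager
+```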
+If the logs indicate an issue with the etcd endpoints, then you need to update `spec.clusterConfiguration.etcd.endpoints` in the cluster's `kubeadmconfig` resource: `kubectl edit kcp -n eksa-system` + +Example: +``` +etcd: + external: + caFile: /var/lib/kubeadm/pki/etcd/ca.crt + certFile: /var/lib/kubeadm/pki/server-etcd-client.crt + endpoints: + - https://xxx.xxx.xxx.xxx:2379 + - https://xxx.xxx.xxx.xxx:2379 + - https://xxx.xxx.xxx.xxx:2379 +``` diff --git a/docs/content/en/docs/troubleshooting/troubleshooting.md b/docs/content/en/docs/troubleshooting/troubleshooting.md index 83cb909e6a2d..982573d78fa4 100755 --- a/docs/content/en/docs/troubleshooting/troubleshooting.md +++ b/docs/content/en/docs/troubleshooting/troubleshooting.md @@ -248,6 +248,45 @@ kubectl logs -n --kubeconfig= .... ``` +### Kubectl commands return dial tcp: i/o timeout + +If you are unable to run kubectl commands on a cluster due to timeout errors, then it is possible that the server endpoint in the kubeconfig does not match the control plane's endpoint in the infrastructure provider due to kube-vip failing to allocate a virtual IP address to the cluster. If the endpoints do not match, you can ssh into the control plane node to gather logs instead. The kubelet logs can be obtained by running `journalctl -u kubelet.service --no-pager`. It may also be helpful to look at kube-vip logs, which can be found in the `/var/log/pods/kube-system_kube-vip-*` directory. + +#### Verify Cluster Certificates are valid + +If kubectl commands are not working due to timeout issues, then it may also be helpful to verify that certificates on the etcd and control plane nodes have not expired. + +SSH to one of your etcd nodes and view the etcd container logs in `/var/log/containers`. + +View the control plane certificates by SSHing into one of your control plane nodes and run the following commands to view the validity of the /var/lib/kubeadm certificates and see their expiration dates. +{{< tabpane >}} +{{< tab header="Ubuntu or RHEL" lang="bash" >}} +sudo kubeadm certs check-expiration +{{< /tab >}} +{{< tab header="Bottlerocket" lang="bash" >}} +# you would be in the admin container when you ssh to the Bottlerocket machine +# open a root shell +sudo sheltie + +# pull the image +IMAGE_ID=$(apiclient get | apiclient exec admin jq -r '.settings["host-containers"]["kubeadm-bootstrap"].source') + +# ctr is the containerd cli. +# For more information, see https://github.com/projectatomic/containerd/blob/master/docs/cli.md +ctr image pull ${IMAGE_ID} + +# you may see missing etcd certs error, which is expected if you have external etcd nodes +ctr run \ +--mount type=bind,src=/var/lib/kubeadm,dst=/var/lib/kubeadm,options=rbind:rw \ +--mount type=bind,src=/var/lib/kubeadm,dst=/etc/kubernetes,options=rbind:rw \ +--rm \ +${IMAGE_ID} tmp-certs-check \ +/opt/bin/kubeadm certs check-expiration +{{< /tab >}} +{{< /tabpane >}} + +EKS Anywhere typically renews certificates when upgrading a cluster. However, if a cluster has not been upgraded for over a year, then it is necessary to manually renew these certificates. Please see [Certificate rotation]({{< relref "../clustermgmt/security/manually-renew-certs.md" >}}) to manually rotate expired certificates. + ### Bootstrap cluster fails to come up If your bootstrap cluster has problems you may get detailed logs by looking at the files created under the `${CLUSTER_NAME}/logs` folder. 
The capv-controller-manager log file will surface issues with vsphere specific configuration while the capi-controller-manager log file might surface other generic issues with the cluster configuration passed in. From a2648a828a96e80d8fbf786303b49ddad4d627e4 Mon Sep 17 00:00:00 2001 From: Joey Wang Date: Thu, 9 May 2024 15:07:06 -0400 Subject: [PATCH 131/193] Update requirement doc to include vSphere 8 support (#8130) --- docs/content/en/docs/getting-started/vsphere/vsphere-prereq.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/content/en/docs/getting-started/vsphere/vsphere-prereq.md b/docs/content/en/docs/getting-started/vsphere/vsphere-prereq.md index c971f28ed194..123e8b9152b2 100644 --- a/docs/content/en/docs/getting-started/vsphere/vsphere-prereq.md +++ b/docs/content/en/docs/getting-started/vsphere/vsphere-prereq.md @@ -15,7 +15,7 @@ Set up an Administrative machine as described in [Install EKS Anywhere ]({{< rel ## Prepare a VMware vSphere environment To prepare a VMware vSphere environment to run EKS Anywhere, you need the following: -* A vSphere 7+ environment running vCenter. +* A vSphere 7 or 8 environment running vCenter. * Capacity to deploy 6-10 VMs. * DHCP service running in vSphere environment in the primary VM network for your workload cluster. * [Prepare DHCP IP addresses pool]({{< relref "../../clustermgmt/cluster-upgrades/vsphere-and-cloudstack-upgrades.md/#prepare-dhcp-ip-addresses-pool" >}}) From c7a156e562fabcf75c54676dbbe5ed25526ff92b Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Thu, 9 May 2024 15:57:06 -0700 Subject: [PATCH 132/193] [PR BOT] Generate release testdata files (#8128) --- .../testdata/main-bundle-release.yaml | 96 +++++++++---------- 1 file changed, 48 insertions(+), 48 deletions(-) diff --git a/release/cli/pkg/operations/testdata/main-bundle-release.yaml b/release/cli/pkg/operations/testdata/main-bundle-release.yaml index ced0fc2150bb..a1c5cec70a22 100644 --- a/release/cli/pkg/operations/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/operations/testdata/main-bundle-release.yaml @@ -28,7 +28,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.1/metadata.yaml version: v1.7.1+abcdef1 @@ -152,7 +152,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -185,7 +185,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.1/metadata.yaml version: v1.7.1+abcdef1 @@ -209,7 +209,7 @@ 
spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.1/metadata.yaml version: v1.7.1+abcdef1 @@ -226,7 +226,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -348,7 +348,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/metadata.yaml version: v1.0.13+abcdef1 @@ -372,7 +372,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/metadata.yaml version: v1.0.21+abcdef1 @@ -764,7 +764,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -806,7 +806,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.1/metadata.yaml version: v1.7.1+abcdef1 @@ -930,7 +930,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -963,7 +963,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.1/metadata.yaml version: v1.7.1+abcdef1 @@ -987,7 +987,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.1/metadata.yaml version: v1.7.1+abcdef1 @@ -1004,7 +1004,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -1126,7 +1126,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/metadata.yaml version: v1.0.13+abcdef1 @@ -1150,7 +1150,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/metadata.yaml version: v1.0.21+abcdef1 @@ -1542,7 +1542,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -1584,7 +1584,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.1/metadata.yaml version: v1.7.1+abcdef1 @@ -1708,7 +1708,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -1741,7 +1741,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: 
public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.1/metadata.yaml version: v1.7.1+abcdef1 @@ -1765,7 +1765,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.1/metadata.yaml version: v1.7.1+abcdef1 @@ -1782,7 +1782,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -1904,7 +1904,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/metadata.yaml version: v1.0.13+abcdef1 @@ -1928,7 +1928,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/metadata.yaml version: v1.0.21+abcdef1 @@ -2320,7 +2320,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -2362,7 +2362,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.1/metadata.yaml version: v1.7.1+abcdef1 @@ -2486,7 +2486,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 kubeVip: arch: 
- amd64 @@ -2519,7 +2519,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.1/metadata.yaml version: v1.7.1+abcdef1 @@ -2543,7 +2543,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.1/metadata.yaml version: v1.7.1+abcdef1 @@ -2560,7 +2560,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -2682,7 +2682,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/metadata.yaml version: v1.0.13+abcdef1 @@ -2706,7 +2706,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/metadata.yaml version: v1.0.21+abcdef1 @@ -3098,7 +3098,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -3140,7 +3140,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.1/metadata.yaml version: v1.7.1+abcdef1 @@ -3264,7 +3264,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: 
public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -3297,7 +3297,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.1/metadata.yaml version: v1.7.1+abcdef1 @@ -3321,7 +3321,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.1/metadata.yaml version: v1.7.1+abcdef1 @@ -3338,7 +3338,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -3460,7 +3460,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/metadata.yaml version: v1.0.13+abcdef1 @@ -3484,7 +3484,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/metadata.yaml version: v1.0.21+abcdef1 @@ -3876,7 +3876,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -3918,7 +3918,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.1/metadata.yaml version: 
v1.7.1+abcdef1 @@ -4042,7 +4042,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -4075,7 +4075,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.1/metadata.yaml version: v1.7.1+abcdef1 @@ -4099,7 +4099,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.1/metadata.yaml version: v1.7.1+abcdef1 @@ -4116,7 +4116,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -4238,7 +4238,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/metadata.yaml version: v1.0.13+abcdef1 @@ -4262,7 +4262,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/metadata.yaml version: v1.0.21+abcdef1 @@ -4654,7 +4654,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 From 8dabd4df826fe642b0d5e99744c1a2a8fb718d42 Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Fri, 10 May 2024 11:50:09 -0700 Subject: [PATCH 133/193] [PR BOT] Generate release testdata files (#8133) --- .../testdata/main-bundle-release.yaml | 72 +++++++++---------- 1 
file changed, 36 insertions(+), 36 deletions(-) diff --git a/release/cli/pkg/operations/testdata/main-bundle-release.yaml b/release/cli/pkg/operations/testdata/main-bundle-release.yaml index a1c5cec70a22..94b570a4707f 100644 --- a/release/cli/pkg/operations/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/operations/testdata/main-bundle-release.yaml @@ -575,7 +575,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cexec os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/cexec:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/cexec:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 imageToDisk: arch: - amd64 @@ -584,7 +584,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: image2disk os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/image2disk:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/image2disk:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 kexec: arch: - amd64 @@ -593,7 +593,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kexec os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/kexec:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/kexec:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 ociToDisk: arch: - amd64 @@ -602,7 +602,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: oci2disk os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/oci2disk:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/oci2disk:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 reboot: arch: - amd64 @@ -611,7 +611,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: reboot os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/reboot:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/reboot:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 writeFile: arch: - amd64 @@ -620,7 +620,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: writefile os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/writefile:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/writefile:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 boots: arch: - amd64 @@ -1353,7 +1353,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cexec os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/cexec:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/cexec:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 imageToDisk: arch: - amd64 @@ -1362,7 +1362,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: image2disk os: linux - uri: 
public.ecr.aws/release-container-registry/tinkerbell/hub/image2disk:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/image2disk:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 kexec: arch: - amd64 @@ -1371,7 +1371,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kexec os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/kexec:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/kexec:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 ociToDisk: arch: - amd64 @@ -1380,7 +1380,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: oci2disk os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/oci2disk:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/oci2disk:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 reboot: arch: - amd64 @@ -1389,7 +1389,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: reboot os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/reboot:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/reboot:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 writeFile: arch: - amd64 @@ -1398,7 +1398,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: writefile os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/writefile:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/writefile:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 boots: arch: - amd64 @@ -2131,7 +2131,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cexec os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/cexec:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/cexec:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 imageToDisk: arch: - amd64 @@ -2140,7 +2140,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: image2disk os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/image2disk:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/image2disk:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 kexec: arch: - amd64 @@ -2149,7 +2149,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kexec os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/kexec:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/kexec:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 ociToDisk: arch: - amd64 @@ -2158,7 +2158,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: oci2disk os: linux - uri: 
public.ecr.aws/release-container-registry/tinkerbell/hub/oci2disk:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/oci2disk:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 reboot: arch: - amd64 @@ -2167,7 +2167,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: reboot os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/reboot:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/reboot:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 writeFile: arch: - amd64 @@ -2176,7 +2176,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: writefile os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/writefile:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/writefile:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 boots: arch: - amd64 @@ -2909,7 +2909,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cexec os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/cexec:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/cexec:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 imageToDisk: arch: - amd64 @@ -2918,7 +2918,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: image2disk os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/image2disk:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/image2disk:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 kexec: arch: - amd64 @@ -2927,7 +2927,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kexec os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/kexec:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/kexec:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 ociToDisk: arch: - amd64 @@ -2936,7 +2936,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: oci2disk os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/oci2disk:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/oci2disk:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 reboot: arch: - amd64 @@ -2945,7 +2945,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: reboot os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/reboot:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/reboot:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 writeFile: arch: - amd64 @@ -2954,7 +2954,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: writefile os: linux - uri: 
public.ecr.aws/release-container-registry/tinkerbell/hub/writefile:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/writefile:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 boots: arch: - amd64 @@ -3687,7 +3687,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cexec os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/cexec:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/cexec:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 imageToDisk: arch: - amd64 @@ -3696,7 +3696,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: image2disk os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/image2disk:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/image2disk:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 kexec: arch: - amd64 @@ -3705,7 +3705,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kexec os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/kexec:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/kexec:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 ociToDisk: arch: - amd64 @@ -3714,7 +3714,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: oci2disk os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/oci2disk:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/oci2disk:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 reboot: arch: - amd64 @@ -3723,7 +3723,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: reboot os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/reboot:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/reboot:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 writeFile: arch: - amd64 @@ -3732,7 +3732,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: writefile os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/writefile:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/writefile:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 boots: arch: - amd64 @@ -4465,7 +4465,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cexec os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/cexec:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/cexec:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 imageToDisk: arch: - amd64 @@ -4474,7 +4474,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: image2disk os: linux - uri: 
public.ecr.aws/release-container-registry/tinkerbell/hub/image2disk:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/image2disk:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 kexec: arch: - amd64 @@ -4483,7 +4483,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kexec os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/kexec:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/kexec:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 ociToDisk: arch: - amd64 @@ -4492,7 +4492,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: oci2disk os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/oci2disk:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/oci2disk:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 reboot: arch: - amd64 @@ -4501,7 +4501,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: reboot os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/reboot:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/reboot:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 writeFile: arch: - amd64 @@ -4510,7 +4510,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: writefile os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/hub/writefile:404dab73a8a7f33e973c6e71782f07e82b125da9-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/hub/writefile:d524b77c7a44525c4318da3d2b5857c03711f3f8-eks-a-v0.0.0-dev-build.1 boots: arch: - amd64 From 7e9f310f2cca75b2d01626b9087df9d5439e9bc3 Mon Sep 17 00:00:00 2001 From: Chris Negus Date: Fri, 10 May 2024 17:19:08 -0400 Subject: [PATCH 134/193] Update partner docs info (#8135) --- .../en/docs/overview/partner/_index.md | 124 +++++++++++------- 1 file changed, 77 insertions(+), 47 deletions(-) diff --git a/docs/content/en/docs/overview/partner/_index.md b/docs/content/en/docs/overview/partner/_index.md index 6fe0a9bb2262..9f865e187d9d 100644 --- a/docs/content/en/docs/overview/partner/_index.md +++ b/docs/content/en/docs/overview/partner/_index.md @@ -16,60 +16,65 @@ The following shows validated EKS Anywhere partners whose products have passed c ``` Kubernetes Version : 1.27 -Date of Conformance Test : 2023-09-21 +Date of Conformance Test : 2024-05-02 Following ISV Partners have Validated their Conformance : -VENDOR_PRODUCT VENDOR_PRODUCT_TYPE -dynatrace dynatrace -komodor k8s-watcher -kong kong-enterprise -accuknox kubearmor -kubecost cost-analyzer -nirmata enterprise-kyverno -lacework polygraph -suse neuvector -newrelic newrelic-bundle -perfectscale perfectscale -pulumi pulumi-kubernetes-operator -sysdig sysdig-agent -hashicorp vault +VENDOR_PRODUCT VENDOR_PRODUCT_TYPE VENDOR_PRODUCT_VERSION +aqua aqua-enforcer 2022.4.20 +dynatrace dynatrace 0.10.1 +komodor k8s-watcher 1.15.5 +kong kong-enterprise 2.27.0 +accuknox kubearmor v1.3.2 +kubecost cost-analyzer 2.1.0 +nirmata enterprise-kyverno 1.6.10 +lacework polygraph 6.11.0 +newrelic nri-bundle 5.0.64 +perfectscale perfectscale 
v0.0.38 +pulumi pulumi-kubernetes-operator 0.3.0 +solo.io solo-istiod 1.18.3-eks-a +sysdig sysdig-agent 1.6.3 +tetrate.io tetrate-istio-distribution 1.18.1 +hashicorp vault 0.25.0 ``` ## vSphere provider validated partners ``` -Kubernetes Version : 1.27 -Date of Conformance Test : 2023-09-21 +Kubernetes Version : 1.28 +Date of Conformance Test : 2024-05-02 Following ISV Partners have Validated their Conformance : -VENDOR_PRODUCT VENDOR_PRODUCT_TYPE -dynatrace dynatrace -komodor k8s-watcher -kong kong-enterprise -accuknox kubearmor -kubecost cost-analyzer -nirmata enterprise-kyverno -lacework polygraph -suse neuvector -newrelic newrelic-bundle -perfectscale perfectscale -pulumi pulumi-kubernetes-operator -sysdig sysdig-agent -hashicorp vault +VENDOR_PRODUCT VENDOR_PRODUCT_TYPE VENDOR_PRODUCT_VERSION +aqua aqua-enforcer 2022.4.20 +dynatrace dynatrace 0.10.1 +komodor k8s-watcher 1.15.5 +kong kong-enterprise 2.27.0 +accuknox kubearmor v1.3.2 +kubecost cost-analyzer 2.1.0 +nirmata enterprise-kyverno 1.6.10 +lacework polygraph 6.11.0 +newrelic nri-bundle 5.0.64 +perfectscale perfectscale v0.0.38 +pulumi pulumi-kubernetes-operator 0.3.0 +solo.io solo-istiod 1.18.3-eks-a +sysdig sysdig-agent 1.6.3 +tetrate.io tetrate-istio-distribution 1.18.1 +hashicorp vault 0.25.0 ``` ## AWS Snow provider validated partners ``` -Kubernetes Version : 1.27 -Date of Conformance Test : 2023-09-21 +Kubernetes Version : 1.28 +Date of Conformance Test : 2023-11-10 Following ISV Partners have Validated their Conformance : VENDOR_PRODUCT VENDOR_PRODUCT_TYPE dynatrace dynatrace +solo.io solo-istiod komodor k8s-watcher kong kong-enterprise accuknox kubearmor @@ -86,21 +91,46 @@ hashicorp vault ## AWS Outpost provider validated partners ``` Kubernetes Version : 1.27 -Date of Conformance Test : 2023-09-21 +Date of Conformance Test : 2024-05-02 Following ISV Partners have Validated their Conformance : -VENDOR_PRODUCT VENDOR_PRODUCT_TYPE -dynatrace dynatrace -komodor k8s-watcher -kong kong-enterprise -accuknox kubearmor -kubecost cost-analyzer -nirmata enterprise-kyverno -lacework polygraph -suse neuvector -perfectscale perfectscale -pulumi pulumi-kubernetes-operator -sysdig sysdig-agent -hashicorp vault +VENDOR_PRODUCT VENDOR_PRODUCT_TYPE VENDOR_PRODUCT_VERSION +aqua aqua-enforcer 2022.4.20 +dynatrace dynatrace 0.10.1 +komodor k8s-watcher 1.15.5 +kong kong-enterprise 2.27.0 +accuknox kubearmor v1.3.2 +kubecost cost-analyzer 2.1.0 +nirmata enterprise-kyverno 1.6.10 +lacework polygraph 6.11.0 +perfectscale perfectscale v0.0.38 +pulumi pulumi-kubernetes-operator 0.3.0 +solo.io solo-istiod 1.18.3-eks-a +sysdig sysdig-agent 1.6.3 +tetrate.io tetrate-istio-distribution 1.18.1 +hashicorp vault 0.25.0 +``` +## Cloud partners +``` +Kubernetes Version : 1.28 +Date of Conformance Test : 2024-05-02 + +Following ISV Partners have Validated their Conformance : + +VENDOR_PRODUCT VENDOR_PRODUCT_TYPE VENDOR_PRODUCT_VERSION +aqua aqua-enforcer 2022.4.20 +komodor k8s-watcher 1.15.5 +kong kong-enterprise 2.27.0 +accuknox kubearmor v1.3.2 +kubecost cost-analyzer 2.1.0 +nirmata enterprise-kyverno 1.6.10 +lacework polygraph 6.11.0 +newrelic nri-bundle 5.0.64 +perfectscale perfectscale v0.0.38 +pulumi pulumi-kubernetes-operator 0.3.0 +solo.io solo-istiod 1.18.3-eks-a +sysdig sysdig-agent 1.6.3 +tetrate.io tetrate-istio-distribution 1.18.1 +hashicorp vault 0.25.0 ``` From 388457859221d3c69dc1f6166c39ef642d3e6d1e Mon Sep 17 00:00:00 2001 From: Tanvir Tatla Date: Fri, 10 May 2024 14:53:09 -0700 Subject: [PATCH 135/193] Refine Cert Renewal Docs (#8138) 
--- .../security/manually-renew-certs.md | 23 ++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/docs/content/en/docs/clustermgmt/security/manually-renew-certs.md b/docs/content/en/docs/clustermgmt/security/manually-renew-certs.md index c1b403c2c9d0..16097215c955 100644 --- a/docs/content/en/docs/clustermgmt/security/manually-renew-certs.md +++ b/docs/content/en/docs/clustermgmt/security/manually-renew-certs.md @@ -183,8 +183,29 @@ rm pki/* systemctl restart kubelet ``` +In some cases, the certs might not regenerate and kubelet will fail to start due to a missing `kubelet-client-current.pem`. If this happens, run the following commands: + +{{< tabpane >}} +{{< tab header="Ubuntu or RHEL" lang="bash" >}} +cat /var/lib/kubeadm/admin.conf | grep client-certificate-data: | sed 's/^.*: //' | base64 -d > /var/lib/kubelet/pki/kubelet-client-current.pem + +cat /var/lib/kubeadm/admin.conf | grep client-key-data: | sed 's/^.*: //' | base64 -d >> /var/lib/kubelet/pki/kubelet-client-current.pem + +systemctl restart kubelet + +{{< /tab >}} +{{< tab header="Bottlerocket" lang="bash" >}} +cat /var/lib/kubeadm/admin.conf | grep client-certificate-data: | apiclient exec admin sed 's/^.*: //' | base64 -d > /var/lib/kubelet/pki/kubelet-client-current.pem + +cat /var/lib/kubeadm/admin.conf | grep client-key-data: | apiclient exec admin sed 's/^.*: //' | base64 -d >> /var/lib/kubelet/pki/kubelet-client-current.pem + +systemctl restart kubelet + +{{< /tab >}} +{{< /tabpane >}} + ### Post Renewal -Once all the certificates are valid, verify the kcp object on the affected cluster(s) is not paused. If it is paused, then this usually indicates an issue with the etcd cluster. Check the logs for pods under the `etcdadm-controller-system` namespace for any errors. +Once all the certificates are valid, verify the kcp object on the affected cluster(s) is not paused by running `kubectl describe kcp -n eksa-system | grep cluster.x-k8s.io/paused`. If it is paused, then this usually indicates an issue with the etcd cluster. Check the logs for pods under the `etcdadm-controller-system` namespace for any errors. 
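To confirm the paused state and inspect the etcdadm controller, the following commands may help (a minimal sketch using standard kubectl; the controller pod name varies per cluster, so it is looked up first rather than assumed):

```bash
# Check whether the KubeadmControlPlane (kcp) object carries the paused annotation.
kubectl describe kcp -n eksa-system | grep cluster.x-k8s.io/paused

# List the etcdadm controller pods, then scan the logs of the reported pod for errors.
kubectl get pods -n etcdadm-controller-system
kubectl logs -n etcdadm-controller-system <etcdadm-controller-pod-name> | grep -i error
```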
If the logs indicate an issue with the etcd endpoints, then you need to update `spec.clusterConfiguration.etcd.endpoints` in the cluster's `kubeadmconfig` resource: `kubectl edit kcp -n eksa-system` Example: From 770f5865dae55f21cecfba446072bca0e56e9db4 Mon Sep 17 00:00:00 2001 From: Chris Negus Date: Sat, 11 May 2024 10:48:23 -0400 Subject: [PATCH 136/193] Fixed partner docs info (#8139) --- .../en/docs/overview/partner/_index.md | 23 ------------------- 1 file changed, 23 deletions(-) diff --git a/docs/content/en/docs/overview/partner/_index.md b/docs/content/en/docs/overview/partner/_index.md index 9f865e187d9d..184a3b5366f1 100644 --- a/docs/content/en/docs/overview/partner/_index.md +++ b/docs/content/en/docs/overview/partner/_index.md @@ -111,26 +111,3 @@ sysdig sysdig-agent 1.6.3 tetrate.io tetrate-istio-distribution 1.18.1 hashicorp vault 0.25.0 ``` -## Cloud partners -``` -Kubernetes Version : 1.28 -Date of Conformance Test : 2024-05-02 - -Following ISV Partners have Validated their Conformance : - -VENDOR_PRODUCT VENDOR_PRODUCT_TYPE VENDOR_PRODUCT_VERSION -aqua aqua-enforcer 2022.4.20 -komodor k8s-watcher 1.15.5 -kong kong-enterprise 2.27.0 -accuknox kubearmor v1.3.2 -kubecost cost-analyzer 2.1.0 -nirmata enterprise-kyverno 1.6.10 -lacework polygraph 6.11.0 -newrelic nri-bundle 5.0.64 -perfectscale perfectscale v0.0.38 -pulumi pulumi-kubernetes-operator 0.3.0 -solo.io solo-istiod 1.18.3-eks-a -sysdig sysdig-agent 1.6.3 -tetrate.io tetrate-istio-distribution 1.18.1 -hashicorp vault 0.25.0 -``` From fecf180e1bc980437f00c2cac99202b767d0c9c8 Mon Sep 17 00:00:00 2001 From: Rahul Ganesh <31204974+rahulbabu95@users.noreply.github.com> Date: Mon, 13 May 2024 14:53:23 -0700 Subject: [PATCH 137/193] Add e2e tests for Vsphere for K8s version 1.30 (#8136) Signed-off-by: Rahul Ganesh Co-authored-by: Rahul Ganesh --- test/e2e/QUICK_TESTS.yaml | 20 +- test/e2e/SKIPPED_TESTS.yaml | 13 + test/e2e/vsphere_test.go | 1516 ++++++++++++++++++++++++++++------- test/framework/cluster.go | 2 + test/framework/vsphere.go | 36 + 5 files changed, 1303 insertions(+), 284 deletions(-) diff --git a/test/e2e/QUICK_TESTS.yaml b/test/e2e/QUICK_TESTS.yaml index 023b7d23ca41..71806820ade6 100644 --- a/test/e2e/QUICK_TESTS.yaml +++ b/test/e2e/QUICK_TESTS.yaml @@ -2,15 +2,15 @@ quick_tests: # Docker - TestDocker.*128 # vSphere -- ^TestVSphereKubernetes128To129RedHatUpgrade$ -- TestVSphereKubernetes128To129StackedEtcdRedHatUpgrade -- ^TestVSphereKubernetes128UbuntuTo129Upgrade$ -- TestVSphereKubernetes128UbuntuTo129StackedEtcdUpgrade -- TestVSphereKubernetes128To129Ubuntu2204Upgrade -- TestVSphereKubernetes128To129Ubuntu2204StackedEtcdUpgrade -- TestVSphereKubernetes129Ubuntu2004To2204Upgrade -- TestVSphereKubernetes128BottlerocketTo129Upgrade -- TestVSphereKubernetes128BottlerocketTo129StackedEtcdUpgrade +- ^TestVSphereKubernetes129To130RedHatUpgrade$ +- TestVSphereKubernetes129To130StackedEtcdRedHatUpgrade +- ^TestVSphereKubernetes129UbuntuTo130Upgrade$ +- TestVSphereKubernetes129UbuntuTo130StackedEtcdUpgrade +- TestVSphereKubernetes129To130Ubuntu2204Upgrade +- TestVSphereKubernetes129To130Ubuntu2204StackedEtcdUpgrade +- TestVSphereKubernetes130Ubuntu2004To2204Upgrade +- TestVSphereKubernetes129BottlerocketTo130Upgrade +- TestVSphereKubernetes129BottlerocketTo130StackedEtcdUpgrade # CloudStack - TestCloudStackKubernetes128To129RedhatMultipleFieldsUpgrade - TestCloudStackKubernetes128To129StackedEtcdRedhatMultipleFieldsUpgrade @@ -27,4 +27,4 @@ quick_tests: # Tinkerbell - 
^TestTinkerbellKubernetes128UbuntuTo129Upgrade$ - TestTinkerbellKubernetes129Ubuntu2004To2204Upgrade -- TestTinkerbellKubernetes128To129Ubuntu2204Upgrade +- TestTinkerbellKubernetes128To129Ubuntu2204Upgrade \ No newline at end of file diff --git a/test/e2e/SKIPPED_TESTS.yaml b/test/e2e/SKIPPED_TESTS.yaml index 75d398e9b0d5..a9d9d6f6574b 100644 --- a/test/e2e/SKIPPED_TESTS.yaml +++ b/test/e2e/SKIPPED_TESTS.yaml @@ -26,6 +26,19 @@ skipped_tests: # Nutanix +# Curated packages test for new K8s version +- TestVSphereKubernetes130CuratedPackagesSimpleFlow +- TestVSphereKubernetes130BottleRocketCuratedPackagesSimpleFlow +- TestVSphereKubernetes130CuratedPackagesEmissarySimpleFlow +- TestVSphereKubernetes130BottleRocketCuratedPackagesEmissarySimpleFlow +- TestVSphereKubernetes130CuratedPackagesHarborSimpleFlow +- TestVSphereKubernetes130BottleRocketCuratedPackagesHarborSimpleFlow +- TestVSphereKubernetes130CuratedPackagesAdotUpdateFlow +- TestVSphereKubernetes130BottleRocketCuratedPackagesAdotUpdateFlow +- TestVSphereKubernetes130UbuntuCuratedPackagesClusterAutoscalerSimpleFlow +- TestVSphereKubernetes130UbuntuCuratedPackagesPrometheusSimpleFlow +- TestVSphereKubernetes130UbuntuWorkloadClusterCuratedPackagesSimpleFlow + # Snow - TestSnowKubernetes125SimpleFlow - TestSnowKubernetes126SimpleFlow diff --git a/test/e2e/vsphere_test.go b/test/e2e/vsphere_test.go index d18f65ac3492..e15ffc896560 100644 --- a/test/e2e/vsphere_test.go +++ b/test/e2e/vsphere_test.go @@ -63,7 +63,7 @@ func TestVSphereKubernetes129BottlerocketAPIServerExtraArgsUpgradeFlow(t *testin } // Autoimport -func TestVSphereKubernetes125BottlerocketAutoimport(t *testing.T) { +func TestVSphereKubernetes126BottlerocketAutoimport(t *testing.T) { provider := framework.NewVSphere(t, framework.WithVSphereFillers( api.WithTemplateForAllMachines(""), @@ -73,12 +73,12 @@ func TestVSphereKubernetes125BottlerocketAutoimport(t *testing.T) { test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)), ) runAutoImportFlow(test, provider) } -func TestVSphereKubernetes126BottlerocketAutoimport(t *testing.T) { +func TestVSphereKubernetes127BottlerocketAutoimport(t *testing.T) { provider := framework.NewVSphere(t, framework.WithVSphereFillers( api.WithTemplateForAllMachines(""), @@ -88,12 +88,12 @@ func TestVSphereKubernetes126BottlerocketAutoimport(t *testing.T) { test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), ) runAutoImportFlow(test, provider) } -func TestVSphereKubernetes127BottlerocketAutoimport(t *testing.T) { +func TestVSphereKubernetes128BottlerocketAutoimport(t *testing.T) { provider := framework.NewVSphere(t, framework.WithVSphereFillers( api.WithTemplateForAllMachines(""), @@ -103,12 +103,12 @@ func TestVSphereKubernetes127BottlerocketAutoimport(t *testing.T) { test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), ) runAutoImportFlow(test, provider) } -func TestVSphereKubernetes128BottlerocketAutoimport(t *testing.T) { +func TestVSphereKubernetes129BottlerocketAutoimport(t *testing.T) { provider := framework.NewVSphere(t, framework.WithVSphereFillers( api.WithTemplateForAllMachines(""), @@ -118,12 +118,12 
@@ func TestVSphereKubernetes128BottlerocketAutoimport(t *testing.T) { test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), ) runAutoImportFlow(test, provider) } -func TestVSphereKubernetes129BottlerocketAutoimport(t *testing.T) { +func TestVSphereKubernetes130BottlerocketAutoimport(t *testing.T) { provider := framework.NewVSphere(t, framework.WithVSphereFillers( api.WithTemplateForAllMachines(""), @@ -133,22 +133,12 @@ func TestVSphereKubernetes129BottlerocketAutoimport(t *testing.T) { test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), ) runAutoImportFlow(test, provider) } // AWS IAM Auth -func TestVSphereKubernetes125AWSIamAuth(t *testing.T) { - test := framework.NewClusterE2ETest( - t, - framework.NewVSphere(t, framework.WithUbuntu125()), - framework.WithAWSIam(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - ) - runAWSIamAuthFlow(test) -} - func TestVSphereKubernetes126AWSIamAuth(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -189,10 +179,10 @@ func TestVSphereKubernetes129AWSIamAuth(t *testing.T) { runAWSIamAuthFlow(test) } -func TestVSphereKubernetes125BottleRocketAWSIamAuth(t *testing.T) { +func TestVSphereKubernetes130AWSIamAuth(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewVSphere(t, framework.WithBottleRocket125()), + framework.NewVSphere(t, framework.WithUbuntu130()), framework.WithAWSIam(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), ) @@ -239,35 +229,33 @@ func TestVSphereKubernetes129BottleRocketAWSIamAuth(t *testing.T) { runAWSIamAuthFlow(test) } -func TestVSphereKubernetes127To128AWSIamAuthUpgrade(t *testing.T) { - provider := framework.NewVSphere(t, framework.WithUbuntu127()) +func TestVSphereKubernetes130BottleRocketAWSIamAuth(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithBottleRocket130()), + framework.WithAWSIam(), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + ) + runAWSIamAuthFlow(test) +} + +func TestVSphereKubernetes129To130AWSIamAuthUpgrade(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu129()) test := framework.NewClusterE2ETest( t, provider, framework.WithAWSIam(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), ) runUpgradeFlowWithAWSIamAuth( test, - v1alpha1.Kube128, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube128)), - provider.WithProviderUpgrade(provider.Ubuntu128Template()), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Ubuntu130Template()), ) } // Curated packages -func TestVSphereKubernetes125CuratedPackagesSimpleFlow(t *testing.T) { - framework.CheckCuratedPackagesCredentials(t) - test := framework.NewClusterE2ETest(t, - framework.NewVSphere(t, framework.WithUbuntu125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), - EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, - EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), - ) - runCuratedPackageInstallSimpleFlow(test) -} - func TestVSphereKubernetes126CuratedPackagesSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, @@ -316,12 +304,12 @@ func TestVSphereKubernetes129CuratedPackagesSimpleFlow(t *testing.T) { runCuratedPackageInstallSimpleFlow(test) } -func TestVSphereKubernetes125BottleRocketCuratedPackagesSimpleFlow(t *testing.T) { +func TestVSphereKubernetes130CuratedPackagesSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewVSphere(t, framework.WithBottleRocket125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.NewVSphere(t, framework.WithUbuntu130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) @@ -376,16 +364,16 @@ func TestVSphereKubernetes129BottleRocketCuratedPackagesSimpleFlow(t *testing.T) runCuratedPackageInstallSimpleFlow(test) } -func TestVSphereKubernetes125CuratedPackagesEmissarySimpleFlow(t *testing.T) { +func TestVSphereKubernetes130BottleRocketCuratedPackagesSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewVSphere(t, framework.WithUbuntu125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.NewVSphere(t, framework.WithBottleRocket130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runCuratedPackageEmissaryInstallSimpleFlow(test) + runCuratedPackageInstallSimpleFlow(test) } func TestVSphereKubernetes126CuratedPackagesEmissarySimpleFlow(t *testing.T) { @@ -436,12 +424,12 @@ func TestVSphereKubernetes129CuratedPackagesEmissarySimpleFlow(t *testing.T) { runCuratedPackageEmissaryInstallSimpleFlow(test) } -func TestVSphereKubernetes125BottleRocketCuratedPackagesEmissarySimpleFlow(t *testing.T) { +func TestVSphereKubernetes130CuratedPackagesEmissarySimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewVSphere(t, framework.WithBottleRocket125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.NewVSphere(t, framework.WithUbuntu130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, 
EksaPackageControllerHelmValues, nil), - ) - runCuratedPackageInstallSimpleFlow(test) -} - func TestVSphereKubernetes126CuratedPackagesSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, @@ -316,12 +304,12 @@ func TestVSphereKubernetes129CuratedPackagesSimpleFlow(t *testing.T) { runCuratedPackageInstallSimpleFlow(test) } -func TestVSphereKubernetes125BottleRocketCuratedPackagesSimpleFlow(t *testing.T) { +func TestVSphereKubernetes130CuratedPackagesSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewVSphere(t, framework.WithBottleRocket125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.NewVSphere(t, framework.WithUbuntu125()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) @@ -376,16 +364,16 @@ func TestVSphereKubernetes129BottleRocketCuratedPackagesSimpleFlow(t *testing.T) runCuratedPackageInstallSimpleFlow(test) } -func TestVSphereKubernetes125CuratedPackagesEmissarySimpleFlow(t *testing.T) { +func TestVSphereKubernetes130BottleRocketCuratedPackagesSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewVSphere(t, framework.WithUbuntu125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.NewVSphere(t, framework.WithBottleRocket130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runCuratedPackageEmissaryInstallSimpleFlow(test) + runCuratedPackageInstallSimpleFlow(test) } func TestVSphereKubernetes126CuratedPackagesEmissarySimpleFlow(t *testing.T) { @@ -436,12 +424,12 @@ func TestVSphereKubernetes129CuratedPackagesEmissarySimpleFlow(t *testing.T) { runCuratedPackageEmissaryInstallSimpleFlow(test) } -func TestVSphereKubernetes125BottleRocketCuratedPackagesEmissarySimpleFlow(t *testing.T) { +func TestVSphereKubernetes130CuratedPackagesEmissarySimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewVSphere(t, framework.WithBottleRocket125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.NewVSphere(t, framework.WithUbuntu130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) @@ -496,16 +484,16 @@ func TestVSphereKubernetes129BottleRocketCuratedPackagesEmissarySimpleFlow(t *te runCuratedPackageEmissaryInstallSimpleFlow(test) } -func TestVSphereKubernetes125CuratedPackagesHarborSimpleFlow(t *testing.T) { +func TestVSphereKubernetes130BottleRocketCuratedPackagesEmissarySimpleFlow(t 
*testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewVSphere(t, framework.WithUbuntu125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.NewVSphere(t, framework.WithBottleRocket130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test) + runCuratedPackageEmissaryInstallSimpleFlow(test) } func TestVSphereKubernetes126CuratedPackagesHarborSimpleFlow(t *testing.T) { @@ -556,12 +544,12 @@ func TestVSphereKubernetes129CuratedPackagesHarborSimpleFlow(t *testing.T) { runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test) } -func TestVSphereKubernetes125BottleRocketCuratedPackagesHarborSimpleFlow(t *testing.T) { +func TestVSphereKubernetes130CuratedPackagesHarborSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewVSphere(t, framework.WithBottleRocket125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube128), + framework.NewVSphere(t, framework.WithUbuntu130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) @@ -616,16 +604,16 @@ func TestVSphereKubernetes129BottleRocketCuratedPackagesHarborSimpleFlow(t *test runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test) } -func TestVSphereKubernetes125CuratedPackagesAdotUpdateFlow(t *testing.T) { +func TestVSphereKubernetes130BottleRocketCuratedPackagesHarborSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewVSphere(t, framework.WithUbuntu125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.NewVSphere(t, framework.WithBottleRocket130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runCuratedPackagesAdotInstallUpdateFlow(test) + runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test) } func TestVSphereKubernetes126CuratedPackagesAdotUpdateFlow(t *testing.T) { @@ -676,12 +664,12 @@ func TestVSphereKubernetes129CuratedPackagesAdotUpdateFlow(t *testing.T) { runCuratedPackagesAdotInstallUpdateFlow(test) } -func TestVSphereKubernetes125BottleRocketCuratedPackagesAdotUpdateFlow(t *testing.T) { +func TestVSphereKubernetes130CuratedPackagesAdotUpdateFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewVSphere(t, framework.WithBottleRocket125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, 
packageBundleURI(v1alpha1.Kube125), + framework.NewVSphere(t, framework.WithUbuntu130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) @@ -736,18 +724,16 @@ func TestVSphereKubernetes129BottleRocketCuratedPackagesAdotUpdateFlow(t *testin runCuratedPackagesAdotInstallUpdateFlow(test) } -func TestVSphereKubernetes125UbuntuCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) { - minNodes := 1 - maxNodes := 2 +func TestVSphereKubernetes130BottleRocketCuratedPackagesAdotUpdateFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewVSphere(t, framework.WithUbuntu125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.NewVSphere(t, framework.WithBottleRocket130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runAutoscalerWithMetricsServerSimpleFlow(test) + runCuratedPackagesAdotInstallUpdateFlow(test) } func TestVSphereKubernetes126UbuntuCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) { @@ -806,14 +792,14 @@ func TestVSphereKubernetes129UbuntuCuratedPackagesClusterAutoscalerSimpleFlow(t runAutoscalerWithMetricsServerSimpleFlow(test) } -func TestVSphereKubernetes125BottleRocketCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) { +func TestVSphereKubernetes130UbuntuCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) { minNodes := 1 maxNodes := 2 framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewVSphere(t, framework.WithBottleRocket125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.NewVSphere(t, framework.WithUbuntu130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) @@ -862,16 +848,18 @@ func TestVSphereKubernetes128BottleRocketCuratedPackagesClusterAutoscalerSimpleF runAutoscalerWithMetricsServerSimpleFlow(test) } -func TestVSphereKubernetes125UbuntuCuratedPackagesPrometheusSimpleFlow(t *testing.T) { +func TestVSphereKubernetes130BottleRocketCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) { + minNodes := 1 + maxNodes := 2 framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewVSphere(t, framework.WithUbuntu125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.NewVSphere(t, framework.WithBottleRocket130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130), 
api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runCuratedPackagesPrometheusInstallSimpleFlow(test) + runAutoscalerWithMetricsServerSimpleFlow(test) } func TestVSphereKubernetes126UbuntuCuratedPackagesPrometheusSimpleFlow(t *testing.T) { @@ -922,12 +910,12 @@ func TestVSphereKubernetes129UbuntuCuratedPackagesPrometheusSimpleFlow(t *testin runCuratedPackagesPrometheusInstallSimpleFlow(test) } -func TestVSphereKubernetes125BottleRocketCuratedPackagesPrometheusSimpleFlow(t *testing.T) { +func TestVSphereKubernetes130UbuntuCuratedPackagesPrometheusSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewVSphere(t, framework.WithBottleRocket125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.NewVSphere(t, framework.WithUbuntu130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) @@ -970,11 +958,16 @@ func TestVSphereKubernetes128BottleRocketCuratedPackagesPrometheusSimpleFlow(t * runCuratedPackagesPrometheusInstallSimpleFlow(test) } -func TestVSphereKubernetes125UbuntuWorkloadClusterCuratedPackagesSimpleFlow(t *testing.T) { +func TestVSphereKubernetes130BottleRocketCuratedPackagesPrometheusSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) - provider := framework.NewVSphere(t, framework.WithUbuntu125()) - test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube125) - runCuratedPackageRemoteClusterInstallSimpleFlow(test) + test := framework.NewClusterE2ETest(t, + framework.NewVSphere(t, framework.WithBottleRocket130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), + EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, + EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), + ) + runCuratedPackagesPrometheusInstallSimpleFlow(test) } func TestVSphereKubernetes126UbuntuWorkloadClusterCuratedPackagesSimpleFlow(t *testing.T) { @@ -1005,6 +998,13 @@ func TestVSphereKubernetes129UbuntuWorkloadClusterCuratedPackagesSimpleFlow(t *t runCuratedPackageRemoteClusterInstallSimpleFlow(test) } +func TestVSphereKubernetes130UbuntuWorkloadClusterCuratedPackagesSimpleFlow(t *testing.T) { + framework.CheckCuratedPackagesCredentials(t) + provider := framework.NewVSphere(t, framework.WithUbuntu130()) + test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube130) + runCuratedPackageRemoteClusterInstallSimpleFlow(test) +} + func TestVSphereMultipleTemplatesUbuntu127(t *testing.T) { framework.CheckVsphereMultiTemplateUbuntu127EnvVars(t) provider := framework.NewVSphere( @@ -1029,13 +1029,6 @@ func TestVSphereMultipleTemplatesUbuntu127(t *testing.T) { runMultiTemplatesSimpleFlow(test) } -func TestVSphereKubernetes125BottleRocketWorkloadClusterCuratedPackagesSimpleFlow(t *testing.T) { - framework.CheckCuratedPackagesCredentials(t) - provider := framework.NewVSphere(t, framework.WithBottleRocket125()) - test := SetupSimpleMultiCluster(t, 
provider, v1alpha1.Kube125) - runCuratedPackageRemoteClusterInstallSimpleFlow(test) -} - func TestVSphereKubernetes126BottleRocketWorkloadClusterCuratedPackagesSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) provider := framework.NewVSphere(t, framework.WithBottleRocket126()) @@ -1057,11 +1050,11 @@ func TestVSphereKubernetes128BottleRocketWorkloadClusterCuratedPackagesSimpleFlo runCuratedPackageRemoteClusterInstallSimpleFlow(test) } -func TestVSphereKubernetes125UbuntuWorkloadClusterCuratedPackagesEmissarySimpleFlow(t *testing.T) { +func TestVSphereKubernetes130BottleRocketWorkloadClusterCuratedPackagesSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) - provider := framework.NewVSphere(t, framework.WithUbuntu125()) - test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube125) - runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test) + provider := framework.NewVSphere(t, framework.WithBottleRocket130()) + test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube130) + runCuratedPackageRemoteClusterInstallSimpleFlow(test) } func TestVSphereKubernetes126UbuntuWorkloadClusterCuratedPackagesEmissarySimpleFlow(t *testing.T) { @@ -1085,10 +1078,10 @@ func TestVSphereKubernetes128UbuntuWorkloadClusterCuratedPackagesEmissarySimpleF runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test) } -func TestVSphereKubernetes125BottleRocketWorkloadClusterCuratedPackagesEmissarySimpleFlow(t *testing.T) { +func TestVSphereKubernetes130UbuntuWorkloadClusterCuratedPackagesEmissarySimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) - provider := framework.NewVSphere(t, framework.WithBottleRocket125()) - test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube125) + provider := framework.NewVSphere(t, framework.WithUbuntu130()) + test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube130) runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test) } @@ -1113,12 +1106,11 @@ func TestVSphereKubernetes128BottleRocketWorkloadClusterCuratedPackagesEmissaryS runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test) } -func TestVSphereKubernetes125UbuntuWorkloadClusterCuratedPackagesCertManagerSimpleFlow(t *testing.T) { +func TestVSphereKubernetes130BottleRocketWorkloadClusterCuratedPackagesEmissarySimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) - framework.CheckCertManagerCredentials(t) - provider := framework.NewVSphere(t, framework.WithUbuntu125()) - test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube125) - runCertManagerRemoteClusterInstallSimpleFlow(test) + provider := framework.NewVSphere(t, framework.WithBottleRocket130()) + test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube130) + runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test) } func TestVSphereKubernetes126UbuntuWorkloadClusterCuratedPackagesCertManagerSimpleFlow(t *testing.T) { @@ -1145,11 +1137,11 @@ func TestVSphereKubernetes128UbuntuWorkloadClusterCuratedPackagesCertManagerSimp runCertManagerRemoteClusterInstallSimpleFlow(test) } -func TestVSphereKubernetes125BottleRocketWorkloadClusterCuratedPackagesCertManagerSimpleFlow(t *testing.T) { +func TestVSphereKubernetes130UbuntuWorkloadClusterCuratedPackagesCertManagerSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) framework.CheckCertManagerCredentials(t) - provider := framework.NewVSphere(t, framework.WithBottleRocket125()) - test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube125) + provider := framework.NewVSphere(t, 
framework.WithUbuntu130()) + test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube130) runCertManagerRemoteClusterInstallSimpleFlow(test) } @@ -1177,6 +1169,14 @@ func TestVSphereKubernetes128BottleRocketWorkloadClusterCuratedPackagesCertManag runCertManagerRemoteClusterInstallSimpleFlow(test) } +func TestVSphereKubernetes130BottleRocketWorkloadClusterCuratedPackagesCertManagerSimpleFlow(t *testing.T) { + framework.CheckCuratedPackagesCredentials(t) + framework.CheckCertManagerCredentials(t) + provider := framework.NewVSphere(t, framework.WithBottleRocket130()) + test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube130) + runCertManagerRemoteClusterInstallSimpleFlow(test) +} + // Download artifacts func TestVSphereDownloadArtifacts(t *testing.T) { test := framework.NewClusterE2ETest( @@ -1227,6 +1227,18 @@ func TestVSphereKubernetes129GitFlux(t *testing.T) { runFluxFlow(test) } +func TestVSphereKubernetes130GitFlux(t *testing.T) { + test := framework.NewClusterE2ETest(t, + framework.NewVSphere(t, framework.WithUbuntu130()), + framework.WithFluxGit(), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + ) + runFluxFlow(test) +} + func TestVSphereKubernetes128BottleRocketGithubFlux(t *testing.T) { test := framework.NewClusterE2ETest(t, framework.NewVSphere(t, framework.WithBottleRocket128()), @@ -1251,6 +1263,18 @@ func TestVSphereKubernetes129BottleRocketGithubFlux(t *testing.T) { runFluxFlow(test) } +func TestVSphereKubernetes130BottleRocketGithubFlux(t *testing.T) { + test := framework.NewClusterE2ETest(t, + framework.NewVSphere(t, framework.WithBottleRocket130()), + framework.WithFluxGithub(), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + ) + runFluxFlow(test) +} + func TestVSphereKubernetes128BottleRocketGitFlux(t *testing.T) { test := framework.NewClusterE2ETest(t, framework.NewVSphere(t, framework.WithBottleRocket128()), @@ -1275,6 +1299,18 @@ func TestVSphereKubernetes129BottleRocketGitFlux(t *testing.T) { runFluxFlow(test) } +func TestVSphereKubernetes130BottleRocketGitFlux(t *testing.T) { + test := framework.NewClusterE2ETest(t, + framework.NewVSphere(t, framework.WithBottleRocket130()), + framework.WithFluxGit(), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + ) + runFluxFlow(test) +} + func TestVSphereKubernetes127To128GitFluxUpgrade(t *testing.T) { provider := framework.NewVSphere(t, framework.WithUbuntu127()) test := framework.NewClusterE2ETest(t, @@ -1311,18 +1347,36 @@ func TestVSphereKubernetes128To129GitFluxUpgrade(t *testing.T) { ) } +func TestVSphereKubernetes129To130GitFluxUpgrade(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu129()) + test := framework.NewClusterE2ETest(t, + provider, + framework.WithFluxGit(), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + 
framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + ) + runUpgradeFlowWithFlux( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Ubuntu130Template()), + ) +} + func TestVSphereInstallGitFluxDuringUpgrade(t *testing.T) { - provider := framework.NewVSphere(t, framework.WithUbuntu128()) + provider := framework.NewVSphere(t, framework.WithUbuntu130()) test := framework.NewClusterE2ETest(t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), ) runUpgradeFlowWithFlux( test, - v1alpha1.Kube128, + v1alpha1.Kube130, framework.WithFluxGit(), framework.WithClusterUpgrade(api.WithGitOpsRef(framework.DefaultFluxConfigName, v1alpha1.FluxConfigKind)), ) @@ -1381,6 +1435,32 @@ func TestVSphereKubernetes129UbuntuLabelsUpgradeFlow(t *testing.T) { ) } +func TestVSphereKubernetes130UbuntuLabelsUpgradeFlow(t *testing.T) { + provider := ubuntu130ProviderWithLabels(t) + + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller( + api.WithKubernetesVersion(v1alpha1.Kube130), + api.WithExternalEtcdTopology(1), + api.WithControlPlaneCount(1), + api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate + ), + ) + + runLabelsUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade( + api.WithWorkerNodeGroup(worker0, api.WithLabel(key1, val1)), + api.WithWorkerNodeGroup(worker1, api.WithLabel(key2, val2)), + api.WithWorkerNodeGroup(worker2), + api.WithControlPlaneLabel(cpKey1, cpVal1), + ), + ) +} + func TestVSphereKubernetes128BottlerocketLabelsUpgradeFlow(t *testing.T) { provider := bottlerocket128ProviderWithLabels(t) @@ -1433,6 +1513,32 @@ func TestVSphereKubernetes129BottlerocketLabelsUpgradeFlow(t *testing.T) { ) } +func TestVSphereKubernetes130BottlerocketLabelsUpgradeFlow(t *testing.T) { + provider := bottlerocket130ProviderWithLabels(t) + + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller( + api.WithKubernetesVersion(v1alpha1.Kube130), + api.WithExternalEtcdTopology(1), + api.WithControlPlaneCount(1), + api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate + ), + ) + + runLabelsUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade( + api.WithWorkerNodeGroup(worker0, api.WithLabel(key1, val1)), + api.WithWorkerNodeGroup(worker1, api.WithLabel(key2, val2)), + api.WithWorkerNodeGroup(worker2), + api.WithControlPlaneLabel(cpKey1, cpVal1), + ), + ) +} + // Multicluster func TestVSphereKubernetes128MulticlusterWorkloadCluster(t *testing.T) { provider := framework.NewVSphere(t, framework.WithUbuntu128()) @@ -1490,6 +1596,34 @@ func TestVSphereKubernetes129MulticlusterWorkloadCluster(t *testing.T) { runWorkloadClusterFlow(test) } +func TestVSphereKubernetes130MulticlusterWorkloadCluster(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu130()) + test := framework.NewMulticlusterE2ETest( + t, + framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller( + api.WithKubernetesVersion(v1alpha1.Kube130), + api.WithControlPlaneCount(1), + api.WithWorkerNodeCount(1), + api.WithStackedEtcdTopology(), + ), + ), + 
framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller( + api.WithKubernetesVersion(v1alpha1.Kube130), + api.WithControlPlaneCount(1), + api.WithWorkerNodeCount(1), + api.WithStackedEtcdTopology(), + ), + ), + ) + runWorkloadClusterFlow(test) +} + func TestVSphereUpgradeMulticlusterWorkloadClusterWithGithubFlux(t *testing.T) { provider := framework.NewVSphere(t, framework.WithUbuntu126()) test := framework.NewMulticlusterE2ETest( @@ -1531,19 +1665,6 @@ func TestVSphereUpgradeMulticlusterWorkloadClusterWithGithubFlux(t *testing.T) { } // OIDC -func TestVSphereKubernetes125OIDC(t *testing.T) { - test := framework.NewClusterE2ETest( - t, - framework.NewVSphere(t, framework.WithUbuntu125()), - framework.WithOIDC(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), - framework.WithClusterFiller(api.WithControlPlaneCount(1)), - framework.WithClusterFiller(api.WithWorkerNodeCount(1)), - ) - runOIDCFlow(test) -} - func TestVSphereKubernetes126OIDC(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -1596,6 +1717,19 @@ func TestVSphereKubernetes129OIDC(t *testing.T) { runOIDCFlow(test) } +func TestVSphereKubernetes130OIDC(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu130()), + framework.WithOIDC(), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + ) + runOIDCFlow(test) +} + func TestVSphereKubernetes127To128OIDCUpgrade(t *testing.T) { provider := framework.NewVSphere(t, framework.WithUbuntu127()) test := framework.NewClusterE2ETest( @@ -1644,6 +1778,20 @@ func TestVSphereKubernetes129UbuntuProxyConfigFlow(t *testing.T) { runProxyConfigFlow(test) } +func TestVSphereKubernetes130UbuntuProxyConfigFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu130(), + framework.WithPrivateNetwork()), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithProxy(framework.VsphereProxyRequiredEnvVars), + ) + runProxyConfigFlow(test) +} + func TestVSphereKubernetes128BottlerocketProxyConfigFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -1672,8 +1820,22 @@ func TestVSphereKubernetes129BottlerocketProxyConfigFlow(t *testing.T) { runProxyConfigFlow(test) } -// Registry mirror -func TestVSphereKubernetes128UbuntuRegistryMirrorInsecureSkipVerify(t *testing.T) { +func TestVSphereKubernetes130BottlerocketProxyConfigFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithBottleRocket130(), + framework.WithPrivateNetwork()), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithProxy(framework.VsphereProxyRequiredEnvVars), + ) + runProxyConfigFlow(test) +} + +// Registry mirror +func TestVSphereKubernetes128UbuntuRegistryMirrorInsecureSkipVerify(t *testing.T) { test := 
framework.NewClusterE2ETest( t, framework.NewVSphere(t, framework.WithUbuntu128(), framework.WithPrivateNetwork()), @@ -1712,6 +1874,19 @@ func TestVSphereKubernetes129UbuntuRegistryMirrorAndCert(t *testing.T) { runRegistryMirrorConfigFlow(test) } +func TestVSphereKubernetes130UbuntuRegistryMirrorAndCert(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu130(), framework.WithPrivateNetwork()), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithRegistryMirrorEndpointAndCert(constants.VSphereProviderName), + ) + runRegistryMirrorConfigFlow(test) +} + func TestVSphereKubernetes128BottlerocketRegistryMirrorAndCert(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -1738,6 +1913,19 @@ func TestVSphereKubernetes129BottlerocketRegistryMirrorAndCert(t *testing.T) { runRegistryMirrorConfigFlow(test) } +func TestVSphereKubernetes130BottlerocketRegistryMirrorAndCert(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithBottleRocket130(), framework.WithPrivateNetwork()), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithRegistryMirrorEndpointAndCert(constants.VSphereProviderName), + ) + runRegistryMirrorConfigFlow(test) +} + func TestVSphereKubernetes128UbuntuAuthenticatedRegistryMirror(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -1764,6 +1952,19 @@ func TestVSphereKubernetes129UbuntuAuthenticatedRegistryMirror(t *testing.T) { runRegistryMirrorConfigFlow(test) } +func TestVSphereKubernetes130UbuntuAuthenticatedRegistryMirror(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu130(), framework.WithPrivateNetwork()), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithAuthenticatedRegistryMirror(constants.VSphereProviderName), + ) + runRegistryMirrorConfigFlow(test) +} + func TestVSphereKubernetes128BottlerocketAuthenticatedRegistryMirror(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -1790,6 +1991,19 @@ func TestVSphereKubernetes129BottlerocketAuthenticatedRegistryMirror(t *testing. 
runRegistryMirrorConfigFlow(test) } +func TestVSphereKubernetes130BottlerocketAuthenticatedRegistryMirror(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithBottleRocket130(), framework.WithPrivateNetwork()), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithAuthenticatedRegistryMirror(constants.VSphereProviderName), + ) + runRegistryMirrorConfigFlow(test) +} + func TestVSphereKubernetes129BottlerocketRegistryMirrorOciNamespaces(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -1803,6 +2017,19 @@ func TestVSphereKubernetes129BottlerocketRegistryMirrorOciNamespaces(t *testing. runRegistryMirrorConfigFlow(test) } +func TestVSphereKubernetes130BottlerocketRegistryMirrorOciNamespaces(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithBottleRocket130(), framework.WithPrivateNetwork()), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithRegistryMirrorOciNamespaces(constants.VSphereProviderName), + ) + runRegistryMirrorConfigFlow(test) +} + // Clone mode func TestVSphereKubernetes128FullClone(t *testing.T) { diskSize := 30 @@ -1842,6 +2069,25 @@ func TestVSphereKubernetes129FullClone(t *testing.T) { runVSphereCloneModeFlow(test, vsphere, diskSize) } +func TestVSphereKubernetes130FullClone(t *testing.T) { + diskSize := 30 + vsphere := framework.NewVSphere(t, + framework.WithUbuntu130(), + framework.WithFullCloneMode(), + framework.WithDiskGiBForAllMachines(diskSize), + ) + + test := framework.NewClusterE2ETest( + t, + vsphere, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + ) + runVSphereCloneModeFlow(test, vsphere, diskSize) +} + func TestVSphereKubernetes128LinkedClone(t *testing.T) { diskSize := 20 vsphere := framework.NewVSphere(t, @@ -1880,6 +2126,25 @@ func TestVSphereKubernetes129LinkedClone(t *testing.T) { runVSphereCloneModeFlow(test, vsphere, diskSize) } +func TestVSphereKubernetes130LinkedClone(t *testing.T) { + diskSize := 20 + vsphere := framework.NewVSphere(t, + framework.WithUbuntu130(), + framework.WithLinkedCloneMode(), + framework.WithDiskGiBForAllMachines(diskSize), + ) + + test := framework.NewClusterE2ETest( + t, + vsphere, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + ) + runVSphereCloneModeFlow(test, vsphere, diskSize) +} + func TestVSphereKubernetes128BottlerocketFullClone(t *testing.T) { diskSize := 30 vsphere := framework.NewVSphere(t, @@ -1918,6 +2183,25 @@ func TestVSphereKubernetes129BottlerocketFullClone(t *testing.T) { runVSphereCloneModeFlow(test, vsphere, diskSize) } +func TestVSphereKubernetes130BottlerocketFullClone(t *testing.T) { + diskSize := 30 + vsphere := framework.NewVSphere(t, + 
framework.WithBottleRocket130(), + framework.WithFullCloneMode(), + framework.WithDiskGiBForAllMachines(diskSize), + ) + + test := framework.NewClusterE2ETest( + t, + vsphere, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + ) + runVSphereCloneModeFlow(test, vsphere, diskSize) +} + func TestVSphereKubernetes128BottlerocketLinkedClone(t *testing.T) { diskSize := 22 vsphere := framework.NewVSphere(t, @@ -1956,16 +2240,26 @@ func TestVSphereKubernetes129BottlerocketLinkedClone(t *testing.T) { runVSphereCloneModeFlow(test, vsphere, diskSize) } -// Simpleflow -func TestVSphereKubernetes125Ubuntu2004SimpleFlow(t *testing.T) { +func TestVSphereKubernetes130BottlerocketLinkedClone(t *testing.T) { + diskSize := 22 + vsphere := framework.NewVSphere(t, + framework.WithBottleRocket130(), + framework.WithLinkedCloneMode(), + framework.WithDiskGiBForAllMachines(diskSize), + ) + test := framework.NewClusterE2ETest( t, - framework.NewVSphere(t, framework.WithUbuntu125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + vsphere, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), ) - runSimpleFlow(test) + runVSphereCloneModeFlow(test, vsphere, diskSize) } +// Simpleflow func TestVSphereKubernetes126Ubuntu2004SimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -2002,15 +2296,13 @@ func TestVSphereKubernetes129Ubuntu2004SimpleFlow(t *testing.T) { runSimpleFlow(test) } -func TestVSphereKubernetes125Ubuntu2204SimpleFlow(t *testing.T) { - provider := framework.NewVSphere(t) +func TestVSphereKubernetes130Ubuntu2004SimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, - provider, - ).WithClusterConfig( - provider.WithKubeVersionAndOS(v1alpha1.Kube125, framework.Ubuntu2204, nil), + framework.NewVSphere(t, framework.WithUbuntu130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), ) - runSimpleFlowWithoutClusterConfigGeneration(test) + runSimpleFlow(test) } func TestVSphereKubernetes126Ubuntu2204SimpleFlow(t *testing.T) { @@ -2057,13 +2349,15 @@ func TestVSphereKubernetes129Ubuntu2204SimpleFlow(t *testing.T) { runSimpleFlowWithoutClusterConfigGeneration(test) } -func TestVSphereKubernetes125RedHatSimpleFlow(t *testing.T) { +func TestVSphereKubernetes130Ubuntu2204SimpleFlow(t *testing.T) { + provider := framework.NewVSphere(t) test := framework.NewClusterE2ETest( t, - framework.NewVSphere(t, framework.WithRedHat125VSphere()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + provider, + ).WithClusterConfig( + provider.WithKubeVersionAndOS(v1alpha1.Kube130, framework.Ubuntu2204, nil), ) - runSimpleFlow(test) + runSimpleFlowWithoutClusterConfigGeneration(test) } func TestVSphereKubernetes126RedHatSimpleFlow(t *testing.T) { @@ -2102,6 +2396,15 @@ func TestVSphereKubernetes129RedHatSimpleFlow(t *testing.T) { runSimpleFlow(test) } +func TestVSphereKubernetes130RedHatSimpleFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithRedHat130VSphere()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + ) + 
runSimpleFlow(test) +} + func TestVSphereKubernetes128ThreeReplicasFiveWorkersSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -2124,6 +2427,17 @@ func TestVSphereKubernetes129ThreeReplicasFiveWorkersSimpleFlow(t *testing.T) { runSimpleFlow(test) } +func TestVSphereKubernetes130ThreeReplicasFiveWorkersSimpleFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(3)), + framework.WithClusterFiller(api.WithWorkerNodeCount(5)), + ) + runSimpleFlow(test) +} + func TestVSphereKubernetes128DifferentNamespaceSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -2144,6 +2458,16 @@ func TestVSphereKubernetes129DifferentNamespaceSimpleFlow(t *testing.T) { runSimpleFlow(test) } +func TestVSphereKubernetes130DifferentNamespaceSimpleFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu130(), framework.WithVSphereFillers(api.WithVSphereConfigNamespaceForAllMachinesAndDatacenter(clusterNamespace))), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithClusterNamespace(clusterNamespace)), + ) + runSimpleFlow(test) +} + func TestVSphereKubernetes127BottleRocketSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -2171,6 +2495,15 @@ func TestVSphereKubernetes129BottleRocketSimpleFlow(t *testing.T) { runSimpleFlow(test) } +func TestVSphereKubernetes130BottleRocketSimpleFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithBottleRocket130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + ) + runSimpleFlow(test) +} + func TestVSphereKubernetes128BottleRocketThreeReplicasFiveWorkersSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -2193,6 +2526,17 @@ func TestVSphereKubernetes129BottleRocketThreeReplicasFiveWorkersSimpleFlow(t *t runSimpleFlow(test) } +func TestVSphereKubernetes130BottleRocketThreeReplicasFiveWorkersSimpleFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithBottleRocket130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(3)), + framework.WithClusterFiller(api.WithWorkerNodeCount(5)), + ) + runSimpleFlow(test) +} + func TestVSphereKubernetes128BottleRocketDifferentNamespaceSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -2215,6 +2559,17 @@ func TestVSphereKubernetes129BottleRocketDifferentNamespaceSimpleFlow(t *testing runSimpleFlow(test) } +func TestVSphereKubernetes130BottleRocketDifferentNamespaceSimpleFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithBottleRocket130(), + framework.WithVSphereFillers(api.WithVSphereConfigNamespaceForAllMachinesAndDatacenter(clusterNamespace))), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithClusterNamespace(clusterNamespace)), + ) + runSimpleFlow(test) +} + func TestVSphereKubernetes128CiliumAlwaysPolicyEnforcementModeSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -2252,6 +2607,19 @@ func TestVSphereKubernetes129BottleRocketWithNTP(t *testing.T) { runNTPFlow(test, v1alpha1.Bottlerocket) } +func 
TestVSphereKubernetes130BottleRocketWithNTP(t *testing.T) {
+	test := framework.NewClusterE2ETest(
+		t,
+		framework.NewVSphere(
+			t, framework.WithBottleRocket130(),
+			framework.WithNTPServersForAllMachines(),
+			framework.WithSSHAuthorizedKeyForAllMachines(""), // set SSH key to empty
+		),
+		framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)),
+	)
+	runNTPFlow(test, v1alpha1.Bottlerocket)
+}
+
 func TestVSphereKubernetes128UbuntuWithNTP(t *testing.T) {
 	test := framework.NewClusterE2ETest(
 		t,
@@ -2278,6 +2646,19 @@ func TestVSphereKubernetes129UbuntuWithNTP(t *testing.T) {
 	runNTPFlow(test, v1alpha1.Ubuntu)
 }
 
+func TestVSphereKubernetes130UbuntuWithNTP(t *testing.T) {
+	test := framework.NewClusterE2ETest(
+		t,
+		framework.NewVSphere(
+			t, framework.WithUbuntu130(),
+			framework.WithNTPServersForAllMachines(),
+			framework.WithSSHAuthorizedKeyForAllMachines(""), // set SSH key to empty
+		),
+		framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)),
+	)
+	runNTPFlow(test, v1alpha1.Ubuntu)
+}
+
 // Bottlerocket Configuration test
 func TestVSphereKubernetes128BottlerocketWithBottlerocketKubernetesSettings(t *testing.T) {
 	test := framework.NewClusterE2ETest(
@@ -2305,16 +2686,20 @@ func TestVSphereKubernetes129BottlerocketWithBottlerocketKubernetesSettings(t *t
 	runBottlerocketConfigurationFlow(test)
 }
 
-// Stacked etcd
-func TestVSphereKubernetes125StackedEtcdUbuntu(t *testing.T) {
-	test := framework.NewClusterE2ETest(t,
-		framework.NewVSphere(t, framework.WithUbuntu125()),
-		framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
-		framework.WithClusterFiller(api.WithControlPlaneCount(3)),
-		framework.WithClusterFiller(api.WithStackedEtcdTopology()))
-	runStackedEtcdFlow(test)
+func TestVSphereKubernetes130BottlerocketWithBottlerocketKubernetesSettings(t *testing.T) {
+	test := framework.NewClusterE2ETest(
+		t,
+		framework.NewVSphere(
+			t, framework.WithBottleRocket130(),
+			framework.WithBottlerocketKubernetesSettingsForAllMachines(),
+			framework.WithSSHAuthorizedKeyForAllMachines(""), // set SSH key to empty
+		),
+		framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)),
+	)
+	runBottlerocketConfigurationFlow(test)
 }
 
+// Stacked etcd
 func TestVSphereKubernetes126StackedEtcdUbuntu(t *testing.T) {
 	test := framework.NewClusterE2ETest(t,
 		framework.NewVSphere(t, framework.WithUbuntu126()),
@@ -2351,6 +2736,15 @@ func TestVSphereKubernetes129StackedEtcdUbuntu(t *testing.T) {
 	runStackedEtcdFlow(test)
 }
 
+func TestVSphereKubernetes130StackedEtcdUbuntu(t *testing.T) {
+	test := framework.NewClusterE2ETest(t,
+		framework.NewVSphere(t, framework.WithUbuntu130()),
+		framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)),
+		framework.WithClusterFiller(api.WithControlPlaneCount(3)),
+		framework.WithClusterFiller(api.WithStackedEtcdTopology()))
+	runStackedEtcdFlow(test)
+}
+
 // Taints
 func TestVSphereKubernetes128UbuntuTaintsUpgradeFlow(t *testing.T) {
 	provider := ubuntu128ProviderWithTaints(t)
@@ -2404,6 +2798,32 @@ func TestVSphereKubernetes129UbuntuTaintsUpgradeFlow(t *testing.T) {
 	)
 }
 
+func TestVSphereKubernetes130UbuntuTaintsUpgradeFlow(t *testing.T) {
+	provider := ubuntu130ProviderWithTaints(t)
+
+	test := framework.NewClusterE2ETest(
+		t,
+		provider,
+		framework.WithClusterFiller(
+			api.WithKubernetesVersion(v1alpha1.Kube130),
+			api.WithExternalEtcdTopology(1),
+			api.WithControlPlaneCount(1),
+			api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate
+		),
+	)
+
+	runTaintsUpgradeFlow(
+		test,
v1alpha1.Kube130, + framework.WithClusterUpgrade( + api.WithWorkerNodeGroup(worker0, api.WithTaint(framework.NoExecuteTaint())), + api.WithWorkerNodeGroup(worker1, api.WithTaint(framework.NoExecuteTaint())), + api.WithWorkerNodeGroup(worker2, api.WithNoTaints()), + api.WithControlPlaneTaints([]corev1.Taint{framework.PreferNoScheduleTaint()}), + ), + ) +} + func TestVSphereKubernetes128BottlerocketTaintsUpgradeFlow(t *testing.T) { provider := bottlerocket128ProviderWithTaints(t) @@ -2456,6 +2876,32 @@ func TestVSphereKubernetes129BottlerocketTaintsUpgradeFlow(t *testing.T) { ) } +func TestVSphereKubernetes130BottlerocketTaintsUpgradeFlow(t *testing.T) { + provider := bottlerocket130ProviderWithTaints(t) + + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller( + api.WithKubernetesVersion(v1alpha1.Kube130), + api.WithExternalEtcdTopology(1), + api.WithControlPlaneCount(1), + api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate + ), + ) + + runTaintsUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade( + api.WithWorkerNodeGroup(worker0, api.WithTaint(framework.NoExecuteTaint())), + api.WithWorkerNodeGroup(worker1, api.WithTaint(framework.NoExecuteTaint())), + api.WithWorkerNodeGroup(worker2, api.WithNoTaints()), + api.WithControlPlaneTaints([]corev1.Taint{framework.PreferNoScheduleTaint()}), + ), + ) +} + func TestVSphereKubernetes127UbuntuWorkloadClusterTaintsFlow(t *testing.T) { provider := framework.NewVSphere(t, framework.WithUbuntu127()) @@ -2552,19 +2998,18 @@ func TestVSphereKubernetes128UbuntuTo129Upgrade(t *testing.T) { ) } -func TestVSphereKubernetes125To126Ubuntu2204Upgrade(t *testing.T) { - provider := framework.NewVSphere(t) +func TestVSphereKubernetes129UbuntuTo130Upgrade(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu129()) test := framework.NewClusterE2ETest( t, provider, - ).WithClusterConfig( - provider.WithKubeVersionAndOS(v1alpha1.Kube125, framework.Ubuntu2204, nil), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), ) - runSimpleUpgradeFlowWithoutClusterConfigGeneration( + runSimpleUpgradeFlow( test, - v1alpha1.Kube126, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), - provider.WithProviderUpgrade(provider.Ubuntu2204Kubernetes126Template()), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Ubuntu130Template()), ) } @@ -2616,6 +3061,22 @@ func TestVSphereKubernetes128To129Ubuntu2204Upgrade(t *testing.T) { ) } +func TestVSphereKubernetes129To130Ubuntu2204Upgrade(t *testing.T) { + provider := framework.NewVSphere(t) + test := framework.NewClusterE2ETest( + t, + provider, + ).WithClusterConfig( + provider.WithKubeVersionAndOS(v1alpha1.Kube129, framework.Ubuntu2204, nil), + ) + runSimpleUpgradeFlowWithoutClusterConfigGeneration( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Ubuntu2204Kubernetes130Template()), + ) +} + func TestVSphereKubernetes127To128Ubuntu2204StackedEtcdUpgrade(t *testing.T) { provider := framework.NewVSphere(t) test := framework.NewClusterE2ETest( @@ -2654,6 +3115,25 @@ func TestVSphereKubernetes128To129Ubuntu2204StackedEtcdUpgrade(t *testing.T) { ) } +func TestVSphereKubernetes129To130Ubuntu2204StackedEtcdUpgrade(t *testing.T) { + provider := framework.NewVSphere(t) + test := framework.NewClusterE2ETest( + t, + provider, + 
).WithClusterConfig(
+		provider.WithKubeVersionAndOS(v1alpha1.Kube129, framework.Ubuntu2204, nil),
+		api.ClusterToConfigFiller(
+			api.WithStackedEtcdTopology(),
+		),
+	)
+	runSimpleUpgradeFlowWithoutClusterConfigGeneration(
+		test,
+		v1alpha1.Kube130,
+		framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)),
+		provider.WithProviderUpgrade(provider.Ubuntu2204Kubernetes130Template()),
+	)
+}
+
 func TestVSphereKubernetes127To128RedHatUpgrade(t *testing.T) {
 	provider := framework.NewVSphere(t, framework.WithRedHat127VSphere())
 	test := framework.NewClusterE2ETest(
@@ -2684,6 +3164,21 @@ func TestVSphereKubernetes128To129RedHatUpgrade(t *testing.T) {
 	)
 }
 
+func TestVSphereKubernetes129To130RedHatUpgrade(t *testing.T) {
+	provider := framework.NewVSphere(t, framework.WithRedHat129VSphere())
+	test := framework.NewClusterE2ETest(
+		t,
+		provider,
+		framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)),
+	)
+	runSimpleUpgradeFlow(
+		test,
+		v1alpha1.Kube130,
+		framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)),
+		provider.WithProviderUpgrade(provider.Redhat130Template()),
+	)
+}
+
 func TestVSphereKubernetes127To128StackedEtcdRedHatUpgrade(t *testing.T) {
 	provider := framework.NewVSphere(t, framework.WithRedHat127VSphere())
 	test := framework.NewClusterE2ETest(
@@ -2716,6 +3211,22 @@ func TestVSphereKubernetes128To129StackedEtcdRedHatUpgrade(t *testing.T) {
 	)
 }
 
+func TestVSphereKubernetes129To130StackedEtcdRedHatUpgrade(t *testing.T) {
+	provider := framework.NewVSphere(t, framework.WithRedHat129VSphere())
+	test := framework.NewClusterE2ETest(
+		t,
+		provider,
+		framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)),
+		framework.WithClusterFiller(api.WithStackedEtcdTopology()),
+	)
+	runSimpleUpgradeFlow(
+		test,
+		v1alpha1.Kube130,
+		framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)),
+		provider.WithProviderUpgrade(provider.Redhat130Template()),
+	)
+}
+
 func TestVSphereKubernetes126Ubuntu2004To2204Upgrade(t *testing.T) {
 	provider := framework.NewVSphere(t)
 	test := framework.NewClusterE2ETest(
@@ -2764,19 +3275,35 @@ func TestVSphereKubernetes128Ubuntu2004To2204Upgrade(t *testing.T) {
 	)
 }
 
+func TestVSphereKubernetes129Ubuntu2004To2204Upgrade(t *testing.T) {
+	provider := framework.NewVSphere(t)
+	test := framework.NewClusterE2ETest(
+		t,
+		provider,
+	).WithClusterConfig(
+		provider.WithKubeVersionAndOS(v1alpha1.Kube129, framework.Ubuntu2204, nil),
+	)
+	runSimpleUpgradeFlowWithoutClusterConfigGeneration(
+		test,
+		v1alpha1.Kube129,
+		framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube129)),
+		provider.WithProviderUpgrade(provider.Ubuntu2204Kubernetes129Template()),
+	)
+}
+
+func TestVSphereKubernetes130Ubuntu2004To2204Upgrade(t *testing.T) {
 	provider := framework.NewVSphere(t)
 	test := framework.NewClusterE2ETest(
 		t,
 		provider,
 	).WithClusterConfig(
-		provider.WithKubeVersionAndOS(v1alpha1.Kube129, framework.Ubuntu2204, nil),
+		provider.WithKubeVersionAndOS(v1alpha1.Kube130, framework.Ubuntu2204, nil),
 	)
 	runSimpleUpgradeFlowWithoutClusterConfigGeneration(
 		test,
-		v1alpha1.Kube129,
-		framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube129)),
-		provider.WithProviderUpgrade(provider.Ubuntu2204Kubernetes129Template()),
+		v1alpha1.Kube130,
+		framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)),
+		provider.WithProviderUpgrade(provider.Ubuntu2204Kubernetes130Template()),
 	)
 }
 
@@ -2913,6 +3440,33 @@ func TestVSphereKubernetes128UbuntuTo129MultipleFieldsUpgrade(t *testing.T) {
 	)
 }
 
+func TestVSphereKubernetes129UbuntuTo130MultipleFieldsUpgrade(t *testing.T) {
+	provider := framework.NewVSphere(t, framework.WithUbuntu129())
+	test := framework.NewClusterE2ETest(
+		t,
+		provider,
+		framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)),
+	)
+	runSimpleUpgradeFlow(
+		test,
+		v1alpha1.Kube130,
+		framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)),
+		provider.WithProviderUpgrade(
+			provider.Ubuntu130Template(),
+			api.WithNumCPUsForAllMachines(vsphereCpVmNumCpuUpdateVar),
+			api.WithMemoryMiBForAllMachines(vsphereCpVmMemoryUpdate),
+			api.WithDiskGiBForAllMachines(vsphereCpDiskGiBUpdateVar),
+			api.WithFolderForAllMachines(vsphereFolderUpdateVar),
+			// Uncomment once we support tests with multiple machine configs
+			/*api.WithWorkloadVMsNumCPUs(vsphereWlVmNumCpuUpdateVar),
+			api.WithWorkloadVMsMemoryMiB(vsphereWlVmMemoryUpdate),
+			api.WithWorkloadDiskGiB(vsphereWlDiskGiBUpdate),*/
+			// Uncomment the network field once upgrade starts working with it
+			// api.WithNetwork(vsphereNetwork2UpdateVar),
+		),
+	)
+}
+
 func TestVSphereKubernetes128UbuntuControlPlaneNodeUpgrade(t *testing.T) {
 	provider := framework.NewVSphere(t, framework.WithUbuntu128())
 	test := framework.NewClusterE2ETest(
@@ -2945,6 +3499,22 @@ func TestVSphereKubernetes129UbuntuControlPlaneNodeUpgrade(t *testing.T) {
 	)
 }
 
+func TestVSphereKubernetes130UbuntuControlPlaneNodeUpgrade(t *testing.T) {
+	provider := framework.NewVSphere(t, framework.WithUbuntu130())
+	test := framework.NewClusterE2ETest(
+		t,
+		provider,
+		framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)),
+		framework.WithClusterFiller(api.WithControlPlaneCount(1)),
+		framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
+	)
+	runSimpleUpgradeFlow(
+		test,
+		v1alpha1.Kube130,
+		framework.WithClusterUpgrade(api.WithControlPlaneCount(3)),
+	)
+}
+
 func TestVSphereKubernetes128UbuntuWorkerNodeUpgrade(t *testing.T) {
 	provider := framework.NewVSphere(t, framework.WithUbuntu128())
 	test := framework.NewClusterE2ETest(
@@ -2977,6 +3547,22 @@ func TestVSphereKubernetes129UbuntuWorkerNodeUpgrade(t *testing.T) {
 	)
 }
 
+func TestVSphereKubernetes130UbuntuWorkerNodeUpgrade(t *testing.T) {
+	provider := framework.NewVSphere(t, framework.WithUbuntu130())
+	test := framework.NewClusterE2ETest(
+		t,
+		provider,
+		framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)),
+		framework.WithClusterFiller(api.WithControlPlaneCount(1)),
+		framework.WithClusterFiller(api.WithWorkerNodeCount(3)),
+	)
+	runSimpleUpgradeFlow(
+		test,
+		v1alpha1.Kube130,
+		framework.WithClusterUpgrade(api.WithWorkerNodeCount(5)),
+	)
+}
+
 func TestVSphereKubernetes127BottlerocketTo128Upgrade(t *testing.T) {
	provider := framework.NewVSphere(t, framework.WithBottleRocket127())
	test := framework.NewClusterE2ETest(
@@ -3007,6 +3593,21 @@ func TestVSphereKubernetes128BottlerocketTo129Upgrade(t *testing.T) {
 	)
 }
 
+func TestVSphereKubernetes129BottlerocketTo130Upgrade(t *testing.T) {
+	provider := framework.NewVSphere(t, framework.WithBottleRocket129())
+	test := framework.NewClusterE2ETest(
+		t,
+		provider,
+		framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)),
+	)
+	runSimpleUpgradeFlow(
+		test,
+		v1alpha1.Kube130,
+		framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)),
+		provider.WithProviderUpgrade(provider.Bottlerocket130Template()),
+	)
+}
+
 func
TestVSphereKubernetes127BottlerocketTo128MultipleFieldsUpgrade(t *testing.T) { provider := framework.NewVSphere(t, framework.WithBottleRocket127()) test := framework.NewClusterE2ETest( @@ -3061,6 +3662,33 @@ func TestVSphereKubernetes128BottlerocketTo129MultipleFieldsUpgrade(t *testing.T ) } +func TestVSphereKubernetes129BottlerocketTo130MultipleFieldsUpgrade(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithBottleRocket129()) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + ) + runSimpleUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade( + provider.Bottlerocket130Template(), + api.WithNumCPUsForAllMachines(vsphereCpVmNumCpuUpdateVar), + api.WithMemoryMiBForAllMachines(vsphereCpVmMemoryUpdate), + api.WithDiskGiBForAllMachines(vsphereCpDiskGiBUpdateVar), + api.WithFolderForAllMachines(vsphereFolderUpdateVar), + // Uncomment once we support tests with multiple machine configs + /*api.WithWorkloadVMsNumCPUs(vsphereWlVmNumCpuUpdateVar), + api.WithWorkloadVMsMemoryMiB(vsphereWlVmMemoryUpdate), + api.WithWorkloadDiskGiB(vsphereWlDiskGiBUpdate),*/ + // Uncomment the network field once upgrade starts working with it + // api.WithNetwork(vsphereNetwork2UpdateVar), + ), + ) +} + func TestVSphereKubernetes128BottlerocketControlPlaneNodeUpgrade(t *testing.T) { provider := framework.NewVSphere(t, framework.WithBottleRocket128()) test := framework.NewClusterE2ETest( @@ -3093,6 +3721,22 @@ func TestVSphereKubernetes129BottlerocketControlPlaneNodeUpgrade(t *testing.T) { ) } +func TestVSphereKubernetes130BottlerocketControlPlaneNodeUpgrade(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithBottleRocket130()) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + ) + runSimpleUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithControlPlaneCount(3)), + ) +} + func TestVSphereKubernetes128BottlerocketWorkerNodeUpgrade(t *testing.T) { provider := framework.NewVSphere(t, framework.WithBottleRocket128()) test := framework.NewClusterE2ETest( @@ -3125,6 +3769,22 @@ func TestVSphereKubernetes129BottlerocketWorkerNodeUpgrade(t *testing.T) { ) } +func TestVSphereKubernetes130BottlerocketWorkerNodeUpgrade(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithBottleRocket130()) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + ) + runSimpleUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithWorkerNodeCount(5)), + ) +} + func TestVSphereKubernetes127UbuntuTo128StackedEtcdUpgrade(t *testing.T) { provider := framework.NewVSphere(t, framework.WithUbuntu127()) test := framework.NewClusterE2ETest( @@ -3161,6 +3821,24 @@ func TestVSphereKubernetes128UbuntuTo129StackedEtcdUpgrade(t *testing.T) { ) } +func TestVSphereKubernetes129UbuntuTo130StackedEtcdUpgrade(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu129()) + test := framework.NewClusterE2ETest( + t, + provider, + 
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithStackedEtcdTopology()), + ) + runSimpleUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Ubuntu130Template()), + ) +} + func TestVSphereKubernetes127BottlerocketTo128StackedEtcdUpgrade(t *testing.T) { provider := framework.NewVSphere(t, framework.WithBottleRocket127()) test := framework.NewClusterE2ETest( @@ -3197,6 +3875,24 @@ func TestVSphereKubernetes128BottlerocketTo129StackedEtcdUpgrade(t *testing.T) { ) } +func TestVSphereKubernetes129BottlerocketTo130StackedEtcdUpgrade(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithBottleRocket129()) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithStackedEtcdTopology()), + ) + runSimpleUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Bottlerocket130Template()), + ) +} + func TestVSphereKubernetes127UbuntuTo128UpgradeWithCheckpoint(t *testing.T) { var clusterOpts []framework.ClusterE2ETestOpt var clusterOpts2 []framework.ClusterE2ETestOpt @@ -3393,14 +4089,41 @@ func TestVSphereKubernetes128To129UbuntuUpgradeFromLatestMinorRelease(t *testing ) } -func TestVSphereKubernetes128To129UbuntuInPlaceUpgradeFromLatestMinorRelease(t *testing.T) { +func TestVSphereKubernetes129To130UbuntuUpgradeFromLatestMinorRelease(t *testing.T) { + release := latestMinorRelease(t) + provider := framework.NewVSphere(t, + framework.WithVSphereFillers( + api.WithOsFamilyForAllMachines(v1alpha1.Ubuntu), + ), + framework.WithKubeVersionAndOSForRelease(v1alpha1.Kube129, framework.Ubuntu2004, release), + ) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + ) + runUpgradeFromReleaseFlow( + test, + release, + v1alpha1.Kube130, + provider.WithProviderUpgrade( + provider.Ubuntu130Template(), // Set the template so it doesn't get autoimported + ), + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + ) +} + +func TestVSphereKubernetes129To130UbuntuInPlaceUpgradeFromLatestMinorRelease(t *testing.T) { release := latestMinorRelease(t) provider := framework.NewVSphere( t, framework.WithVSphereFillers( api.WithOsFamilyForAllMachines(v1alpha1.Ubuntu), ), - framework.WithKubeVersionAndOSForRelease(v1alpha1.Kube128, framework.Ubuntu2004, release), + framework.WithKubeVersionAndOSForRelease(v1alpha1.Kube129, framework.Ubuntu2004, release), ) test := framework.NewClusterE2ETest( t, @@ -3410,7 +4133,7 @@ func TestVSphereKubernetes128To129UbuntuInPlaceUpgradeFromLatestMinorRelease(t * test.GenerateClusterConfigForVersion(release.Version, framework.ExecuteWithEksaRelease(release)) test.UpdateClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + 
api.WithKubernetesVersion(v1alpha1.Kube129),
 			api.WithStackedEtcdTopology(),
 		),
 		api.VSphereToConfigFiller(
@@ -3421,10 +4144,10 @@ func TestVSphereKubernetes128To129UbuntuInPlaceUpgradeFromLatestMinorRelease(t *
 		test,
 		release,
 		framework.WithClusterUpgrade(
-			api.WithKubernetesVersion(v1alpha1.Kube129),
+			api.WithKubernetesVersion(v1alpha1.Kube130),
 			api.WithInPlaceUpgradeStrategy(),
 		),
-		provider.WithProviderUpgrade(provider.Ubuntu129Template()),
+		provider.WithProviderUpgrade(provider.Ubuntu130Template()),
 	)
 }
 
@@ -3522,6 +4245,33 @@ func TestVSphereKubernetes128To129RedhatUpgradeFromLatestMinorRelease(t *testing
 	)
 }
 
+func TestVSphereKubernetes129To130RedhatUpgradeFromLatestMinorRelease(t *testing.T) {
+	release := latestMinorRelease(t)
+	provider := framework.NewVSphere(t,
+		framework.WithVSphereFillers(
+			api.WithOsFamilyForAllMachines(v1alpha1.RedHat),
+		),
+		framework.WithKubeVersionAndOSForRelease(v1alpha1.Kube129, framework.RedHat8, release),
+	)
+	test := framework.NewClusterE2ETest(
+		t,
+		provider,
+		framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)),
+		framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
+		framework.WithClusterFiller(api.WithControlPlaneCount(1)),
+		framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
+	)
+	runUpgradeFromReleaseFlow(
+		test,
+		release,
+		v1alpha1.Kube130,
+		provider.WithProviderUpgrade(
+			provider.Redhat130Template(), // Set the template so it doesn't get auto-imported
+		),
+		framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)),
+	)
+}
+
 func TestVSphereKubernetes125UbuntuUpgradeAndRemoveWorkerNodeGroupsAPI(t *testing.T) {
 	provider := framework.NewVSphere(t)
 	test := framework.NewClusterE2ETest(
@@ -3595,30 +4345,6 @@ func TestVSphereKubernetes127to128UpgradeFromLatestMinorReleaseBottleRocketAPI(t
 	)
 }
 
-func TestVSphereKubernetes125UbuntuTo126InPlaceUpgrade_1CP_1Worker(t *testing.T) {
-	provider := framework.NewVSphere(t, framework.WithUbuntu125())
-	test := framework.NewClusterE2ETest(
-		t,
-		provider,
-		framework.WithEnvVar(features.VSphereInPlaceEnvVar, "true"),
-	).WithClusterConfig(
-		api.ClusterToConfigFiller(
-			api.WithControlPlaneCount(1),
-			api.WithWorkerNodeCount(1),
-			api.WithStackedEtcdTopology(),
-			api.WithInPlaceUpgradeStrategy(),
-		),
-		api.VSphereToConfigFiller(api.RemoveEtcdVsphereMachineConfig()),
-		provider.WithKubeVersionAndOS(v1alpha1.Kube125, framework.Ubuntu2004, nil),
-	)
-
-	runInPlaceUpgradeFlow(
-		test,
-		framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)),
-		provider.WithProviderUpgrade(provider.Ubuntu126Template()),
-	)
-}
-
 func TestVSphereKubernetes126UbuntuTo127InPlaceUpgrade_3CP_1Worker(t *testing.T) {
 	provider := framework.NewVSphere(t, framework.WithUbuntu126())
 	test := framework.NewClusterE2ETest(
@@ -3691,33 +4417,50 @@ func TestVSphereKubernetes128UbuntuTo129InPlaceUpgrade_3CP_3Worker(t *testing.T)
 	)
 }
 
-func TestVSphereKubernetes125UbuntuTo128InPlaceUpgrade(t *testing.T) {
-	var kube126clusterOpts []framework.ClusterE2ETestOpt
+func TestVSphereKubernetes129UbuntuTo130InPlaceUpgrade_1CP_1Worker(t *testing.T) {
+	provider := framework.NewVSphere(t, framework.WithUbuntu129())
+	test := framework.NewClusterE2ETest(
+		t,
+		provider,
+		framework.WithEnvVar(features.VSphereInPlaceEnvVar, "true"),
+	).WithClusterConfig(
+		api.ClusterToConfigFiller(
+			api.WithControlPlaneCount(1),
+			api.WithWorkerNodeCount(1),
+			api.WithStackedEtcdTopology(),
+			api.WithInPlaceUpgradeStrategy(),
+		),
+
api.VSphereToConfigFiller(api.RemoveEtcdVsphereMachineConfig()), + provider.WithKubeVersionAndOS(v1alpha1.Kube129, framework.Ubuntu2004, nil), + ) + + runInPlaceUpgradeFlow( + test, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Ubuntu130Template()), + ) +} + +func TestVSphereKubernetes126UbuntuTo130InPlaceUpgrade(t *testing.T) { var kube127clusterOpts []framework.ClusterE2ETestOpt var kube128clusterOpts []framework.ClusterE2ETestOpt - provider := framework.NewVSphere(t, framework.WithUbuntu125()) + var kube129clusterOpts []framework.ClusterE2ETestOpt + var kube130clusterOpts []framework.ClusterE2ETestOpt + provider := framework.NewVSphere(t, framework.WithUbuntu126()) test := framework.NewClusterE2ETest( t, provider, framework.WithEnvVar(features.VSphereInPlaceEnvVar, "true"), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube125), + api.WithKubernetesVersion(v1alpha1.Kube126), api.WithStackedEtcdTopology(), api.WithInPlaceUpgradeStrategy(), ), api.VSphereToConfigFiller( api.RemoveEtcdVsphereMachineConfig(), ), - provider.WithKubeVersionAndOS(v1alpha1.Kube125, framework.Ubuntu2004, nil), - ) - kube126clusterOpts = append( - kube126clusterOpts, - framework.WithClusterUpgrade( - api.WithKubernetesVersion(v1alpha1.Kube126), - api.WithInPlaceUpgradeStrategy(), - ), - provider.WithProviderUpgrade(provider.Ubuntu126Template()), + provider.WithKubeVersionAndOS(v1alpha1.Kube126, framework.Ubuntu2004, nil), ) kube127clusterOpts = append( kube127clusterOpts, @@ -3735,23 +4478,40 @@ func TestVSphereKubernetes125UbuntuTo128InPlaceUpgrade(t *testing.T) { ), provider.WithProviderUpgrade(provider.Ubuntu128Template()), ) + kube129clusterOpts = append( + kube129clusterOpts, + framework.WithClusterUpgrade( + api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithInPlaceUpgradeStrategy(), + ), + provider.WithProviderUpgrade(provider.Ubuntu129Template()), + ) + kube130clusterOpts = append( + kube130clusterOpts, + framework.WithClusterUpgrade( + api.WithKubernetesVersion(v1alpha1.Kube130), + api.WithInPlaceUpgradeStrategy(), + ), + provider.WithProviderUpgrade(provider.Ubuntu130Template()), + ) runInPlaceMultipleUpgradesFlow( test, - kube126clusterOpts, kube127clusterOpts, kube128clusterOpts, + kube129clusterOpts, + kube130clusterOpts, ) } -func TestVSphereKubernetes128UbuntuInPlaceCPScaleUp1To3(t *testing.T) { - provider := framework.NewVSphere(t, framework.WithUbuntu128()) +func TestVSphereKubernetes130UbuntuInPlaceCPScaleUp1To3(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu130()) test := framework.NewClusterE2ETest( t, provider, framework.WithEnvVar(features.VSphereInPlaceEnvVar, "true"), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithStackedEtcdTopology(), @@ -3760,7 +4520,7 @@ func TestVSphereKubernetes128UbuntuInPlaceCPScaleUp1To3(t *testing.T) { api.VSphereToConfigFiller( api.RemoveEtcdVsphereMachineConfig(), ), - provider.WithKubeVersionAndOS(v1alpha1.Kube128, framework.Ubuntu2004, nil), + provider.WithKubeVersionAndOS(v1alpha1.Kube130, framework.Ubuntu2004, nil), ) runInPlaceUpgradeFlow( test, @@ -3771,15 +4531,15 @@ func TestVSphereKubernetes128UbuntuInPlaceCPScaleUp1To3(t *testing.T) { ) } -func TestVSphereKubernetes128UbuntuInPlaceCPScaleDown3To1(t *testing.T) { - provider := framework.NewVSphere(t, 
framework.WithUbuntu128()) +func TestVSphereKubernetes130UbuntuInPlaceCPScaleDown3To1(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu130()) test := framework.NewClusterE2ETest( t, provider, framework.WithEnvVar(features.VSphereInPlaceEnvVar, "true"), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneCount(3), api.WithWorkerNodeCount(1), api.WithStackedEtcdTopology(), @@ -3788,7 +4548,7 @@ func TestVSphereKubernetes128UbuntuInPlaceCPScaleDown3To1(t *testing.T) { api.VSphereToConfigFiller( api.RemoveEtcdVsphereMachineConfig(), ), - provider.WithKubeVersionAndOS(v1alpha1.Kube128, framework.Ubuntu2004, nil), + provider.WithKubeVersionAndOS(v1alpha1.Kube130, framework.Ubuntu2004, nil), ) runInPlaceUpgradeFlow( test, @@ -3799,15 +4559,15 @@ func TestVSphereKubernetes128UbuntuInPlaceCPScaleDown3To1(t *testing.T) { ) } -func TestVSphereKubernetes128UbuntuInPlaceWorkerScaleUp1To2(t *testing.T) { - provider := framework.NewVSphere(t, framework.WithUbuntu128()) +func TestVSphereKubernetes130UbuntuInPlaceWorkerScaleUp1To2(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu130()) test := framework.NewClusterE2ETest( t, provider, framework.WithEnvVar(features.VSphereInPlaceEnvVar, "true"), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithStackedEtcdTopology(), @@ -3816,7 +4576,7 @@ func TestVSphereKubernetes128UbuntuInPlaceWorkerScaleUp1To2(t *testing.T) { api.VSphereToConfigFiller( api.RemoveEtcdVsphereMachineConfig(), ), - provider.WithKubeVersionAndOS(v1alpha1.Kube128, framework.Ubuntu2004, nil), + provider.WithKubeVersionAndOS(v1alpha1.Kube130, framework.Ubuntu2004, nil), ) runInPlaceUpgradeFlow( test, @@ -3827,15 +4587,15 @@ func TestVSphereKubernetes128UbuntuInPlaceWorkerScaleUp1To2(t *testing.T) { ) } -func TestVSphereKubernetes128UbuntuInPlaceWorkerScaleDown2To1(t *testing.T) { - provider := framework.NewVSphere(t, framework.WithUbuntu128()) +func TestVSphereKubernetes130UbuntuInPlaceWorkerScaleDown2To1(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu130()) test := framework.NewClusterE2ETest( t, provider, framework.WithEnvVar(features.VSphereInPlaceEnvVar, "true"), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(2), api.WithStackedEtcdTopology(), @@ -3844,7 +4604,7 @@ func TestVSphereKubernetes128UbuntuInPlaceWorkerScaleDown2To1(t *testing.T) { api.VSphereToConfigFiller( api.RemoveEtcdVsphereMachineConfig(), ), - provider.WithKubeVersionAndOS(v1alpha1.Kube128, framework.Ubuntu2004, nil), + provider.WithKubeVersionAndOS(v1alpha1.Kube130, framework.Ubuntu2004, nil), ) runInPlaceUpgradeFlow( test, @@ -3861,22 +4621,22 @@ func TestVSphereKubernetes128UpgradeManagementComponents(t *testing.T) { runUpgradeManagementComponentsFlow(t, release, provider, v1alpha1.Kube128, framework.Ubuntu2004) } -func TestVSphereInPlaceUpgradeMulticlusterWorkloadClusterK8sUpgrade125To126(t *testing.T) { - provider := framework.NewVSphere(t, framework.WithUbuntu125()) +func TestVSphereInPlaceUpgradeMulticlusterWorkloadClusterK8sUpgrade126To127(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu126()) 
managementCluster := framework.NewClusterE2ETest( t, provider, framework.WithEnvVar(features.VSphereInPlaceEnvVar, "true"), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube125), + api.WithKubernetesVersion(v1alpha1.Kube126), api.WithStackedEtcdTopology(), api.WithInPlaceUpgradeStrategy(), ), api.VSphereToConfigFiller( api.RemoveEtcdVsphereMachineConfig(), ), - provider.WithKubeVersionAndOS(v1alpha1.Kube125, framework.Ubuntu2004, nil), + provider.WithKubeVersionAndOS(v1alpha1.Kube126, framework.Ubuntu2004, nil), ) test := framework.NewMulticlusterE2ETest(t, managementCluster) test.WithWorkloadClusters( @@ -3888,42 +4648,42 @@ func TestVSphereInPlaceUpgradeMulticlusterWorkloadClusterK8sUpgrade125To126(t *t ).WithClusterConfig( api.ClusterToConfigFiller( api.WithManagementCluster(managementCluster.ClusterName), - api.WithKubernetesVersion(v1alpha1.Kube125), + api.WithKubernetesVersion(v1alpha1.Kube126), api.WithStackedEtcdTopology(), api.WithInPlaceUpgradeStrategy(), ), api.VSphereToConfigFiller( api.RemoveEtcdVsphereMachineConfig(), ), - provider.WithKubeVersionAndOS(v1alpha1.Kube125, framework.Ubuntu2004, nil), + provider.WithKubeVersionAndOS(v1alpha1.Kube126, framework.Ubuntu2004, nil), ), ) runInPlaceWorkloadUpgradeFlow( test, framework.WithClusterUpgrade( - api.WithKubernetesVersion(v1alpha1.Kube126), + api.WithKubernetesVersion(v1alpha1.Kube127), api.WithInPlaceUpgradeStrategy(), ), - provider.WithProviderUpgrade(provider.Ubuntu126Template()), + provider.WithProviderUpgrade(provider.Ubuntu127Template()), ) } -func TestVSphereInPlaceUpgradeMulticlusterWorkloadClusterK8sUpgrade126To127(t *testing.T) { - provider := framework.NewVSphere(t, framework.WithUbuntu126()) +func TestVSphereInPlaceUpgradeMulticlusterWorkloadClusterK8sUpgrade127To128(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu127()) managementCluster := framework.NewClusterE2ETest( t, provider, framework.WithEnvVar(features.VSphereInPlaceEnvVar, "true"), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube126), + api.WithKubernetesVersion(v1alpha1.Kube127), api.WithStackedEtcdTopology(), api.WithInPlaceUpgradeStrategy(), ), api.VSphereToConfigFiller( api.RemoveEtcdVsphereMachineConfig(), ), - provider.WithKubeVersionAndOS(v1alpha1.Kube126, framework.Ubuntu2004, nil), + provider.WithKubeVersionAndOS(v1alpha1.Kube127, framework.Ubuntu2004, nil), ) test := framework.NewMulticlusterE2ETest(t, managementCluster) test.WithWorkloadClusters( @@ -3935,42 +4695,42 @@ func TestVSphereInPlaceUpgradeMulticlusterWorkloadClusterK8sUpgrade126To127(t *t ).WithClusterConfig( api.ClusterToConfigFiller( api.WithManagementCluster(managementCluster.ClusterName), - api.WithKubernetesVersion(v1alpha1.Kube126), + api.WithKubernetesVersion(v1alpha1.Kube127), api.WithStackedEtcdTopology(), api.WithInPlaceUpgradeStrategy(), ), api.VSphereToConfigFiller( api.RemoveEtcdVsphereMachineConfig(), ), - provider.WithKubeVersionAndOS(v1alpha1.Kube126, framework.Ubuntu2004, nil), + provider.WithKubeVersionAndOS(v1alpha1.Kube127, framework.Ubuntu2004, nil), ), ) runInPlaceWorkloadUpgradeFlow( test, framework.WithClusterUpgrade( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube128), api.WithInPlaceUpgradeStrategy(), ), - provider.WithProviderUpgrade(provider.Ubuntu127Template()), + provider.WithProviderUpgrade(provider.Ubuntu128Template()), ) } -func TestVSphereInPlaceUpgradeMulticlusterWorkloadClusterK8sUpgrade127To128(t 
*testing.T) { - provider := framework.NewVSphere(t, framework.WithUbuntu127()) +func TestVSphereInPlaceUpgradeMulticlusterWorkloadClusterK8sUpgrade128To129(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu128()) managementCluster := framework.NewClusterE2ETest( t, provider, framework.WithEnvVar(features.VSphereInPlaceEnvVar, "true"), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube128), api.WithStackedEtcdTopology(), api.WithInPlaceUpgradeStrategy(), ), api.VSphereToConfigFiller( api.RemoveEtcdVsphereMachineConfig(), ), - provider.WithKubeVersionAndOS(v1alpha1.Kube127, framework.Ubuntu2004, nil), + provider.WithKubeVersionAndOS(v1alpha1.Kube128, framework.Ubuntu2004, nil), ) test := framework.NewMulticlusterE2ETest(t, managementCluster) test.WithWorkloadClusters( @@ -3982,42 +4742,42 @@ func TestVSphereInPlaceUpgradeMulticlusterWorkloadClusterK8sUpgrade127To128(t *t ).WithClusterConfig( api.ClusterToConfigFiller( api.WithManagementCluster(managementCluster.ClusterName), - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube128), api.WithStackedEtcdTopology(), api.WithInPlaceUpgradeStrategy(), ), api.VSphereToConfigFiller( api.RemoveEtcdVsphereMachineConfig(), ), - provider.WithKubeVersionAndOS(v1alpha1.Kube127, framework.Ubuntu2004, nil), + provider.WithKubeVersionAndOS(v1alpha1.Kube128, framework.Ubuntu2004, nil), ), ) runInPlaceWorkloadUpgradeFlow( test, framework.WithClusterUpgrade( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithInPlaceUpgradeStrategy(), ), - provider.WithProviderUpgrade(provider.Ubuntu128Template()), + provider.WithProviderUpgrade(provider.Ubuntu129Template()), ) } -func TestVSphereInPlaceUpgradeMulticlusterWorkloadClusterK8sUpgrade128To129(t *testing.T) { - provider := framework.NewVSphere(t, framework.WithUbuntu128()) +func TestVSphereInPlaceUpgradeMulticlusterWorkloadClusterK8sUpgrade129To130(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu129()) managementCluster := framework.NewClusterE2ETest( t, provider, framework.WithEnvVar(features.VSphereInPlaceEnvVar, "true"), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithStackedEtcdTopology(), api.WithInPlaceUpgradeStrategy(), ), api.VSphereToConfigFiller( api.RemoveEtcdVsphereMachineConfig(), ), - provider.WithKubeVersionAndOS(v1alpha1.Kube128, framework.Ubuntu2004, nil), + provider.WithKubeVersionAndOS(v1alpha1.Kube129, framework.Ubuntu2004, nil), ) test := framework.NewMulticlusterE2ETest(t, managementCluster) test.WithWorkloadClusters( @@ -4029,23 +4789,23 @@ func TestVSphereInPlaceUpgradeMulticlusterWorkloadClusterK8sUpgrade128To129(t *t ).WithClusterConfig( api.ClusterToConfigFiller( api.WithManagementCluster(managementCluster.ClusterName), - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithStackedEtcdTopology(), api.WithInPlaceUpgradeStrategy(), ), api.VSphereToConfigFiller( api.RemoveEtcdVsphereMachineConfig(), ), - provider.WithKubeVersionAndOS(v1alpha1.Kube128, framework.Ubuntu2004, nil), + provider.WithKubeVersionAndOS(v1alpha1.Kube129, framework.Ubuntu2004, nil), ), ) runInPlaceWorkloadUpgradeFlow( test, framework.WithClusterUpgrade( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), 
api.WithInPlaceUpgradeStrategy(), ), - provider.WithProviderUpgrade(provider.Ubuntu129Template()), + provider.WithProviderUpgrade(provider.Ubuntu130Template()), ) } @@ -4503,6 +5263,20 @@ func TestVSphereKubernetes129UbuntuAirgappedRegistryMirror(t *testing.T) { runAirgapConfigFlow(test, "195.18.0.1/16,196.18.0.1/16") } +func TestVSphereKubernetes130UbuntuAirgappedRegistryMirror(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu130(), framework.WithPrivateNetwork()), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithRegistryMirrorEndpointAndCert(constants.VSphereProviderName), + ) + + runAirgapConfigFlow(test, "195.18.0.1/16,196.18.0.1/16") +} + func TestVSphereKubernetes129UbuntuAirgappedProxy(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -4517,6 +5291,20 @@ func TestVSphereKubernetes129UbuntuAirgappedProxy(t *testing.T) { runAirgapConfigProxyFlow(test, "195.18.0.1/16,196.18.0.1/16") } +func TestVSphereKubernetes130UbuntuAirgappedProxy(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu130(), framework.WithPrivateNetwork()), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithProxy(framework.VsphereProxyRequiredEnvVars), + ) + + runAirgapConfigProxyFlow(test, "195.18.0.1/16,196.18.0.1/16") +} + func TestVSphereKubernetesUbuntu128EtcdEncryption(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -4559,6 +5347,27 @@ func TestVSphereKubernetesUbuntu129EtcdEncryption(t *testing.T) { test.DeleteCluster() } +func TestVSphereKubernetesUbuntu130EtcdEncryption(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu130()), + framework.WithClusterFiller( + api.WithKubernetesVersion(v1alpha1.Kube130), + api.WithExternalEtcdTopology(1), + api.WithControlPlaneCount(1), + ), + framework.WithPodIamConfig(), + ) + test.OSFamily = v1alpha1.Ubuntu + test.GenerateClusterConfig() + test.CreateCluster() + test.PostClusterCreateEtcdEncryptionSetup() + test.UpgradeClusterWithNewConfig([]framework.ClusterE2ETestOpt{framework.WithEtcdEncrytion()}) + test.StopIfFailed() + test.ValidateEtcdEncryption() + test.DeleteCluster() +} + func TestVSphereKubernetesBottlerocket128EtcdEncryption(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -4599,6 +5408,26 @@ func TestVSphereKubernetesBottlerocket129EtcdEncryption(t *testing.T) { test.DeleteCluster() } +func TestVSphereKubernetesBottlerocket130EtcdEncryption(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithBottleRocket130()), + framework.WithClusterFiller( + api.WithKubernetesVersion(v1alpha1.Kube130), + api.WithExternalEtcdTopology(1), + api.WithControlPlaneCount(1), + ), + framework.WithPodIamConfig(), + ) + test.OSFamily = v1alpha1.Bottlerocket + test.GenerateClusterConfig() + test.CreateCluster() + test.PostClusterCreateEtcdEncryptionSetup() + test.UpgradeClusterWithNewConfig([]framework.ClusterE2ETestOpt{framework.WithEtcdEncrytion()}) + test.StopIfFailed() + test.DeleteCluster() +} + 
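// NOTE (editor): the per-version provider helpers that follow (ubuntu128ProviderWithLabels,
// bottlerocket130ProviderWithTaints, and friends) are copies that differ only in the OS
// option passed to framework.NewVSphere. A version-agnostic sketch of the same pattern,
// under the assumption of a hypothetical helper name (labeledProviderFor is not part of
// the framework; the worker/key/val constants are the ones these tests already use):
//
//	func labeledProviderFor(t *testing.T, osOpt framework.VSphereOpt) *framework.VSphere {
//		return framework.NewVSphere(t,
//			framework.WithVSphereWorkerNodeGroup(worker0,
//				framework.WithWorkerNodeGroup(worker0, api.WithCount(2), api.WithLabel(key1, val2))),
//			framework.WithVSphereWorkerNodeGroup(worker1,
//				framework.WithWorkerNodeGroup(worker1, api.WithCount(1))),
//			framework.WithVSphereWorkerNodeGroup(worker2,
//				framework.WithWorkerNodeGroup(worker2, api.WithCount(1), api.WithLabel(key2, val2))),
//			osOpt, // e.g. framework.WithUbuntu130() or framework.WithBottleRocket130()
//		)
//	}
//
// With such a helper, ubuntu130ProviderWithLabels(t) would reduce to
// labeledProviderFor(t, framework.WithUbuntu130()), avoiding one more copy per release.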
func ubuntu128ProviderWithLabels(t *testing.T) *framework.VSphere { return framework.NewVSphere(t, framework.WithVSphereWorkerNodeGroup( @@ -4639,6 +5468,26 @@ func ubuntu129ProviderWithLabels(t *testing.T) *framework.VSphere { ) } +func ubuntu130ProviderWithLabels(t *testing.T) *framework.VSphere { + return framework.NewVSphere(t, + framework.WithVSphereWorkerNodeGroup( + worker0, + framework.WithWorkerNodeGroup(worker0, api.WithCount(2), + api.WithLabel(key1, val2)), + ), + framework.WithVSphereWorkerNodeGroup( + worker1, + framework.WithWorkerNodeGroup(worker1, api.WithCount(1)), + ), + framework.WithVSphereWorkerNodeGroup( + worker2, + framework.WithWorkerNodeGroup(worker2, api.WithCount(1), + api.WithLabel(key2, val2)), + ), + framework.WithUbuntu130(), + ) +} + func bottlerocket128ProviderWithLabels(t *testing.T) *framework.VSphere { return framework.NewVSphere(t, framework.WithVSphereWorkerNodeGroup( @@ -4679,6 +5528,26 @@ func bottlerocket129ProviderWithLabels(t *testing.T) *framework.VSphere { ) } +func bottlerocket130ProviderWithLabels(t *testing.T) *framework.VSphere { + return framework.NewVSphere(t, + framework.WithVSphereWorkerNodeGroup( + worker0, + framework.WithWorkerNodeGroup(worker0, api.WithCount(2), + api.WithLabel(key1, val2)), + ), + framework.WithVSphereWorkerNodeGroup( + worker1, + framework.WithWorkerNodeGroup(worker1, api.WithCount(1)), + ), + framework.WithVSphereWorkerNodeGroup( + worker2, + framework.WithWorkerNodeGroup(worker2, api.WithCount(1), + api.WithLabel(key2, val2)), + ), + framework.WithBottleRocket130(), + ) +} + func ubuntu128ProviderWithTaints(t *testing.T) *framework.VSphere { return framework.NewVSphere(t, framework.WithVSphereWorkerNodeGroup( @@ -4715,6 +5584,24 @@ func ubuntu129ProviderWithTaints(t *testing.T) *framework.VSphere { ) } +func ubuntu130ProviderWithTaints(t *testing.T) *framework.VSphere { + return framework.NewVSphere(t, + framework.WithVSphereWorkerNodeGroup( + worker0, + framework.NoScheduleWorkerNodeGroup(worker0, 2), + ), + framework.WithVSphereWorkerNodeGroup( + worker1, + framework.WithWorkerNodeGroup(worker1, api.WithCount(1)), + ), + framework.WithVSphereWorkerNodeGroup( + worker2, + framework.PreferNoScheduleWorkerNodeGroup(worker2, 1), + ), + framework.WithUbuntu130(), + ) +} + func bottlerocket128ProviderWithTaints(t *testing.T) *framework.VSphere { return framework.NewVSphere(t, framework.WithVSphereWorkerNodeGroup( @@ -4751,6 +5638,24 @@ func bottlerocket129ProviderWithTaints(t *testing.T) *framework.VSphere { ) } +func bottlerocket130ProviderWithTaints(t *testing.T) *framework.VSphere { + return framework.NewVSphere(t, + framework.WithVSphereWorkerNodeGroup( + worker0, + framework.NoScheduleWorkerNodeGroup(worker0, 2), + ), + framework.WithVSphereWorkerNodeGroup( + worker1, + framework.WithWorkerNodeGroup(worker1, api.WithCount(1)), + ), + framework.WithVSphereWorkerNodeGroup( + worker2, + framework.PreferNoScheduleWorkerNodeGroup(worker2, 1), + ), + framework.WithBottleRocket130(), + ) +} + func runVSphereCloneModeFlow(test *framework.ClusterE2ETest, vsphere *framework.VSphere, diskSize int) { test.GenerateClusterConfig() test.CreateCluster() @@ -4867,6 +5772,27 @@ func TestVSphereKubernetes129BottlerocketEtcdScaleDown(t *testing.T) { ) } +func TestVSphereKubernetes130BottlerocketEtcdScaleDown(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithBottleRocket130()), + framework.WithClusterFiller( + api.WithKubernetesVersion(v1alpha1.Kube130), + 
api.WithExternalEtcdTopology(3), + api.WithControlPlaneCount(1), + api.WithWorkerNodeCount(1), + ), + ) + + runSimpleUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade( + api.WithExternalEtcdTopology(1), + ), + ) +} + func TestVSphereKubernetes127to128BottlerocketEtcdScaleUp(t *testing.T) { provider := framework.NewVSphere(t, framework.WithBottleRocket127()) test := framework.NewClusterE2ETest( @@ -4957,6 +5883,27 @@ func TestVSphereKubernetes129UbuntuEtcdScaleUp(t *testing.T) { ) } +func TestVSphereKubernetes130UbuntuEtcdScaleUp(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu130()), + framework.WithClusterFiller( + api.WithKubernetesVersion(v1alpha1.Kube130), + api.WithExternalEtcdTopology(1), + api.WithControlPlaneCount(1), + api.WithWorkerNodeCount(1), + ), + ) + + runSimpleUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade( + api.WithExternalEtcdTopology(3), + ), + ) +} + func TestVSphereKubernetes128UbuntuEtcdScaleDown(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -4999,13 +5946,34 @@ func TestVSphereKubernetes129UbuntuEtcdScaleDown(t *testing.T) { ) } -func TestVSphereKubernetes127to128UbuntuEtcdScaleUp(t *testing.T) { - provider := framework.NewVSphere(t, framework.WithUbuntu127()) +func TestVSphereKubernetes130UbuntuEtcdScaleDown(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu130()), + framework.WithClusterFiller( + api.WithKubernetesVersion(v1alpha1.Kube130), + api.WithExternalEtcdTopology(3), + api.WithControlPlaneCount(1), + api.WithWorkerNodeCount(1), + ), + ) + + runSimpleUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade( + api.WithExternalEtcdTopology(1), + ), + ) +} + +func TestVSphereKubernetes129to130UbuntuEtcdScaleUp(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu129()) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), @@ -5014,22 +5982,22 @@ func TestVSphereKubernetes127to128UbuntuEtcdScaleUp(t *testing.T) { runSimpleUpgradeFlow( test, - v1alpha1.Kube128, + v1alpha1.Kube130, framework.WithClusterUpgrade( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithExternalEtcdTopology(3), ), - provider.WithProviderUpgrade(provider.Ubuntu128Template()), + provider.WithProviderUpgrade(provider.Ubuntu130Template()), ) } -func TestVSphereKubernetes127to128UbuntuEtcdScaleDown(t *testing.T) { - provider := framework.NewVSphere(t, framework.WithUbuntu127()) +func TestVSphereKubernetes129to130UbuntuEtcdScaleDown(t *testing.T) { + provider := framework.NewVSphere(t, framework.WithUbuntu129()) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithExternalEtcdTopology(3), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), @@ -5038,11 +6006,11 @@ func TestVSphereKubernetes127to128UbuntuEtcdScaleDown(t *testing.T) { runSimpleUpgradeFlow( test, - v1alpha1.Kube128, + v1alpha1.Kube130, framework.WithClusterUpgrade( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithExternalEtcdTopology(1), ), - 
provider.WithProviderUpgrade(provider.Ubuntu128Template()), + provider.WithProviderUpgrade(provider.Ubuntu130Template()), ) } diff --git a/test/framework/cluster.go b/test/framework/cluster.go index 631740a65f35..cad1457101d0 100644 --- a/test/framework/cluster.go +++ b/test/framework/cluster.go @@ -35,6 +35,7 @@ import ( "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/constants" "github.com/aws/eks-anywhere/pkg/executables" + "github.com/aws/eks-anywhere/pkg/features" "github.com/aws/eks-anywhere/pkg/filewriter" "github.com/aws/eks-anywhere/pkg/git" "github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder" @@ -2186,6 +2187,7 @@ func (e *ClusterE2ETest) setFeatureFlagForUnreleasedKubernetesVersion(version v1 if version == unreleasedK8sVersion { // Set feature flag for the unreleased k8s version when applicable e.T.Logf("Setting k8s version support feature flag...") + os.Setenv(features.K8s130SupportEnvVar, "true") } } diff --git a/test/framework/vsphere.go b/test/framework/vsphere.go index 6e4b9ec28c06..822b9f39c0ad 100644 --- a/test/framework/vsphere.go +++ b/test/framework/vsphere.go @@ -165,6 +165,17 @@ func WithRedHat129VSphere() VSphereOpt { return withVSphereKubeVersionAndOS(anywherev1.Kube129, RedHat8, nil) } +// WithRedHat130VSphere vsphere test with Redhat 8 for Kubernetes 1.30. +func WithRedHat130VSphere() VSphereOpt { + return withVSphereKubeVersionAndOS(anywherev1.Kube130, RedHat8, nil) +} + +// WithUbuntu130 returns a VSphereOpt that adds API fillers to use a Ubuntu vSphere template for k8s 1.30 +// and the "ubuntu" osFamily in all machine configs. +func WithUbuntu130() VSphereOpt { + return withVSphereKubeVersionAndOS(anywherev1.Kube130, Ubuntu2004, nil) +} + // WithUbuntu129 returns a VSphereOpt that adds API fillers to use a Ubuntu vSphere template for k8s 1.29 // and the "ubuntu" osFamily in all machine configs. func WithUbuntu129() VSphereOpt { @@ -220,6 +231,11 @@ func WithBottleRocket129() VSphereOpt { return withVSphereKubeVersionAndOS(anywherev1.Kube129, Bottlerocket1, nil) } +// WithBottleRocket130 returns br 1.30 var. +func WithBottleRocket130() VSphereOpt { + return withVSphereKubeVersionAndOS(anywherev1.Kube130, Bottlerocket1, nil) +} + func WithPrivateNetwork() VSphereOpt { return func(v *VSphere) { v.fillers = append(v.fillers, @@ -493,6 +509,11 @@ func (v *VSphere) Ubuntu129Template() api.VSphereFiller { return v.templateForKubeVersionAndOS(anywherev1.Kube129, Ubuntu2004, nil) } +// Ubuntu130Template returns vsphere filler for 1.30 Ubuntu. +func (v *VSphere) Ubuntu130Template() api.VSphereFiller { + return v.templateForKubeVersionAndOS(anywherev1.Kube130, Ubuntu2004, nil) +} + // Ubuntu128TemplateForMachineConfig returns vsphere filler for 1.28 Ubuntu for a specific machine config. func (v *VSphere) Ubuntu128TemplateForMachineConfig(name string) api.VSphereFiller { return v.templateForKubeVersionAndOSMachineConfig(name, anywherev1.Kube128, Ubuntu2004) @@ -518,6 +539,11 @@ func (v *VSphere) Ubuntu2204Kubernetes129Template() api.VSphereFiller { return v.templateForKubeVersionAndOS(anywherev1.Kube129, Ubuntu2204, nil) } +// Ubuntu2204Kubernetes130Template returns vsphere filler for 1.30 Ubuntu 22.04. +func (v *VSphere) Ubuntu2204Kubernetes130Template() api.VSphereFiller { + return v.templateForKubeVersionAndOS(anywherev1.Kube130, Ubuntu2204, nil) +} + // Bottlerocket125Template returns vsphere filler for 1.25 BR. 
func (v *VSphere) Bottlerocket125Template() api.VSphereFiller { return v.templateForKubeVersionAndOS(anywherev1.Kube125, Bottlerocket1, nil) @@ -543,6 +569,16 @@ func (v *VSphere) Bottlerocket129Template() api.VSphereFiller { return v.templateForKubeVersionAndOS(anywherev1.Kube129, Bottlerocket1, nil) } +// Bottlerocket130Template returns vsphere filler for 1.30 BR. +func (v *VSphere) Bottlerocket130Template() api.VSphereFiller { + return v.templateForKubeVersionAndOS(anywherev1.Kube130, Bottlerocket1, nil) +} + +// Redhat130Template returns vsphere filler for 1.30 Redhat. +func (v *VSphere) Redhat130Template() api.VSphereFiller { + return v.templateForKubeVersionAndOS(anywherev1.Kube130, RedHat8, nil) +} + // Redhat129Template returns vsphere filler for 1.29 Redhat. func (v *VSphere) Redhat129Template() api.VSphereFiller { return v.templateForKubeVersionAndOS(anywherev1.Kube129, RedHat8, nil) From 7a207a0aa592834f0261270c5135c4c60bf708a0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 14 May 2024 00:07:23 -0700 Subject: [PATCH 138/193] Bump golangci/golangci-lint-action from 5 to 6 (#8146) Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 5 to 6. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/v5...v6) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/golangci-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index e350fde85912..348f2c838c0a 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -23,7 +23,7 @@ jobs: check-latest: true cache: true - name: golangci-lint - uses: golangci/golangci-lint-action@v5 + uses: golangci/golangci-lint-action@v6 with: version: v1.56.2 only-new-issues: true From ee3351399365243b9c65a7e88596b964f0b9acf4 Mon Sep 17 00:00:00 2001 From: Rahul Ganesh <31204974+rahulbabu95@users.noreply.github.com> Date: Tue, 14 May 2024 12:07:24 -0700 Subject: [PATCH 139/193] Add 1.30 Cloudstack E2E tests (#8152) * Add 1.30 Cloudstack E2E tests Signed-off-by: Rahul Ganesh * Update framework funcs with right template name Signed-off-by: Rahul Ganesh --------- Signed-off-by: Rahul Ganesh Co-authored-by: Rahul Ganesh --- test/e2e/QUICK_TESTS.yaml | 4 +- test/e2e/SKIPPED_TESTS.yaml | 4 +- test/e2e/cloudstack_test.go | 910 ++++++++++++++++++----------------- test/framework/cloudstack.go | 36 +- 4 files changed, 512 insertions(+), 442 deletions(-) diff --git a/test/e2e/QUICK_TESTS.yaml b/test/e2e/QUICK_TESTS.yaml index 71806820ade6..4fd1f730e565 100644 --- a/test/e2e/QUICK_TESTS.yaml +++ b/test/e2e/QUICK_TESTS.yaml @@ -12,8 +12,8 @@ quick_tests: - TestVSphereKubernetes129BottlerocketTo130Upgrade - TestVSphereKubernetes129BottlerocketTo130StackedEtcdUpgrade # CloudStack -- TestCloudStackKubernetes128To129RedhatMultipleFieldsUpgrade -- TestCloudStackKubernetes128To129StackedEtcdRedhatMultipleFieldsUpgrade +- TestCloudStackKubernetes129To130RedhatMultipleFieldsUpgrade +- TestCloudStackKubernetes129To130StackedEtcdRedhatMultipleFieldsUpgrade # Nutanix - TestNutanixKubernetes128to129RedHat9Upgrade - 
TestNutanixKubernetes128to129StackedEtcdRedHat9Upgrade diff --git a/test/e2e/SKIPPED_TESTS.yaml b/test/e2e/SKIPPED_TESTS.yaml index a9d9d6f6574b..f8f141b41312 100644 --- a/test/e2e/SKIPPED_TESTS.yaml +++ b/test/e2e/SKIPPED_TESTS.yaml @@ -1,18 +1,16 @@ skipped_tests: # CloudStack #Airgapped tests -- TestCloudStackKubernetes125RedhatAirgappedRegistryMirror - TestCloudStackKubernetes126RedhatAirgappedRegistryMirror - TestCloudStackKubernetes128RedhatAirgappedProxy # Proxy tests -- TestCloudStackKubernetes125RedhatProxyConfigAPI - TestCloudStackKubernetes126RedhatProxyConfigAPI - TestCloudStackKubernetes127RedhatProxyConfigAPI - TestCloudStackKubernetes128RedhatProxyConfigAPI +- TestCloudStackKubernetes130RedhatProxyConfigAPI # MultiEndpoint -- TestCloudStackKubernetes125MultiEndpointSimpleFlow - TestCloudStackKubernetes126MultiEndpointSimpleFlow - TestCloudStackKubernetes127MultiEndpointSimpleFlow - TestCloudStackKubernetes128MultiEndpointSimpleFlow diff --git a/test/e2e/cloudstack_test.go b/test/e2e/cloudstack_test.go index f3ca154fdb08..0c4953d3a0cb 100644 --- a/test/e2e/cloudstack_test.go +++ b/test/e2e/cloudstack_test.go @@ -18,14 +18,14 @@ import ( ) // APIServerExtraArgs -func TestCloudStackKubernetes129RedHat8APIServerExtraArgsSimpleFlow(t *testing.T) { +func TestCloudStackKubernetes130RedHat8APIServerExtraArgsSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat129()), + framework.NewCloudStack(t, framework.WithCloudStackRedhat130()), framework.WithEnvVar(features.APIServerExtraArgsEnabledEnvVar, "true"), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneAPIServerExtraArgs(), ), ) @@ -33,13 +33,13 @@ func TestCloudStackKubernetes129RedHat8APIServerExtraArgsSimpleFlow(t *testing.T } // TODO: Investigate why this test takes long time to pass with service-account-issuer flag -func TestCloudStackKubernetes129Redhat8APIServerExtraArgsUpgradeFlow(t *testing.T) { +func TestCloudStackKubernetes130Redhat8APIServerExtraArgsUpgradeFlow(t *testing.T) { var addAPIServerExtraArgsclusterOpts []framework.ClusterE2ETestOpt var removeAPIServerExtraArgsclusterOpts []framework.ClusterE2ETestOpt test := framework.NewClusterE2ETest( t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat129()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.NewCloudStack(t, framework.WithCloudStackRedhat130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithEnvVar(features.APIServerExtraArgsEnabledEnvVar, "true"), ) addAPIServerExtraArgsclusterOpts = append( @@ -62,16 +62,6 @@ func TestCloudStackKubernetes129Redhat8APIServerExtraArgsUpgradeFlow(t *testing. 
} // AWS IAM Auth -func TestCloudStackKubernetes125AWSIamAuth(t *testing.T) { - test := framework.NewClusterE2ETest( - t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithAWSIam(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - ) - runAWSIamAuthFlow(test) -} - func TestCloudStackKubernetes126AWSIamAuth(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -112,20 +102,14 @@ func TestCloudStackKubernetes129AWSIamAuth(t *testing.T) { runAWSIamAuthFlow(test) } -func TestCloudStackKubernetes125to126AWSIamAuthUpgrade(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()) +func TestCloudStackKubernetes130AWSIamAuth(t *testing.T) { test := framework.NewClusterE2ETest( t, - provider, + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), framework.WithAWSIam(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - ) - runUpgradeFlowWithAWSIamAuth( - test, - v1alpha1.Kube126, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), - provider.WithProviderUpgrade(provider.Redhat9Kubernetes126Template()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), ) + runAWSIamAuthFlow(test) } func TestCloudStackKubernetes126to127AWSIamAuthUpgrade(t *testing.T) { @@ -176,20 +160,23 @@ func TestCloudStackKubernetes128to129AWSIamAuthUpgrade(t *testing.T) { ) } -// Curated packages test -func TestCloudStackKubernetes125RedhatCuratedPackagesSimpleFlow(t *testing.T) { - framework.CheckCuratedPackagesCredentials(t) +func TestCloudStackKubernetes129to130AWSIamAuthUpgrade(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes129()) test := framework.NewClusterE2ETest( t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), - "my-packages-test", EksaPackageControllerHelmURI, - EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), + provider, + framework.WithAWSIam(), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + ) + runUpgradeFlowWithAWSIamAuth( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Redhat9Kubernetes130Template()), ) - runCuratedPackageInstallSimpleFlow(test) } +// Curated packages test func TestCloudStackKubernetes126RedhatCuratedPackagesSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest( @@ -242,17 +229,17 @@ func TestCloudStackKubernetes129RedhatCuratedPackagesSimpleFlow(t *testing.T) { runCuratedPackageInstallSimpleFlow(test) } -func TestCloudStackKubernetes125RedhatCuratedPackagesEmissarySimpleFlow(t *testing.T) { +func TestCloudStackKubernetes130RedhatCuratedPackagesSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest( t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + 
framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), "my-packages-test", EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runCuratedPackageEmissaryInstallSimpleFlow(test) + runCuratedPackageInstallSimpleFlow(test) } func TestCloudStackKubernetes126RedhatCuratedPackagesEmissarySimpleFlow(t *testing.T) { @@ -307,17 +294,17 @@ func TestCloudStackKubernetes129RedhatCuratedPackagesEmissarySimpleFlow(t *testi runCuratedPackageEmissaryInstallSimpleFlow(test) } -func TestCloudStackKubernetes125RedhatCuratedPackagesHarborSimpleFlow(t *testing.T) { +func TestCloudStackKubernetes130RedhatCuratedPackagesEmissarySimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest( t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), "my-packages-test", EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test) + runCuratedPackageEmissaryInstallSimpleFlow(test) } func TestCloudStackKubernetes126RedhatCuratedPackagesHarborSimpleFlow(t *testing.T) { @@ -372,11 +359,17 @@ func TestCloudStackKubernetes129RedhatCuratedPackagesHarborSimpleFlow(t *testing runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test) } -func TestCloudStackKubernetes125RedhatWorkloadClusterCuratedPackagesSimpleFlow(t *testing.T) { +func TestCloudStackKubernetes130RedhatCuratedPackagesHarborSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()) - test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube125) - runCuratedPackageRemoteClusterInstallSimpleFlow(test) + test := framework.NewClusterE2ETest( + t, + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), + "my-packages-test", EksaPackageControllerHelmURI, + EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), + ) + runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test) } func TestCloudStackKubernetes126RedhatWorkloadClusterCuratedPackagesSimpleFlow(t *testing.T) { @@ -407,11 +400,11 @@ func TestCloudStackKubernetes129RedhatWorkloadClusterCuratedPackagesSimpleFlow(t runCuratedPackageRemoteClusterInstallSimpleFlow(test) } -func TestCloudStackKubernetes125RedhatWorkloadClusterCuratedPackagesEmissarySimpleFlow(t *testing.T) { +func TestCloudStackKubernetes130RedhatWorkloadClusterCuratedPackagesSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()) - test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube125) - runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test) + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()) + test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube130) + 
runCuratedPackageRemoteClusterInstallSimpleFlow(test) } func TestCloudStackKubernetes126RedhatWorkloadClusterCuratedPackagesEmissarySimpleFlow(t *testing.T) { @@ -442,12 +435,11 @@ func TestCloudStackKubernetes129RedhatWorkloadClusterCuratedPackagesEmissarySimp runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test) } -func TestCloudStackKubernetes125RedhatCuratedPackagesCertManagerSimpleFlow(t *testing.T) { +func TestCloudStackKubernetes130RedhatWorkloadClusterCuratedPackagesEmissarySimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) - framework.CheckCertManagerCredentials(t) - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()) - test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube125) - runCertManagerRemoteClusterInstallSimpleFlow(test) + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()) + test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube130) + runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test) } func TestCloudStackKubernetes126RedhatCuratedPackagesCertManagerSimpleFlow(t *testing.T) { @@ -482,16 +474,12 @@ func TestCloudStackKubernetes129RedhatCuratedPackagesCertManagerSimpleFlow(t *te runCertManagerRemoteClusterInstallSimpleFlow(test) } -func TestCloudStackKubernetes125RedhatCuratedPackagesAdotSimpleFlow(t *testing.T) { +func TestCloudStackKubernetes130RedhatCuratedPackagesCertManagerSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) - test := framework.NewClusterE2ETest(t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), - "my-packages-test", EksaPackageControllerHelmURI, - EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), - ) - runCuratedPackagesAdotInstallSimpleFlow(test) + framework.CheckCertManagerCredentials(t) + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()) + test := SetupSimpleMultiCluster(t, provider, v1alpha1.Kube130) + runCertManagerRemoteClusterInstallSimpleFlow(test) } func TestCloudStackKubernetes126RedhatCuratedPackagesAdotSimpleFlow(t *testing.T) { @@ -542,16 +530,16 @@ func TestCloudStackKubernetes129RedhatCuratedPackagesAdotSimpleFlow(t *testing.T runCuratedPackagesAdotInstallSimpleFlow(test) } -func TestCloudStackKubernetes125RedhatCuratedPackagesAdotUpdateFlow(t *testing.T) { +func TestCloudStackKubernetes130RedhatCuratedPackagesAdotSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), "my-packages-test", EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runCuratedPackagesAdotInstallUpdateFlow(test) + runCuratedPackagesAdotInstallSimpleFlow(test) } func TestCloudStackKubernetes126RedhatCuratedPackagesAdotUpdateFlow(t *testing.T) { @@ -602,19 +590,16 @@ func TestCloudStackKubernetes129RedhatCuratedPackagesAdotUpdateFlow(t *testing.T 
runCuratedPackagesAdotInstallUpdateFlow(test) } -func TestCloudStackKubernetes125RedHatCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) { - minNodes := 1 - maxNodes := 2 +func TestCloudStackKubernetes130RedhatCuratedPackagesAdotUpdateFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) - test := framework.NewClusterE2ETest( - t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), - EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, + test := framework.NewClusterE2ETest(t, + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), + "my-packages-test", EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runAutoscalerWithMetricsServerSimpleFlow(test) + runCuratedPackagesAdotInstallUpdateFlow(test) } func TestCloudStackKubernetes126RedHatCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) { @@ -677,16 +662,19 @@ func TestCloudStackKubernetes129RedHatCuratedPackagesClusterAutoscalerSimpleFlow runAutoscalerWithMetricsServerSimpleFlow(test) } -func TestCloudStackKubernetes125RedhatCuratedPackagesPrometheusSimpleFlow(t *testing.T) { +func TestCloudStackKubernetes130RedHatCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) { + minNodes := 1 + maxNodes := 2 framework.CheckCuratedPackagesCredentials(t) - test := framework.NewClusterE2ETest(t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), - "my-packages-test", EksaPackageControllerHelmURI, + test := framework.NewClusterE2ETest( + t, + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), + EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runCuratedPackagesPrometheusInstallSimpleFlow(test) + runAutoscalerWithMetricsServerSimpleFlow(test) } func TestCloudStackKubernetes126RedhatCuratedPackagesPrometheusSimpleFlow(t *testing.T) { @@ -737,15 +725,27 @@ func TestCloudStackKubernetes129RedhatCuratedPackagesPrometheusSimpleFlow(t *tes runCuratedPackagesPrometheusInstallSimpleFlow(test) } +func TestCloudStackKubernetes130RedhatCuratedPackagesPrometheusSimpleFlow(t *testing.T) { + framework.CheckCuratedPackagesCredentials(t) + test := framework.NewClusterE2ETest(t, + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), + "my-packages-test", EksaPackageControllerHelmURI, + EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), + ) + runCuratedPackagesPrometheusInstallSimpleFlow(test) +} + // Download artifacts func TestCloudStackDownloadArtifacts(t *testing.T) { test := framework.NewClusterE2ETest( t, - 
framework.NewCloudStack(t, framework.WithCloudStackRedhat128()), + framework.NewCloudStack(t, framework.WithCloudStackRedhat130()), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), ) runDownloadArtifactsFlow(test) } @@ -753,27 +753,15 @@ func TestCloudStackDownloadArtifacts(t *testing.T) { func TestCloudStackRedhat9DownloadArtifacts(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes128()), + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), ) runDownloadArtifactsFlow(test) } -func TestCloudStackKubernetes125GithubFlux(t *testing.T) { - test := framework.NewClusterE2ETest(t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithFluxGithub(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), - framework.WithClusterFiller(api.WithControlPlaneCount(1)), - framework.WithClusterFiller(api.WithWorkerNodeCount(1)), - ) - runFluxFlow(test) -} - func TestCloudStackKubernetes126GithubFlux(t *testing.T) { test := framework.NewClusterE2ETest(t, framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes126()), @@ -822,11 +810,11 @@ func TestCloudStackKubernetes129GithubFlux(t *testing.T) { runFluxFlow(test) } -func TestCloudStackKubernetes125GitFlux(t *testing.T) { +func TestCloudStackKubernetes130GithubFlux(t *testing.T) { test := framework.NewClusterE2ETest(t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithFluxGit(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), + framework.WithFluxGithub(), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), @@ -882,22 +870,16 @@ func TestCloudStackKubernetes129GitFlux(t *testing.T) { runFluxFlow(test) } -func TestCloudStackKubernetes125To126GitFluxUpgrade(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()) +func TestCloudStackKubernetes130GitFlux(t *testing.T) { test := framework.NewClusterE2ETest(t, - provider, + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), framework.WithFluxGit(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), ) - runUpgradeFlowWithFlux( - test, - v1alpha1.Kube126, - 
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)),
-		provider.WithProviderUpgrade(provider.Redhat9Kubernetes126Template()),
-	)
+	runFluxFlow(test)
 }
 
 func TestCloudStackKubernetes126To127GitFluxUpgrade(t *testing.T) {
@@ -936,20 +918,21 @@ func TestCloudStackKubernetes127To128GitFluxUpgrade(t *testing.T) {
 	)
 }
 
-func TestCloudStackKubernetes125InstallGitFluxDuringUpgrade(t *testing.T) {
-	provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125())
+func TestCloudStackKubernetes129To130GitFluxUpgrade(t *testing.T) {
+	provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes129())
 	test := framework.NewClusterE2ETest(t,
 		provider,
-		framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
+		framework.WithFluxGit(),
+		framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)),
 		framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
 		framework.WithClusterFiller(api.WithControlPlaneCount(1)),
 		framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
 	)
 	runUpgradeFlowWithFlux(
 		test,
-		v1alpha1.Kube125,
-		framework.WithFluxGit(),
-		framework.WithClusterUpgrade(api.WithGitOpsRef(framework.DefaultFluxConfigName, v1alpha1.FluxConfigKind)),
+		v1alpha1.Kube130,
+		framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)),
+		provider.WithProviderUpgrade(provider.Redhat9Kubernetes130Template()),
 	)
 }
 
@@ -1021,6 +1004,23 @@ func TestCloudStackKubernetes129InstallGitFluxDuringUpgrade(t *testing.T) {
 	)
 }
 
+func TestCloudStackKubernetes130InstallGitFluxDuringUpgrade(t *testing.T) {
+	provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130())
+	test := framework.NewClusterE2ETest(t,
+		provider,
+		framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)),
+		framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
+		framework.WithClusterFiller(api.WithControlPlaneCount(1)),
+		framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
+	)
+	runUpgradeFlowWithFlux(
+		test,
+		v1alpha1.Kube130,
+		framework.WithFluxGit(),
+		framework.WithClusterUpgrade(api.WithGitOpsRef(framework.DefaultFluxConfigName, v1alpha1.FluxConfigKind)),
+	)
+}
+
 func TestCloudStackKubernetes128UpgradeManagementComponents(t *testing.T) {
 	release := latestMinorRelease(t)
 	provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes128())
@@ -1028,14 +1028,14 @@ func TestCloudStackKubernetes128UpgradeManagementComponents(t *testing.T) {
 }
 
 // Labels
-func TestCloudStackKubernetes125LabelsAndNodeNameRedhat(t *testing.T) {
+func TestCloudStackKubernetes126LabelsAndNodeNameRedhat(t *testing.T) {
 	test := framework.NewClusterE2ETest(
 		t,
 		framework.NewCloudStack(t,
-			framework.WithCloudStackRedhat9Kubernetes125(),
+			framework.WithCloudStackRedhat9Kubernetes126(),
 		),
 		framework.WithClusterFiller(
-			api.WithKubernetesVersion(v1alpha1.Kube125),
+			api.WithKubernetesVersion(v1alpha1.Kube126),
 			api.WithControlPlaneLabel(constants.FailureDomainLabelName, constants.CloudstackFailureDomainPlaceholder),
 			api.WithWorkerNodeGroup(constants.DefaultWorkerNodeGroupName,
 				api.WithCount(1),
@@ -1050,14 +1050,14 @@ func TestCloudStackKubernetes125LabelsAndNodeNameRedhat(t *testing.T) {
 	test.DeleteCluster()
 }
 
-func TestCloudStackKubernetes126LabelsAndNodeNameRedhat(t *testing.T) {
+func TestCloudStackKubernetes127LabelsAndNodeNameRedhat(t *testing.T) {
 	test := framework.NewClusterE2ETest(
 		t,
 		framework.NewCloudStack(t,
-			framework.WithCloudStackRedhat9Kubernetes126(),
+			
framework.WithCloudStackRedhat9Kubernetes127(), ), framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube126), + api.WithKubernetesVersion(v1alpha1.Kube127), api.WithControlPlaneLabel(constants.FailureDomainLabelName, constants.CloudstackFailureDomainPlaceholder), api.WithWorkerNodeGroup(constants.DefaultWorkerNodeGroupName, api.WithCount(1), @@ -1072,14 +1072,14 @@ func TestCloudStackKubernetes126LabelsAndNodeNameRedhat(t *testing.T) { test.DeleteCluster() } -func TestCloudStackKubernetes127LabelsAndNodeNameRedhat(t *testing.T) { +func TestCloudStackKubernetes128LabelsAndNodeNameRedhat(t *testing.T) { test := framework.NewClusterE2ETest( t, framework.NewCloudStack(t, - framework.WithCloudStackRedhat9Kubernetes127(), + framework.WithCloudStackRedhat9Kubernetes128(), ), framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube128), api.WithControlPlaneLabel(constants.FailureDomainLabelName, constants.CloudstackFailureDomainPlaceholder), api.WithWorkerNodeGroup(constants.DefaultWorkerNodeGroupName, api.WithCount(1), @@ -1094,14 +1094,14 @@ func TestCloudStackKubernetes127LabelsAndNodeNameRedhat(t *testing.T) { test.DeleteCluster() } -func TestCloudStackKubernetes128LabelsAndNodeNameRedhat(t *testing.T) { +func TestCloudStackKubernetes129LabelsAndNodeNameRedhat(t *testing.T) { test := framework.NewClusterE2ETest( t, framework.NewCloudStack(t, - framework.WithCloudStackRedhat9Kubernetes128(), + framework.WithCloudStackRedhat9Kubernetes129(), ), framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithControlPlaneLabel(constants.FailureDomainLabelName, constants.CloudstackFailureDomainPlaceholder), api.WithWorkerNodeGroup(constants.DefaultWorkerNodeGroupName, api.WithCount(1), @@ -1116,14 +1116,14 @@ func TestCloudStackKubernetes128LabelsAndNodeNameRedhat(t *testing.T) { test.DeleteCluster() } -func TestCloudStackKubernetes129LabelsAndNodeNameRedhat(t *testing.T) { +func TestCloudStackKubernetes130LabelsAndNodeNameRedhat(t *testing.T) { test := framework.NewClusterE2ETest( t, framework.NewCloudStack(t, - framework.WithCloudStackRedhat9Kubernetes129(), + framework.WithCloudStackRedhat9Kubernetes130(), ), framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneLabel(constants.FailureDomainLabelName, constants.CloudstackFailureDomainPlaceholder), api.WithWorkerNodeGroup(constants.DefaultWorkerNodeGroupName, api.WithCount(1), @@ -1138,14 +1138,14 @@ func TestCloudStackKubernetes129LabelsAndNodeNameRedhat(t *testing.T) { test.DeleteCluster() } -func TestCloudStackKubernetes125RedhatLabelsUpgradeFlow(t *testing.T) { - provider := redhat125ProviderWithLabels(t) +func TestCloudStackKubernetes126RedhatLabelsUpgradeFlow(t *testing.T) { + provider := redhat126ProviderWithLabels(t) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube125), + api.WithKubernetesVersion(v1alpha1.Kube126), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -1154,7 +1154,7 @@ func TestCloudStackKubernetes125RedhatLabelsUpgradeFlow(t *testing.T) { runLabelsUpgradeFlow( test, - v1alpha1.Kube125, + v1alpha1.Kube126, framework.WithClusterUpgrade( api.WithWorkerNodeGroup(worker0, api.WithLabel(key1, val1)), api.WithWorkerNodeGroup(worker1, 
api.WithLabel(key2, val2)), @@ -1164,14 +1164,14 @@ func TestCloudStackKubernetes125RedhatLabelsUpgradeFlow(t *testing.T) { ) } -func TestCloudStackKubernetes126RedhatLabelsUpgradeFlow(t *testing.T) { - provider := redhat126ProviderWithLabels(t) +func TestCloudStackKubernetes127RedhatLabelsUpgradeFlow(t *testing.T) { + provider := redhat127ProviderWithLabels(t) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube126), + api.WithKubernetesVersion(v1alpha1.Kube127), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -1180,7 +1180,7 @@ func TestCloudStackKubernetes126RedhatLabelsUpgradeFlow(t *testing.T) { runLabelsUpgradeFlow( test, - v1alpha1.Kube126, + v1alpha1.Kube127, framework.WithClusterUpgrade( api.WithWorkerNodeGroup(worker0, api.WithLabel(key1, val1)), api.WithWorkerNodeGroup(worker1, api.WithLabel(key2, val2)), @@ -1190,14 +1190,14 @@ func TestCloudStackKubernetes126RedhatLabelsUpgradeFlow(t *testing.T) { ) } -func TestCloudStackKubernetes127RedhatLabelsUpgradeFlow(t *testing.T) { - provider := redhat127ProviderWithLabels(t) +func TestCloudStackKubernetes128RedhatLabelsUpgradeFlow(t *testing.T) { + provider := redhat128ProviderWithLabels(t) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube128), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -1206,7 +1206,7 @@ func TestCloudStackKubernetes127RedhatLabelsUpgradeFlow(t *testing.T) { runLabelsUpgradeFlow( test, - v1alpha1.Kube127, + v1alpha1.Kube128, framework.WithClusterUpgrade( api.WithWorkerNodeGroup(worker0, api.WithLabel(key1, val1)), api.WithWorkerNodeGroup(worker1, api.WithLabel(key2, val2)), @@ -1216,14 +1216,14 @@ func TestCloudStackKubernetes127RedhatLabelsUpgradeFlow(t *testing.T) { ) } -func TestCloudStackKubernetes128RedhatLabelsUpgradeFlow(t *testing.T) { - provider := redhat128ProviderWithLabels(t) +func TestCloudStackKubernetes130RedhatLabelsUpgradeFlow(t *testing.T) { + provider := redhat130ProviderWithLabels(t) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -1232,7 +1232,7 @@ func TestCloudStackKubernetes128RedhatLabelsUpgradeFlow(t *testing.T) { runLabelsUpgradeFlow( test, - v1alpha1.Kube128, + v1alpha1.Kube130, framework.WithClusterUpgrade( api.WithWorkerNodeGroup(worker0, api.WithLabel(key1, val1)), api.WithWorkerNodeGroup(worker1, api.WithLabel(key2, val2)), @@ -1322,16 +1322,36 @@ func redhat128ProviderWithLabels(t *testing.T) *framework.CloudStack { ) } +func redhat130ProviderWithLabels(t *testing.T) *framework.CloudStack { + return framework.NewCloudStack(t, + framework.WithCloudStackWorkerNodeGroup( + worker0, + framework.WithWorkerNodeGroup(worker0, api.WithCount(2), + api.WithLabel(key1, val2)), + ), + framework.WithCloudStackWorkerNodeGroup( + worker1, + framework.WithWorkerNodeGroup(worker1, api.WithCount(1)), + ), + framework.WithCloudStackWorkerNodeGroup( + worker2, + framework.WithWorkerNodeGroup(worker2, api.WithCount(1), + api.WithLabel(key2, val2)), + ), + framework.WithCloudStackRedhat9Kubernetes130(), + ) 
+} + // Multicluster -func TestCloudStackKubernetes125MulticlusterWorkloadCluster(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()) +func TestCloudStackKubernetes126MulticlusterWorkloadCluster(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes126()) test := framework.NewMulticlusterE2ETest( t, framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube125), + api.WithKubernetesVersion(v1alpha1.Kube126), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithStackedEtcdTopology(), @@ -1341,7 +1361,7 @@ func TestCloudStackKubernetes125MulticlusterWorkloadCluster(t *testing.T) { t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube125), + api.WithKubernetesVersion(v1alpha1.Kube126), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithStackedEtcdTopology(), @@ -1351,15 +1371,15 @@ func TestCloudStackKubernetes125MulticlusterWorkloadCluster(t *testing.T) { runWorkloadClusterFlow(test) } -func TestCloudStackKubernetes126MulticlusterWorkloadCluster(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes126()) +func TestCloudStackKubernetes127MulticlusterWorkloadCluster(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes127()) test := framework.NewMulticlusterE2ETest( t, framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube126), + api.WithKubernetesVersion(v1alpha1.Kube127), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithStackedEtcdTopology(), @@ -1369,7 +1389,7 @@ func TestCloudStackKubernetes126MulticlusterWorkloadCluster(t *testing.T) { t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube126), + api.WithKubernetesVersion(v1alpha1.Kube127), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithStackedEtcdTopology(), @@ -1379,15 +1399,15 @@ func TestCloudStackKubernetes126MulticlusterWorkloadCluster(t *testing.T) { runWorkloadClusterFlow(test) } -func TestCloudStackKubernetes127MulticlusterWorkloadCluster(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes127()) +func TestCloudStackKubernetes128MulticlusterWorkloadCluster(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes128()) test := framework.NewMulticlusterE2ETest( t, framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube128), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithStackedEtcdTopology(), @@ -1397,7 +1417,7 @@ func TestCloudStackKubernetes127MulticlusterWorkloadCluster(t *testing.T) { t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube128), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithStackedEtcdTopology(), @@ -1407,15 +1427,15 @@ func TestCloudStackKubernetes127MulticlusterWorkloadCluster(t *testing.T) { runWorkloadClusterFlow(test) } -func TestCloudStackKubernetes128MulticlusterWorkloadCluster(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes128()) +func TestCloudStackKubernetes129MulticlusterWorkloadCluster(t *testing.T) { + provider := 
framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes129()) test := framework.NewMulticlusterE2ETest( t, framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithStackedEtcdTopology(), @@ -1425,7 +1445,7 @@ func TestCloudStackKubernetes128MulticlusterWorkloadCluster(t *testing.T) { t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithStackedEtcdTopology(), @@ -1435,15 +1455,15 @@ func TestCloudStackKubernetes128MulticlusterWorkloadCluster(t *testing.T) { runWorkloadClusterFlow(test) } -func TestCloudStackKubernetes129MulticlusterWorkloadCluster(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes129()) +func TestCloudStackKubernetes130MulticlusterWorkloadCluster(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()) test := framework.NewMulticlusterE2ETest( t, framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithStackedEtcdTopology(), @@ -1453,7 +1473,7 @@ func TestCloudStackKubernetes129MulticlusterWorkloadCluster(t *testing.T) { t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithStackedEtcdTopology(), @@ -1583,10 +1603,45 @@ func TestCloudStackUpgradeKubernetes128MulticlusterWorkloadClusterWithGithubFlux ) } -func TestCloudStackKubernetes125WithOIDCManagementClusterUpgradeFromLatestSideEffects(t *testing.T) { - cloudstack := framework.NewCloudStack(t) - runTestManagementClusterUpgradeSideEffects(t, cloudstack, framework.RedHat9, anywherev1.Kube125) -} +func TestCloudStackUpgradeKubernetes130MulticlusterWorkloadClusterWithGithubFlux(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes129()) + test := framework.NewMulticlusterE2ETest( + t, + framework.NewClusterE2ETest( + t, + provider, + framework.WithFluxGithub(), + framework.WithClusterFiller( + api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithControlPlaneCount(1), + api.WithWorkerNodeCount(1), + api.WithStackedEtcdTopology(), + ), + ), + framework.NewClusterE2ETest( + t, + provider, + framework.WithFluxGithub(), + framework.WithClusterFiller( + api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithControlPlaneCount(1), + api.WithWorkerNodeCount(1), + api.WithStackedEtcdTopology(), + ), + ), + ) + runWorkloadClusterFlowWithGitOps( + test, + framework.WithClusterUpgradeGit( + api.WithKubernetesVersion(v1alpha1.Kube130), + api.WithControlPlaneCount(3), + api.WithWorkerNodeCount(3), + ), + provider.WithProviderUpgradeGit( + provider.Redhat9Kubernetes130Template(), + ), + ) +} func TestCloudStackKubernetes126WithOIDCManagementClusterUpgradeFromLatestSideEffects(t *testing.T) { cloudstack := framework.NewCloudStack(t) @@ -1608,20 +1663,12 @@ func TestCloudStackKubernetes129WithOIDCManagementClusterUpgradeFromLatestSideEf runTestManagementClusterUpgradeSideEffects(t, cloudstack, framework.RedHat9, anywherev1.Kube129) } -// OIDC -func 
TestCloudStackKubernetes125OIDC(t *testing.T) {
-	test := framework.NewClusterE2ETest(
-		t,
-		framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()),
-		framework.WithOIDC(),
-		framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
-		framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
-		framework.WithClusterFiller(api.WithControlPlaneCount(1)),
-		framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
-	)
-	runOIDCFlow(test)
+func TestCloudStackKubernetes130WithOIDCManagementClusterUpgradeFromLatestSideEffects(t *testing.T) {
+	cloudstack := framework.NewCloudStack(t)
+	runTestManagementClusterUpgradeSideEffects(t, cloudstack, framework.RedHat9, anywherev1.Kube130)
 }
 
+// OIDC
 func TestCloudStackKubernetes126OIDC(t *testing.T) {
 	test := framework.NewClusterE2ETest(
 		t,
@@ -1674,23 +1721,17 @@ func TestCloudStackKubernetes129OIDC(t *testing.T) {
 	runOIDCFlow(test)
 }
 
-func TestCloudStackKubernetes125To126OIDCUpgrade(t *testing.T) {
-	provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125())
+func TestCloudStackKubernetes130OIDC(t *testing.T) {
 	test := framework.NewClusterE2ETest(
 		t,
-		provider,
+		framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()),
 		framework.WithOIDC(),
-		framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
+		framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)),
 		framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
 		framework.WithClusterFiller(api.WithControlPlaneCount(1)),
 		framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
 	)
-	runUpgradeFlowWithOIDC(
-		test,
-		v1alpha1.Kube126,
-		framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)),
-		provider.WithProviderUpgrade(provider.Redhat9Kubernetes126Template()),
-	)
+	runOIDCFlow(test)
 }
 
 func TestCloudStackKubernetes126To127OIDCUpgrade(t *testing.T) {
@@ -1712,20 +1753,26 @@ func TestCloudStackKubernetes126To127OIDCUpgrade(t *testing.T) {
 	)
 }
 
-// Proxy config
-func TestCloudStackKubernetes125RedhatProxyConfig(t *testing.T) {
+func TestCloudStackKubernetes129To130OIDCUpgrade(t *testing.T) {
+	provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes129())
 	test := framework.NewClusterE2ETest(
 		t,
-		framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()),
+		provider,
+		framework.WithOIDC(),
+		framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)),
 		framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
 		framework.WithClusterFiller(api.WithControlPlaneCount(1)),
 		framework.WithClusterFiller(api.WithWorkerNodeCount(1)),
-		framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
-		framework.WithProxy(framework.CloudstackProxyRequiredEnvVars),
 	)
-	runProxyConfigFlow(test)
+	runUpgradeFlowWithOIDC(
+		test,
+		v1alpha1.Kube130,
+		framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)),
+		provider.WithProviderUpgrade(provider.Redhat9Kubernetes130Template()),
+	)
 }
 
+// Proxy config
 func TestCloudStackKubernetes126RedhatProxyConfig(t *testing.T) {
 	test := framework.NewClusterE2ETest(
 		t,
@@ -1778,8 +1825,21 @@ func TestCloudStackKubernetes129RedhatProxyConfig(t *testing.T) {
 	runProxyConfigFlow(test)
 }
 
+func TestCloudStackKubernetes130RedhatProxyConfig(t *testing.T) {
+	test := framework.NewClusterE2ETest(
+		t,
+		framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()),
+		framework.WithClusterFiller(api.WithExternalEtcdTopology(1)),
+		
framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithProxy(framework.CloudstackProxyRequiredEnvVars), + ) + runProxyConfigFlow(test) +} + // Proxy config multicluster -func TestCloudStackKubernetes125RedhatProxyConfigAPI(t *testing.T) { +func TestCloudStackKubernetes126RedhatProxyConfigAPI(t *testing.T) { cloudstack := framework.NewCloudStack(t) managementCluster := framework.NewClusterE2ETest( t, @@ -1789,7 +1849,7 @@ func TestCloudStackKubernetes125RedhatProxyConfigAPI(t *testing.T) { api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), ), - cloudstack.WithRedhat9Kubernetes125(), + cloudstack.WithRedhat9Kubernetes126(), ) test := framework.NewMulticlusterE2ETest(t, managementCluster) @@ -1803,7 +1863,7 @@ func TestCloudStackKubernetes125RedhatProxyConfigAPI(t *testing.T) { api.ClusterToConfigFiller( api.WithManagementCluster(managementCluster.ClusterName), ), - cloudstack.WithRedhat9Kubernetes125(), + cloudstack.WithRedhat9Kubernetes126(), ), ) @@ -1823,7 +1883,7 @@ func TestCloudStackKubernetes125RedhatProxyConfigAPI(t *testing.T) { test.DeleteManagementCluster() } -func TestCloudStackKubernetes126RedhatProxyConfigAPI(t *testing.T) { +func TestCloudStackKubernetes127RedhatProxyConfigAPI(t *testing.T) { cloudstack := framework.NewCloudStack(t) managementCluster := framework.NewClusterE2ETest( t, @@ -1833,7 +1893,7 @@ func TestCloudStackKubernetes126RedhatProxyConfigAPI(t *testing.T) { api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), ), - cloudstack.WithRedhat9Kubernetes126(), + cloudstack.WithRedhat9Kubernetes127(), ) test := framework.NewMulticlusterE2ETest(t, managementCluster) @@ -1847,7 +1907,7 @@ func TestCloudStackKubernetes126RedhatProxyConfigAPI(t *testing.T) { api.ClusterToConfigFiller( api.WithManagementCluster(managementCluster.ClusterName), ), - cloudstack.WithRedhat9Kubernetes126(), + cloudstack.WithRedhat9Kubernetes127(), ), ) @@ -1867,7 +1927,7 @@ func TestCloudStackKubernetes126RedhatProxyConfigAPI(t *testing.T) { test.DeleteManagementCluster() } -func TestCloudStackKubernetes127RedhatProxyConfigAPI(t *testing.T) { +func TestCloudStackKubernetes128RedhatProxyConfigAPI(t *testing.T) { cloudstack := framework.NewCloudStack(t) managementCluster := framework.NewClusterE2ETest( t, @@ -1877,7 +1937,7 @@ func TestCloudStackKubernetes127RedhatProxyConfigAPI(t *testing.T) { api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), ), - cloudstack.WithRedhat9Kubernetes127(), + cloudstack.WithRedhat9Kubernetes128(), ) test := framework.NewMulticlusterE2ETest(t, managementCluster) @@ -1891,7 +1951,7 @@ func TestCloudStackKubernetes127RedhatProxyConfigAPI(t *testing.T) { api.ClusterToConfigFiller( api.WithManagementCluster(managementCluster.ClusterName), ), - cloudstack.WithRedhat9Kubernetes127(), + cloudstack.WithRedhat9Kubernetes128(), ), ) @@ -1911,7 +1971,7 @@ func TestCloudStackKubernetes127RedhatProxyConfigAPI(t *testing.T) { test.DeleteManagementCluster() } -func TestCloudStackKubernetes128RedhatProxyConfigAPI(t *testing.T) { +func TestCloudStackKubernetes130RedhatProxyConfigAPI(t *testing.T) { cloudstack := framework.NewCloudStack(t) managementCluster := framework.NewClusterE2ETest( t, @@ -1921,7 +1981,7 @@ func TestCloudStackKubernetes128RedhatProxyConfigAPI(t *testing.T) { api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), ), - cloudstack.WithRedhat9Kubernetes128(), + 
cloudstack.WithRedhat9Kubernetes130(), ) test := framework.NewMulticlusterE2ETest(t, managementCluster) @@ -1935,7 +1995,7 @@ func TestCloudStackKubernetes128RedhatProxyConfigAPI(t *testing.T) { api.ClusterToConfigFiller( api.WithManagementCluster(managementCluster.ClusterName), ), - cloudstack.WithRedhat9Kubernetes128(), + cloudstack.WithRedhat9Kubernetes130(), ), ) @@ -1956,19 +2016,6 @@ func TestCloudStackKubernetes128RedhatProxyConfigAPI(t *testing.T) { } // Registry mirror -func TestCloudStackKubernetes125RedhatRegistryMirrorInsecureSkipVerify(t *testing.T) { - test := framework.NewClusterE2ETest( - t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithClusterFiller(api.WithControlPlaneCount(1)), - framework.WithClusterFiller(api.WithWorkerNodeCount(1)), - framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithRegistryMirrorInsecureSkipVerify(constants.CloudStackProviderName), - ) - runRegistryMirrorConfigFlow(test) -} - func TestCloudStackKubernetes126RedhatRegistryMirrorInsecureSkipVerify(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -2008,15 +2055,15 @@ func TestCloudStackKubernetes128RedhatRegistryMirrorInsecureSkipVerify(t *testin runRegistryMirrorConfigFlow(test) } -func TestCloudStackKubernetes125RedhatRegistryMirrorAndCert(t *testing.T) { +func TestCloudStackKubernetes130RedhatRegistryMirrorInsecureSkipVerify(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithRegistryMirrorEndpointAndCert(constants.CloudStackProviderName), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithRegistryMirrorInsecureSkipVerify(constants.CloudStackProviderName), ) runRegistryMirrorConfigFlow(test) } @@ -2073,6 +2120,19 @@ func TestCloudStackKubernetes129RedhatRegistryMirrorAndCert(t *testing.T) { runRegistryMirrorConfigFlow(test) } +func TestCloudStackKubernetes130RedhatRegistryMirrorAndCert(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithRegistryMirrorEndpointAndCert(constants.CloudStackProviderName), + ) + runRegistryMirrorConfigFlow(test) +} + func TestCloudStackKubernetes125RedhatAuthenticatedRegistryMirror(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -2126,15 +2186,6 @@ func TestCloudStackKubernetes128RedhatAuthenticatedRegistryMirror(t *testing.T) } // Simpleflow -func TestCloudStackKubernetes125RedHat8SimpleFlow(t *testing.T) { - test := framework.NewClusterE2ETest( - t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - ) - runSimpleFlow(test) -} - func TestCloudStackKubernetes126RedHat8SimpleFlow(t *testing.T) 
{ test := framework.NewClusterE2ETest( t, @@ -2171,11 +2222,11 @@ func TestCloudStackKubernetes129RedHat8SimpleFlow(t *testing.T) { runSimpleFlow(test) } -func TestCloudStackKubernetes125RedHat9SimpleFlow(t *testing.T) { +func TestCloudStackKubernetes130RedHat8SimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.NewCloudStack(t, framework.WithCloudStackRedhat130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), ) runSimpleFlow(test) } @@ -2216,13 +2267,11 @@ func TestCloudStackKubernetes129RedHat9SimpleFlow(t *testing.T) { runSimpleFlow(test) } -func TestCloudStackKubernetes125ThreeReplicasFiveWorkersSimpleFlow(t *testing.T) { +func TestCloudStackKubernetes130RedHat9SimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithClusterFiller(api.WithControlPlaneCount(3)), - framework.WithClusterFiller(api.WithWorkerNodeCount(5)), + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), ) runSimpleFlow(test) } @@ -2271,12 +2320,13 @@ func TestCloudStackKubernetes129ThreeReplicasFiveWorkersSimpleFlow(t *testing.T) runSimpleFlow(test) } -func TestCloudStackKubernetes125MultiEndpointSimpleFlow(t *testing.T) { +func TestCloudStackKubernetes130ThreeReplicasFiveWorkersSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125(), - framework.WithCloudStackFillers(framework.UpdateAddCloudStackAz2())), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(3)), + framework.WithClusterFiller(api.WithWorkerNodeCount(5)), ) runSimpleFlow(test) } @@ -2321,14 +2371,12 @@ func TestCloudStackKubernetes129MultiEndpointSimpleFlow(t *testing.T) { runSimpleFlow(test) } -func TestCloudStackKubernetes125DifferentNamespaceSimpleFlow(t *testing.T) { +func TestCloudStackKubernetes130MultiEndpointSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125(), - framework.WithCloudStackFillers(api.WithCloudStackConfigNamespace(clusterNamespace), - api.WithCloudStackConfigNamespaceForAllMachinesAndDatacenter(clusterNamespace))), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithClusterFiller(api.WithClusterNamespace(clusterNamespace)), + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130(), + framework.WithCloudStackFillers(framework.UpdateAddCloudStackAz2())), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), ) runSimpleFlow(test) } @@ -2369,17 +2417,19 @@ func TestCloudStackKubernetes128DifferentNamespaceSimpleFlow(t *testing.T) { runSimpleFlow(test) } -// Cilium Policy -func TestCloudStackKubernetes125CiliumAlwaysPolicyEnforcementModeSimpleFlow(t *testing.T) { +func TestCloudStackKubernetes130DifferentNamespaceSimpleFlow(t *testing.T) { test := 
framework.NewClusterE2ETest( t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithClusterFiller(api.WithCiliumPolicyEnforcementMode(v1alpha1.CiliumPolicyModeAlways)), + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130(), + framework.WithCloudStackFillers(api.WithCloudStackConfigNamespace(clusterNamespace), + api.WithCloudStackConfigNamespaceForAllMachinesAndDatacenter(clusterNamespace))), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithClusterNamespace(clusterNamespace)), ) runSimpleFlow(test) } +// Cilium Policy func TestCloudStackKubernetes126CiliumAlwaysPolicyEnforcementModeSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -2410,6 +2460,16 @@ func TestCloudStackKubernetes128CiliumAlwaysPolicyEnforcementModeSimpleFlow(t *t runSimpleFlow(test) } +func TestCloudStackKubernetes130CiliumAlwaysPolicyEnforcementModeSimpleFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithCiliumPolicyEnforcementMode(v1alpha1.CiliumPolicyModeAlways)), + ) + runSimpleFlow(test) +} + func TestCloudStackKubernetes125RedhatTo126UpgradeCiliumPolicyEnforcementMode(t *testing.T) { provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()) test := framework.NewClusterE2ETest( @@ -2449,15 +2509,6 @@ func TestCloudStackKubernetes126RedhatTo127UpgradeCiliumPolicyEnforcementMode(t } // Stacked etcd -func TestCloudStackKubernetes125StackedEtcdRedhat(t *testing.T) { - test := framework.NewClusterE2ETest(t, - framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithClusterFiller(api.WithControlPlaneCount(1)), - framework.WithClusterFiller(api.WithStackedEtcdTopology())) - runStackedEtcdFlow(test) -} - func TestCloudStackKubernetes126StackedEtcdRedhat(t *testing.T) { test := framework.NewClusterE2ETest(t, framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes126()), @@ -2494,15 +2545,24 @@ func TestCloudStackKubernetes129StackedEtcdRedhat(t *testing.T) { runStackedEtcdFlow(test) } +func TestCloudStackKubernetes130StackedEtcdRedhat(t *testing.T) { + test := framework.NewClusterE2ETest(t, + framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithStackedEtcdTopology())) + runStackedEtcdFlow(test) +} + // Taints -func TestCloudStackKubernetes125RedhatTaintsUpgradeFlow(t *testing.T) { - provider := redhat125ProviderWithTaints(t) +func TestCloudStackKubernetes126RedhatTaintsUpgradeFlow(t *testing.T) { + provider := redhat126ProviderWithTaints(t) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube125), + api.WithKubernetesVersion(v1alpha1.Kube126), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -2511,7 +2571,7 @@ func TestCloudStackKubernetes125RedhatTaintsUpgradeFlow(t *testing.T) { runTaintsUpgradeFlow( test, - 
v1alpha1.Kube125, + v1alpha1.Kube126, framework.WithClusterUpgrade( api.WithWorkerNodeGroup(worker0, api.WithTaint(framework.NoExecuteTaint())), api.WithWorkerNodeGroup(worker1, api.WithTaint(framework.NoExecuteTaint())), @@ -2521,14 +2581,14 @@ func TestCloudStackKubernetes125RedhatTaintsUpgradeFlow(t *testing.T) { ) } -func TestCloudStackKubernetes126RedhatTaintsUpgradeFlow(t *testing.T) { - provider := redhat126ProviderWithTaints(t) +func TestCloudStackKubernetes127RedhatTaintsUpgradeFlow(t *testing.T) { + provider := redhat127ProviderWithTaints(t) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube126), + api.WithKubernetesVersion(v1alpha1.Kube127), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -2537,7 +2597,7 @@ func TestCloudStackKubernetes126RedhatTaintsUpgradeFlow(t *testing.T) { runTaintsUpgradeFlow( test, - v1alpha1.Kube126, + v1alpha1.Kube127, framework.WithClusterUpgrade( api.WithWorkerNodeGroup(worker0, api.WithTaint(framework.NoExecuteTaint())), api.WithWorkerNodeGroup(worker1, api.WithTaint(framework.NoExecuteTaint())), @@ -2547,14 +2607,14 @@ func TestCloudStackKubernetes126RedhatTaintsUpgradeFlow(t *testing.T) { ) } -func TestCloudStackKubernetes127RedhatTaintsUpgradeFlow(t *testing.T) { - provider := redhat127ProviderWithTaints(t) +func TestCloudStackKubernetes128RedhatTaintsUpgradeFlow(t *testing.T) { + provider := redhat128ProviderWithTaints(t) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube128), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -2563,7 +2623,7 @@ func TestCloudStackKubernetes127RedhatTaintsUpgradeFlow(t *testing.T) { runTaintsUpgradeFlow( test, - v1alpha1.Kube127, + v1alpha1.Kube128, framework.WithClusterUpgrade( api.WithWorkerNodeGroup(worker0, api.WithTaint(framework.NoExecuteTaint())), api.WithWorkerNodeGroup(worker1, api.WithTaint(framework.NoExecuteTaint())), @@ -2573,14 +2633,14 @@ func TestCloudStackKubernetes127RedhatTaintsUpgradeFlow(t *testing.T) { ) } -func TestCloudStackKubernetes128RedhatTaintsUpgradeFlow(t *testing.T) { - provider := redhat128ProviderWithTaints(t) +func TestCloudStackKubernetes130RedhatTaintsUpgradeFlow(t *testing.T) { + provider := redhat130ProviderWithTaints(t) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -2589,7 +2649,7 @@ func TestCloudStackKubernetes128RedhatTaintsUpgradeFlow(t *testing.T) { runTaintsUpgradeFlow( test, - v1alpha1.Kube128, + v1alpha1.Kube130, framework.WithClusterUpgrade( api.WithWorkerNodeGroup(worker0, api.WithTaint(framework.NoExecuteTaint())), api.WithWorkerNodeGroup(worker1, api.WithTaint(framework.NoExecuteTaint())), @@ -2599,7 +2659,7 @@ func TestCloudStackKubernetes128RedhatTaintsUpgradeFlow(t *testing.T) { ) } -func redhat125ProviderWithTaints(t *testing.T) *framework.CloudStack { +func redhat126ProviderWithTaints(t *testing.T) *framework.CloudStack { return framework.NewCloudStack(t, framework.WithCloudStackWorkerNodeGroup( worker0, @@ -2613,11 +2673,11 @@ func redhat125ProviderWithTaints(t 
*testing.T) *framework.CloudStack { worker2, framework.PreferNoScheduleWorkerNodeGroup(worker2, 1), ), - framework.WithCloudStackRedhat9Kubernetes125(), + framework.WithCloudStackRedhat9Kubernetes126(), ) } -func redhat126ProviderWithTaints(t *testing.T) *framework.CloudStack { +func redhat127ProviderWithTaints(t *testing.T) *framework.CloudStack { return framework.NewCloudStack(t, framework.WithCloudStackWorkerNodeGroup( worker0, @@ -2631,11 +2691,11 @@ func redhat126ProviderWithTaints(t *testing.T) *framework.CloudStack { worker2, framework.PreferNoScheduleWorkerNodeGroup(worker2, 1), ), - framework.WithCloudStackRedhat9Kubernetes126(), + framework.WithCloudStackRedhat9Kubernetes127(), ) } -func redhat127ProviderWithTaints(t *testing.T) *framework.CloudStack { +func redhat128ProviderWithTaints(t *testing.T) *framework.CloudStack { return framework.NewCloudStack(t, framework.WithCloudStackWorkerNodeGroup( worker0, @@ -2649,11 +2709,11 @@ func redhat127ProviderWithTaints(t *testing.T) *framework.CloudStack { worker2, framework.PreferNoScheduleWorkerNodeGroup(worker2, 1), ), - framework.WithCloudStackRedhat9Kubernetes127(), + framework.WithCloudStackRedhat9Kubernetes128(), ) } -func redhat128ProviderWithTaints(t *testing.T) *framework.CloudStack { +func redhat130ProviderWithTaints(t *testing.T) *framework.CloudStack { return framework.NewCloudStack(t, framework.WithCloudStackWorkerNodeGroup( worker0, @@ -2667,12 +2727,12 @@ func redhat128ProviderWithTaints(t *testing.T) *framework.CloudStack { worker2, framework.PreferNoScheduleWorkerNodeGroup(worker2, 1), ), - framework.WithCloudStackRedhat9Kubernetes128(), + framework.WithCloudStackRedhat9Kubernetes130(), ) } // Upgrade -func TestCloudStackKubernetes125RedhatAndRemoveWorkerNodeGroups(t *testing.T) { +func TestCloudStackKubernetes126RedhatAndRemoveWorkerNodeGroups(t *testing.T) { provider := framework.NewCloudStack(t, framework.WithCloudStackWorkerNodeGroup( "worker-1", @@ -2682,13 +2742,13 @@ func TestCloudStackKubernetes125RedhatAndRemoveWorkerNodeGroups(t *testing.T) { "worker-2", framework.WithWorkerNodeGroup("workers-2", api.WithCount(1)), ), - framework.WithCloudStackRedhat9Kubernetes125(), + framework.WithCloudStackRedhat9Kubernetes126(), ) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube125), + api.WithKubernetesVersion(v1alpha1.Kube126), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -2697,7 +2757,7 @@ func TestCloudStackKubernetes125RedhatAndRemoveWorkerNodeGroups(t *testing.T) { runSimpleUpgradeFlow( test, - v1alpha1.Kube125, + v1alpha1.Kube126, framework.WithClusterUpgrade( api.RemoveWorkerNodeGroup("workers-2"), api.WithWorkerNodeGroup("workers-1", api.WithCount(1)), @@ -2712,7 +2772,7 @@ func TestCloudStackKubernetes125RedhatAndRemoveWorkerNodeGroups(t *testing.T) { ) } -func TestCloudStackKubernetes126RedhatAndRemoveWorkerNodeGroups(t *testing.T) { +func TestCloudStackKubernetes127RedhatAndRemoveWorkerNodeGroups(t *testing.T) { provider := framework.NewCloudStack(t, framework.WithCloudStackWorkerNodeGroup( "worker-1", @@ -2722,13 +2782,13 @@ func TestCloudStackKubernetes126RedhatAndRemoveWorkerNodeGroups(t *testing.T) { "worker-2", framework.WithWorkerNodeGroup("workers-2", api.WithCount(1)), ), - framework.WithCloudStackRedhat9Kubernetes126(), + framework.WithCloudStackRedhat9Kubernetes127(), ) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( 
- api.WithKubernetesVersion(v1alpha1.Kube126), + api.WithKubernetesVersion(v1alpha1.Kube127), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -2737,7 +2797,7 @@ func TestCloudStackKubernetes126RedhatAndRemoveWorkerNodeGroups(t *testing.T) { runSimpleUpgradeFlow( test, - v1alpha1.Kube126, + v1alpha1.Kube127, framework.WithClusterUpgrade( api.RemoveWorkerNodeGroup("workers-2"), api.WithWorkerNodeGroup("workers-1", api.WithCount(1)), @@ -2752,7 +2812,7 @@ func TestCloudStackKubernetes126RedhatAndRemoveWorkerNodeGroups(t *testing.T) { ) } -func TestCloudStackKubernetes127RedhatAndRemoveWorkerNodeGroups(t *testing.T) { +func TestCloudStackKubernetes128RedhatAndRemoveWorkerNodeGroups(t *testing.T) { provider := framework.NewCloudStack(t, framework.WithCloudStackWorkerNodeGroup( "worker-1", @@ -2762,13 +2822,13 @@ func TestCloudStackKubernetes127RedhatAndRemoveWorkerNodeGroups(t *testing.T) { "worker-2", framework.WithWorkerNodeGroup("workers-2", api.WithCount(1)), ), - framework.WithCloudStackRedhat9Kubernetes127(), + framework.WithCloudStackRedhat9Kubernetes128(), ) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube128), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -2777,7 +2837,7 @@ func TestCloudStackKubernetes127RedhatAndRemoveWorkerNodeGroups(t *testing.T) { runSimpleUpgradeFlow( test, - v1alpha1.Kube127, + v1alpha1.Kube128, framework.WithClusterUpgrade( api.RemoveWorkerNodeGroup("workers-2"), api.WithWorkerNodeGroup("workers-1", api.WithCount(1)), @@ -2792,7 +2852,7 @@ func TestCloudStackKubernetes127RedhatAndRemoveWorkerNodeGroups(t *testing.T) { ) } -func TestCloudStackKubernetes128RedhatAndRemoveWorkerNodeGroups(t *testing.T) { +func TestCloudStackKubernetes129RedhatAndRemoveWorkerNodeGroups(t *testing.T) { provider := framework.NewCloudStack(t, framework.WithCloudStackWorkerNodeGroup( "worker-1", @@ -2802,13 +2862,13 @@ func TestCloudStackKubernetes128RedhatAndRemoveWorkerNodeGroups(t *testing.T) { "worker-2", framework.WithWorkerNodeGroup("workers-2", api.WithCount(1)), ), - framework.WithCloudStackRedhat9Kubernetes128(), + framework.WithCloudStackRedhat9Kubernetes129(), ) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -2817,7 +2877,7 @@ func TestCloudStackKubernetes128RedhatAndRemoveWorkerNodeGroups(t *testing.T) { runSimpleUpgradeFlow( test, - v1alpha1.Kube128, + v1alpha1.Kube129, framework.WithClusterUpgrade( api.RemoveWorkerNodeGroup("workers-2"), api.WithWorkerNodeGroup("workers-1", api.WithCount(1)), @@ -2832,7 +2892,7 @@ func TestCloudStackKubernetes128RedhatAndRemoveWorkerNodeGroups(t *testing.T) { ) } -func TestCloudStackKubernetes129RedhatAndRemoveWorkerNodeGroups(t *testing.T) { +func TestCloudStackKubernetes130RedhatAndRemoveWorkerNodeGroups(t *testing.T) { provider := framework.NewCloudStack(t, framework.WithCloudStackWorkerNodeGroup( "worker-1", @@ -2842,13 +2902,13 @@ func TestCloudStackKubernetes129RedhatAndRemoveWorkerNodeGroups(t *testing.T) { "worker-2", framework.WithWorkerNodeGroup("workers-2", api.WithCount(1)), ), - 
framework.WithCloudStackRedhat9Kubernetes129(), + framework.WithCloudStackRedhat9Kubernetes130(), ) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -2857,7 +2917,7 @@ func TestCloudStackKubernetes129RedhatAndRemoveWorkerNodeGroups(t *testing.T) { runSimpleUpgradeFlow( test, - v1alpha1.Kube129, + v1alpha1.Kube130, framework.WithClusterUpgrade( api.RemoveWorkerNodeGroup("workers-2"), api.WithWorkerNodeGroup("workers-1", api.WithCount(1)), @@ -2872,25 +2932,6 @@ func TestCloudStackKubernetes129RedhatAndRemoveWorkerNodeGroups(t *testing.T) { ) } -func TestCloudStackKubernetes125To126Redhat8UnstackedEtcdUpgrade(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat125()) - test := framework.NewClusterE2ETest( - t, - provider, - framework.WithClusterFiller(api.WithStackedEtcdTopology()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithClusterFiller(api.WithControlPlaneCount(1)), - framework.WithClusterFiller(api.WithWorkerNodeCount(1)), - ) - runSimpleUpgradeFlow( - test, - v1alpha1.Kube126, - framework.WithClusterFiller(api.WithStackedEtcdTopology()), - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), - provider.WithProviderUpgrade(provider.Redhat126Template()), - ) -} - func TestCloudStackKubernetes126To127Redhat8UnstackedEtcdUpgrade(t *testing.T) { provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat126()) test := framework.NewClusterE2ETest( @@ -2948,22 +2989,22 @@ func TestCloudStackKubernetes128To129Redhat8UnstackedEtcdUpgrade(t *testing.T) { ) } -func TestCloudStackKubernetes125To126Redhat8StackedEtcdUpgrade(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat125()) +func TestCloudStackKubernetes129To130Redhat8UnstackedEtcdUpgrade(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat129()) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller(api.WithStackedEtcdTopology()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), ) runSimpleUpgradeFlow( test, - v1alpha1.Kube126, + v1alpha1.Kube130, framework.WithClusterFiller(api.WithStackedEtcdTopology()), - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), - provider.WithProviderUpgrade(provider.Redhat126Template()), + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Redhat130Template()), ) } @@ -3005,20 +3046,22 @@ func TestCloudStackKubernetes127To128Redhat8StackedEtcdUpgrade(t *testing.T) { ) } -func TestCloudStackKubernetes125To126Redhat9UnstackedEtcdUpgrade(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()) +func TestCloudStackKubernetes129To130Redhat8StackedEtcdUpgrade(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat129()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), - 
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithStackedEtcdTopology()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), ) runSimpleUpgradeFlow( test, - v1alpha1.Kube126, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), - provider.WithProviderUpgrade(provider.Redhat9Kubernetes126Template()), + v1alpha1.Kube130, + framework.WithClusterFiller(api.WithStackedEtcdTopology()), + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Redhat130Template()), ) } @@ -3073,21 +3116,20 @@ func TestCloudStackKubernetes128To129Redhat9UnstackedEtcdUpgrade(t *testing.T) { ) } -func TestCloudStackKubernetes125To126Redhat9StackedEtcdUpgrade(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()) +func TestCloudStackKubernetes129To130Redhat9UnstackedEtcdUpgrade(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes129()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), - framework.WithClusterFiller(api.WithWorkerNodeCount(1)), - framework.WithClusterFiller(api.WithStackedEtcdTopology()), ) runSimpleUpgradeFlow( test, - v1alpha1.Kube126, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), - provider.WithProviderUpgrade(provider.Redhat9Kubernetes126Template()), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Redhat9Kubernetes130Template()), ) } @@ -3127,21 +3169,21 @@ func TestCloudStackKubernetes127To128Redhat9StackedEtcdUpgrade(t *testing.T) { ) } -func TestCloudStackKubernetes125Redhat8ToRedhat9Upgrade(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat125()) +func TestCloudStackKubernetes129To130Redhat9StackedEtcdUpgrade(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes129()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), - framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithStackedEtcdTopology()), ) runSimpleUpgradeFlow( test, - v1alpha1.Kube125, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube125)), - provider.WithProviderUpgrade(provider.Redhat9Kubernetes125Template()), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Redhat9Kubernetes130Template()), ) } @@ -3217,37 +3259,25 @@ func TestCloudStackKubernetes129Redhat8ToRedhat9Upgrade(t *testing.T) { ) } -// TODO: investigate these tests further as they pass even without the expected behavior(upgrade should fail the first time and continue from the checkpoint on second upgrade) -func 
TestCloudStackKubernetes125RedhatTo126UpgradeWithCheckpoint(t *testing.T) { - var clusterOpts []framework.ClusterE2ETestOpt - var clusterOpts2 []framework.ClusterE2ETestOpt - - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()) +func TestCloudStackKubernetes130Redhat8ToRedhat9Upgrade(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat130()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), ) - - clusterOpts = append(clusterOpts, framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), framework.ExpectFailure(true), - provider.WithProviderUpgrade(provider.Redhat9Kubernetes125Template()), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupVmsVar, "false")) - - commandOpts := []framework.CommandOpt{framework.WithExternalEtcdWaitTimeout("10m")} - - clusterOpts2 = append(clusterOpts, framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), framework.ExpectFailure(false), - provider.WithProviderUpgrade(provider.Redhat9Kubernetes126Template()), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupVmsVar, "true")) - - runUpgradeFlowWithCheckpoint( + runSimpleUpgradeFlow( test, - v1alpha1.Kube126, - clusterOpts, - clusterOpts2, - commandOpts, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Redhat9Kubernetes130Template()), ) } +// TODO: investigate these tests further as they pass even without the expected behavior(upgrade should fail the first time and continue from the checkpoint on second upgrade) func TestCloudStackKubernetes126RedhatTo127UpgradeWithCheckpoint(t *testing.T) { var clusterOpts []framework.ClusterE2ETestOpt var clusterOpts2 []framework.ClusterE2ETestOpt @@ -3308,19 +3338,33 @@ func TestCloudStackKubernetes127RedhatTo128UpgradeWithCheckpoint(t *testing.T) { ) } -func TestCloudStackKubernetes125RedhatControlPlaneNodeUpgrade(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()) +func TestCloudStackKubernetes129RedhatTo130UpgradeWithCheckpoint(t *testing.T) { + var clusterOpts []framework.ClusterE2ETestOpt + var clusterOpts2 []framework.ClusterE2ETestOpt + + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes129()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), - framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), ) - runSimpleUpgradeFlow( + + clusterOpts = append(clusterOpts, framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.ExpectFailure(true), + provider.WithProviderUpgrade(provider.Redhat9Kubernetes129Template()), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupVmsVar, "false")) + + commandOpts := 
[]framework.CommandOpt{framework.WithExternalEtcdWaitTimeout("10m")} + + clusterOpts2 = append(clusterOpts, framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.ExpectFailure(false), + provider.WithProviderUpgrade(provider.Redhat9Kubernetes130Template()), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupVmsVar, "true")) + + runUpgradeFlowWithCheckpoint( test, - v1alpha1.Kube125, - framework.WithClusterUpgrade(api.WithControlPlaneCount(3)), + v1alpha1.Kube130, + clusterOpts, + clusterOpts2, + commandOpts, ) } @@ -3388,19 +3432,19 @@ func TestCloudStackKubernetes129RedhatControlPlaneNodeUpgrade(t *testing.T) { ) } -func TestCloudStackKubernetes125RedhatWorkerNodeUpgrade(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()) +func TestCloudStackKubernetes130RedhatControlPlaneNodeUpgrade(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(3)), ) runSimpleUpgradeFlow( test, - v1alpha1.Kube125, - framework.WithClusterUpgrade(api.WithWorkerNodeCount(5)), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithControlPlaneCount(3)), ) } @@ -3468,22 +3512,19 @@ func TestCloudStackKubernetes129RedhatWorkerNodeUpgrade(t *testing.T) { ) } -func TestCloudStackKubernetes125To126RedhatMultipleFieldsUpgrade(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes125()) +func TestCloudStackKubernetes130RedhatWorkerNodeUpgrade(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes130()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), ) runSimpleUpgradeFlow( test, - v1alpha1.Kube126, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), - framework.WithClusterFiller(api.WithControlPlaneCount(1)), - provider.WithProviderUpgrade( - provider.Redhat9Kubernetes126Template(), - framework.UpdateLargerCloudStackComputeOffering(), - ), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithWorkerNodeCount(5)), ) } @@ -3544,49 +3585,46 @@ func TestCloudStackKubernetes128To129RedhatMultipleFieldsUpgrade(t *testing.T) { ) } -func TestCloudStackKubernetes128To129StackedEtcdRedhatMultipleFieldsUpgrade(t *testing.T) { - provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes128()) +func TestCloudStackKubernetes129To130RedhatMultipleFieldsUpgrade(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes129()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), - framework.WithClusterFiller(api.WithStackedEtcdTopology()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), ) runSimpleUpgradeFlow( test, - v1alpha1.Kube129, - 
framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube129)), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), provider.WithProviderUpgrade( - provider.Redhat9Kubernetes129Template(), + provider.Redhat9Kubernetes130Template(), framework.UpdateLargerCloudStackComputeOffering(), ), ) } -// This test is skipped as registry mirror was not configured for CloudStack -func TestCloudStackKubernetes125RedhatAirgappedRegistryMirror(t *testing.T) { +func TestCloudStackKubernetes129To130StackedEtcdRedhatMultipleFieldsUpgrade(t *testing.T) { + provider := framework.NewCloudStack(t, framework.WithCloudStackRedhat9Kubernetes129()) test := framework.NewClusterE2ETest( t, - framework.NewCloudStack(t, - framework.WithCloudStackRedhat9Kubernetes125(), - framework.WithCloudStackFillers( - framework.RemoveAllCloudStackAzs(), - framework.UpdateAddCloudStackAz3(), - ), - ), - framework.WithClusterFiller( - api.WithStackedEtcdTopology(), - api.WithControlPlaneCount(1), - api.WithWorkerNodeCount(1), + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithStackedEtcdTopology()), + ) + runSimpleUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + provider.WithProviderUpgrade( + provider.Redhat9Kubernetes130Template(), + framework.UpdateLargerCloudStackComputeOffering(), ), - // framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), there is a bug that the etcd node download etcd from internet - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithRegistryMirrorEndpointAndCert(constants.CloudStackProviderName), ) - runAirgapConfigFlow(test, "10.0.0.1/8") } +// This test is skipped as registry mirror was not configured for CloudStack func TestCloudStackKubernetes126RedhatAirgappedRegistryMirror(t *testing.T) { test := framework.NewClusterE2ETest( t, diff --git a/test/framework/cloudstack.go b/test/framework/cloudstack.go index 4ca93ebefdee..a676cf1b28dc 100644 --- a/test/framework/cloudstack.go +++ b/test/framework/cloudstack.go @@ -195,6 +195,11 @@ func WithCloudStackRedhat129() CloudStackOpt { return withCloudStackKubeVersionAndOS(anywherev1.Kube129, RedHat8, nil) } +// WithCloudStackRedhat130 returns a function which can be invoked to configure the Cloudstack object to be compatible with K8s 1.30. +func WithCloudStackRedhat130() CloudStackOpt { + return withCloudStackKubeVersionAndOS(anywherev1.Kube130, RedHat8, nil) +} + // WithCloudStackRedhat9Kubernetes125 returns a function which can be invoked to configure the Cloudstack object to be compatible with K8s 1.25. func WithCloudStackRedhat9Kubernetes125() CloudStackOpt { return withCloudStackKubeVersionAndOS(anywherev1.Kube125, RedHat9, nil) @@ -220,6 +225,11 @@ func WithCloudStackRedhat9Kubernetes129() CloudStackOpt { return withCloudStackKubeVersionAndOS(anywherev1.Kube129, RedHat9, nil) } +// WithCloudStackRedhat9Kubernetes130 returns a function which can be invoked to configure the Cloudstack object to be compatible with K8s 1.30. 
+func WithCloudStackRedhat9Kubernetes130() CloudStackOpt { + return withCloudStackKubeVersionAndOS(anywherev1.Kube130, RedHat9, nil) +} + func WithCloudStackFillers(fillers ...api.CloudStackFiller) CloudStackOpt { return func(c *CloudStack) { c.fillers = append(c.fillers, fillers...) @@ -362,7 +372,12 @@ func (c *CloudStack) Redhat128Template() api.CloudStackFiller { // Redhat129Template returns cloudstack filler for 1.29 RedHat. func (c *CloudStack) Redhat129Template() api.CloudStackFiller { - return c.templateForKubeVersionAndOS(anywherev1.Kube129, RedHat9, nil) + return c.templateForKubeVersionAndOS(anywherev1.Kube129, RedHat8, nil) +} + +// Redhat130Template returns cloudstack filler for 1.30 RedHat. +func (c *CloudStack) Redhat130Template() api.CloudStackFiller { + return c.templateForKubeVersionAndOS(anywherev1.Kube130, RedHat8, nil) } // Redhat9Kubernetes125Template returns cloudstack filler for 1.25 RedHat. @@ -390,6 +405,11 @@ func (c *CloudStack) Redhat9Kubernetes129Template() api.CloudStackFiller { return c.templateForKubeVersionAndOS(anywherev1.Kube129, RedHat9, nil) } +// Redhat9Kubernetes130Template returns cloudstack filler for 1.30 RedHat. +func (c *CloudStack) Redhat9Kubernetes130Template() api.CloudStackFiller { + return c.templateForKubeVersionAndOS(anywherev1.Kube130, RedHat9, nil) +} + func buildCloudStackWorkerNodeGroupClusterFiller(machineConfigName string, workerNodeGroup *WorkerNodeGroup) api.ClusterFiller { // Set worker node group ref to cloudstack machine config workerNodeGroup.MachineConfigKind = anywherev1.CloudStackMachineConfigKind @@ -448,6 +468,12 @@ func (c *CloudStack) WithRedhat129() api.ClusterConfigFiller { return c.WithKubeVersionAndOS(anywherev1.Kube129, RedHat8, nil) } +// WithRedhat130 returns a cluster config filler that sets the kubernetes version of the cluster to 1.30 +// as well as the right redhat template for all CloudStackMachineConfigs. +func (c *CloudStack) WithRedhat130() api.ClusterConfigFiller { + return c.WithKubeVersionAndOS(anywherev1.Kube130, RedHat8, nil) +} + // WithRedhat9Kubernetes125 returns a cluster config filler that sets the kubernetes version of the cluster to 1.25 // as well as the right redhat template for all CloudStackMachineConfigs. func (c *CloudStack) WithRedhat9Kubernetes125() api.ClusterConfigFiller { @@ -478,6 +504,12 @@ func (c *CloudStack) WithRedhat9Kubernetes129() api.ClusterConfigFiller { return c.WithKubeVersionAndOS(anywherev1.Kube129, RedHat9, nil) } +// WithRedhat9Kubernetes130 returns a cluster config filler that sets the kubernetes version of the cluster to 1.30 +// as well as the right redhat template for all CloudStackMachineConfigs. +func (c *CloudStack) WithRedhat9Kubernetes130() api.ClusterConfigFiller { + return c.WithKubeVersionAndOS(anywherev1.Kube130, RedHat9, nil) +} + // WithRedhatVersion returns a cluster config filler that sets the kubernetes version of the cluster to the k8s // version provider, as well as the right redhat template for all CloudStackMachineConfigs. 
func (c *CloudStack) WithRedhatVersion(version anywherev1.KubernetesVersion) api.ClusterConfigFiller { @@ -492,6 +524,8 @@ func (c *CloudStack) WithRedhatVersion(version anywherev1.KubernetesVersion) api return c.WithRedhat128() case anywherev1.Kube129: return c.WithRedhat129() + case anywherev1.Kube130: + return c.WithRedhat130() default: return nil } From a65c62d8720d432d44d694867ce5bdb94562fcf9 Mon Sep 17 00:00:00 2001 From: Rahul Ganesh <31204974+rahulbabu95@users.noreply.github.com> Date: Tue, 14 May 2024 15:41:23 -0700 Subject: [PATCH 140/193] Use right RHEL template during 1.30 upgrade (#8154) Signed-off-by: Rahul Ganesh Co-authored-by: Rahul Ganesh --- test/e2e/vsphere_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/vsphere_test.go b/test/e2e/vsphere_test.go index e15ffc896560..195f65bba5e1 100644 --- a/test/e2e/vsphere_test.go +++ b/test/e2e/vsphere_test.go @@ -3223,7 +3223,7 @@ func TestVSphereKubernetes129To130StackedEtcdRedHatUpgrade(t *testing.T) { test, v1alpha1.Kube130, framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), - provider.WithProviderUpgrade(provider.Redhat129Template()), + provider.WithProviderUpgrade(provider.Redhat130Template()), ) } From 5ed68afd1d3d369da4bca5ddffd4702e0feb0802 Mon Sep 17 00:00:00 2001 From: Saurabh Parekh Date: Wed, 15 May 2024 12:06:25 -0700 Subject: [PATCH 141/193] Update airgapped env command in create cluster docs (#8155) --- .../cloudstack/cloudstack-getstarted.md | 11 +++++++++++ docs/content/en/docs/getting-started/docker/_index.md | 8 ++++++++ .../getting-started/nutanix/nutanix-getstarted.md | 11 +++++++++++ .../en/docs/getting-started/snow/snow-getstarted.md | 6 ++++-- .../getting-started/vsphere/vsphere-getstarted.md | 11 +++++++++++ 5 files changed, 45 insertions(+), 2 deletions(-) diff --git a/docs/content/en/docs/getting-started/cloudstack/cloudstack-getstarted.md b/docs/content/en/docs/getting-started/cloudstack/cloudstack-getstarted.md index 0dff0c2f1c2b..cb3839bcb31b 100644 --- a/docs/content/en/docs/getting-started/cloudstack/cloudstack-getstarted.md +++ b/docs/content/en/docs/getting-started/cloudstack/cloudstack-getstarted.md @@ -118,6 +118,8 @@ Follow these steps to create an EKS Anywhere cluster that can be used either as ``` 1. Create cluster + + For a regular cluster create (with internet access), type the following: ```bash eksctl anywhere create cluster \ @@ -125,6 +127,15 @@ Follow these steps to create an EKS Anywhere cluster that can be used either as # --install-packages packages.yaml \ # uncomment to install curated packages at cluster creation ``` + For an airgapped cluster create, follow [Preparation for airgapped deployments]({{< relref "../install#prepare-for-airgapped-deployments-optional" >}}) instructions, then type the following: + + ```bash + eksctl anywhere create cluster \ + -f eksa-mgmt-cluster.yaml \ + --bundles-override ./eks-anywhere-downloads/bundle-release.yaml \ + # --install-packages packages.yaml \ # uncomment to install curated packages at cluster creation + ``` + 1. Once the cluster is created you can use it with the generated `KUBECONFIG` file in your local directory: ```bash diff --git a/docs/content/en/docs/getting-started/docker/_index.md b/docs/content/en/docs/getting-started/docker/_index.md index e5363a682728..99a5449a25f7 100644 --- a/docs/content/en/docs/getting-started/docker/_index.md +++ b/docs/content/en/docs/getting-started/docker/_index.md @@ -133,10 +133,18 @@ sudo install -m 0755 ./kubectl /usr/local/bin/kubectl 1. 
Create Docker Cluster. Note the following command may take several minutes to complete. You can run the command with -v 6 to increase logging verbosity to see the progress of the command. + For a regular cluster create (with internet access), type the following: + ```bash eksctl anywhere create cluster -f $CLUSTER_NAME.yaml ``` + For an airgapped cluster create, follow [Preparation for airgapped deployments]({{< relref "../install#prepare-for-airgapped-deployments-optional" >}}) instructions, then type the following: + + ```bash + eksctl anywhere create cluster -f $CLUSTER_NAME.yaml --bundles-override ./eks-anywhere-downloads/bundle-release.yaml + ``` + Expand for sample output: ``` diff --git a/docs/content/en/docs/getting-started/nutanix/nutanix-getstarted.md b/docs/content/en/docs/getting-started/nutanix/nutanix-getstarted.md index 6d672cac3360..064bf5208d0e 100644 --- a/docs/content/en/docs/getting-started/nutanix/nutanix-getstarted.md +++ b/docs/content/en/docs/getting-started/nutanix/nutanix-getstarted.md @@ -107,9 +107,20 @@ Make sure you use single quotes around the values so that your shell does not in 1. Create cluster + For a regular cluster create (with internet access), type the following: + + ```bash + eksctl anywhere create cluster \ + -f eksa-mgmt-cluster.yaml \ + # --install-packages packages.yaml \ # uncomment to install curated packages at cluster creation + ``` + + For an airgapped cluster create, follow [Preparation for airgapped deployments]({{< relref "../install#prepare-for-airgapped-deployments-optional" >}}) instructions, then type the following: + ```bash eksctl anywhere create cluster \ -f eksa-mgmt-cluster.yaml \ + --bundles-override ./eks-anywhere-downloads/bundle-release.yaml \ # --install-packages packages.yaml \ # uncomment to install curated packages at cluster creation ``` diff --git a/docs/content/en/docs/getting-started/snow/snow-getstarted.md b/docs/content/en/docs/getting-started/snow/snow-getstarted.md index 26dd2b8186c9..52c227824b04 100644 --- a/docs/content/en/docs/getting-started/snow/snow-getstarted.md +++ b/docs/content/en/docs/getting-started/snow/snow-getstarted.md @@ -107,13 +107,15 @@ Make sure you use single quotes around the values so that your shell does not in 1. Create cluster - a. For none air-gapped environment + For a regular cluster create (with internet access), type the following: + ```bash eksctl anywhere create cluster \ -f eksa-mgmt-cluster.yaml ``` - b. For air-gapped environment + For an airgapped cluster create, follow [Preparation for airgapped deployments]({{< relref "../install#prepare-for-airgapped-deployments-optional" >}}) instructions, then type the following: + ```bash eksctl anywhere create cluster \ -f eksa-mgmt-cluster.yaml \ diff --git a/docs/content/en/docs/getting-started/vsphere/vsphere-getstarted.md b/docs/content/en/docs/getting-started/vsphere/vsphere-getstarted.md index aad6a7cccc28..18e29b4407dd 100644 --- a/docs/content/en/docs/getting-started/vsphere/vsphere-getstarted.md +++ b/docs/content/en/docs/getting-started/vsphere/vsphere-getstarted.md @@ -110,9 +110,20 @@ Make sure you use single quotes around the values so that your shell does not in 1. 
Create cluster + For a regular cluster create (with internet access), type the following: + + ```bash + eksctl anywhere create cluster \ + -f eksa-mgmt-cluster.yaml \ + # --install-packages packages.yaml \ # uncomment to install curated packages at cluster creation + ``` + + For an airgapped cluster create, follow [Preparation for airgapped deployments]({{< relref "../install#prepare-for-airgapped-deployments-optional" >}}) instructions, then type the following: + ```bash eksctl anywhere create cluster \ -f eksa-mgmt-cluster.yaml \ + --bundles-override ./eks-anywhere-downloads/bundle-release.yaml \ # --install-packages packages.yaml \ # uncomment to install curated packages at cluster creation ``` From 4fe9876b6b4ea6b57fcbfda230e985dc49d7a8ae Mon Sep 17 00:00:00 2001 From: Aravind Ramalingam <60027164+pokearu@users.noreply.github.com> Date: Wed, 15 May 2024 21:37:25 -0700 Subject: [PATCH 142/193] Adding a registry mirror packages test for vsphere (#8159) --- pkg/curatedpackages/regional_registry.go | 2 +- test/e2e/curatedpackages.go | 4 ++++ test/e2e/vsphere_test.go | 17 +++++++++++++++++ test/framework/cluster.go | 20 ++++++++++++++++++-- 4 files changed, 40 insertions(+), 3 deletions(-) diff --git a/pkg/curatedpackages/regional_registry.go b/pkg/curatedpackages/regional_registry.go index 077cac7188bf..c977282102ab 100644 --- a/pkg/curatedpackages/regional_registry.go +++ b/pkg/curatedpackages/regional_registry.go @@ -17,7 +17,7 @@ import ( const ( devRegionalECR string = "067575901363.dkr.ecr.us-west-2.amazonaws.com" devRegionalPublicECR string = "public.ecr.aws/x3k6m8v0" - stagingRegionalECR string = "TODO.dkr.ecr.us-west-2.amazonaws.com" + stagingRegionalECR string = "067575901363.dkr.ecr.us-west-2.amazonaws.com" ) var prodRegionalECRMap = map[string]string{ diff --git a/test/e2e/curatedpackages.go b/test/e2e/curatedpackages.go index b12718fde132..4ba25829bcdd 100644 --- a/test/e2e/curatedpackages.go +++ b/test/e2e/curatedpackages.go @@ -48,6 +48,10 @@ func runDisabledCuratedPackageInstallSimpleFlow(test *framework.ClusterE2ETest) test.WithCluster(runDisabledCuratedPackage) } +func runCuratedPackageInstallSimpleFlowRegistryMirror(test *framework.ClusterE2ETest) { + test.WithClusterRegistryMirror(runCuratedPackageInstall) +} + func runCuratedPackageRemoteClusterInstallSimpleFlow(test *framework.MulticlusterE2ETest) { test.CreateManagementClusterWithConfig() test.RunInWorkloadClusters(func(e *framework.WorkloadCluster) { diff --git a/test/e2e/vsphere_test.go b/test/e2e/vsphere_test.go index 195f65bba5e1..33a1de6e51c3 100644 --- a/test/e2e/vsphere_test.go +++ b/test/e2e/vsphere_test.go @@ -2030,6 +2030,23 @@ func TestVSphereKubernetes130BottlerocketRegistryMirrorOciNamespaces(t *testing. 
runRegistryMirrorConfigFlow(test) } +func TestVSphereKubernetes129UbuntuAuthenticatedRegistryMirrorCuratedPackagesSimpleFlow(t *testing.T) { + framework.CheckCuratedPackagesCredentials(t) + test := framework.NewClusterE2ETest( + t, + framework.NewVSphere(t, framework.WithUbuntu129(), framework.WithPrivateNetwork()), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithAuthenticatedRegistryMirror(constants.VSphereProviderName), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube129), + EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, + EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), + ) + runCuratedPackageInstallSimpleFlowRegistryMirror(test) +} + // Clone mode func TestVSphereKubernetes128FullClone(t *testing.T) { diskSize := 30 diff --git a/test/framework/cluster.go b/test/framework/cluster.go index cad1457101d0..4a179abb195a 100644 --- a/test/framework/cluster.go +++ b/test/framework/cluster.go @@ -65,6 +65,7 @@ const ( hardwareYamlPath = "hardware.yaml" hardwareCsvPath = "hardware.csv" EksaPackagesInstallation = "eks-anywhere-packages" + bundleReleasePathFromArtifacts = "./eks-anywhere-downloads/bundle-release.yaml" ) //go:embed testdata/oidc-roles.yaml @@ -662,7 +663,7 @@ func (e *ClusterE2ETest) DownloadImages(opts ...CommandOpt) { if getBundlesOverride() == "true" { var bundleManifestLocation string if _, err := os.Stat(defaultDownloadArtifactsOutputLocation); err == nil { - bundleManifestLocation = "eks-anywhere-downloads/bundle-release.yaml" + bundleManifestLocation = bundleReleasePathFromArtifacts } else { bundleManifestLocation = defaultBundleReleaseManifestFile } @@ -683,7 +684,7 @@ func (e *ClusterE2ETest) ImportImages(opts ...CommandOpt) { registryMirrorHost := net.JoinHostPort(registyMirrorEndpoint, registryMirrorPort) var bundleManifestLocation string if _, err := os.Stat(defaultDownloadArtifactsOutputLocation); err == nil { - bundleManifestLocation = "eks-anywhere-downloads/bundle-release.yaml" + bundleManifestLocation = bundleReleasePathFromArtifacts } else { bundleManifestLocation = defaultBundleReleaseManifestFile } @@ -1297,6 +1298,21 @@ func (e *ClusterE2ETest) WithCluster(f func(e *ClusterE2ETest)) { f(e) } +// WithClusterRegistryMirror helps with bringing up and tearing down E2E test clusters when using registry mirror. +func (e *ClusterE2ETest) WithClusterRegistryMirror(f func(e *ClusterE2ETest)) { + e.GenerateClusterConfig() + e.DownloadArtifacts() + e.ExtractDownloadedArtifacts() + e.DownloadImages() + e.ImportImages() + e.CreateCluster(WithBundlesOverride(bundleReleasePathFromArtifacts)) + defer func() { + e.GenerateSupportBundleIfTestFailed() + e.DeleteCluster(WithBundlesOverride(bundleReleasePathFromArtifacts)) + }() + f(e) +} + // Like WithCluster but does not delete the cluster. Useful for debugging. 
func (e *ClusterE2ETest) WithPersistentCluster(f func(e *ClusterE2ETest)) { configPath := e.KubeconfigFilePath() From eba65ffe779f78737135899538910515f9839ac0 Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Wed, 15 May 2024 22:32:24 -0700 Subject: [PATCH 143/193] [PR BOT] Generate release testdata files (#8160) --- .../testdata/main-bundle-release.yaml | 78 +++++++++---------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/release/cli/pkg/operations/testdata/main-bundle-release.yaml b/release/cli/pkg/operations/testdata/main-bundle-release.yaml index 94b570a4707f..0d166fa3745a 100644 --- a/release/cli/pkg/operations/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/operations/testdata/main-bundle-release.yaml @@ -57,7 +57,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-25-38-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-25-39-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -289,10 +289,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.25.16-eks-d-1-25-38-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.25.16-eks-d-1-25-39-eks-a-v0.0.0-dev-build.1 kubeVersion: v1.25.16 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-25/kubernetes-1-25-eks-38.yaml - name: kubernetes-1-25-eks-38 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-25/kubernetes-1-25-eks-39.yaml + name: kubernetes-1-25-eks-39 ova: bottlerocket: {} raw: @@ -507,7 +507,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap-snow os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-25-38-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-25-39-eks-a-v0.0.0-dev-build.1 components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml kubeVip: @@ -741,7 +741,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: upgrader os: linux - uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-25-38-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-25-39-eks-a-v0.0.0-dev-build.1 vSphere: clusterAPIController: arch: @@ -835,7 +835,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-26-34-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-26-35-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -1067,10 +1067,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.26.15-eks-d-1-26-34-eks-a-v0.0.0-dev-build.1 + uri: 
public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.26.15-eks-d-1-26-35-eks-a-v0.0.0-dev-build.1 kubeVersion: v1.26.15 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-26/kubernetes-1-26-eks-34.yaml - name: kubernetes-1-26-eks-34 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-26/kubernetes-1-26-eks-35.yaml + name: kubernetes-1-26-eks-35 ova: bottlerocket: {} raw: @@ -1285,7 +1285,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap-snow os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-26-34-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-26-35-eks-a-v0.0.0-dev-build.1 components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml kubeVip: @@ -1519,7 +1519,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: upgrader os: linux - uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-26-34-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-26-35-eks-a-v0.0.0-dev-build.1 vSphere: clusterAPIController: arch: @@ -1613,7 +1613,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-27-28-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-27-29-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -1845,10 +1845,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.27.12-eks-d-1-27-28-eks-a-v0.0.0-dev-build.1 - kubeVersion: v1.27.12 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-27/kubernetes-1-27-eks-28.yaml - name: kubernetes-1-27-eks-28 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.27.13-eks-d-1-27-29-eks-a-v0.0.0-dev-build.1 + kubeVersion: v1.27.13 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-27/kubernetes-1-27-eks-29.yaml + name: kubernetes-1-27-eks-29 ova: bottlerocket: {} raw: @@ -2063,7 +2063,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap-snow os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-27-28-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-27-29-eks-a-v0.0.0-dev-build.1 components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml kubeVip: @@ -2297,7 +2297,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: upgrader os: linux - uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-27-28-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-27-29-eks-a-v0.0.0-dev-build.1 vSphere: clusterAPIController: arch: @@ -2391,7 +2391,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: 
public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-28-21-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-28-22-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -2623,10 +2623,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.28.8-eks-d-1-28-21-eks-a-v0.0.0-dev-build.1 - kubeVersion: v1.28.8 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-28/kubernetes-1-28-eks-21.yaml - name: kubernetes-1-28-eks-21 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.28.9-eks-d-1-28-22-eks-a-v0.0.0-dev-build.1 + kubeVersion: v1.28.9 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-28/kubernetes-1-28-eks-22.yaml + name: kubernetes-1-28-eks-22 ova: bottlerocket: {} raw: @@ -2841,7 +2841,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap-snow os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-28-21-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-28-22-eks-a-v0.0.0-dev-build.1 components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml kubeVip: @@ -3075,7 +3075,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: upgrader os: linux - uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-28-21-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-28-22-eks-a-v0.0.0-dev-build.1 vSphere: clusterAPIController: arch: @@ -3169,7 +3169,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-29-10-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-29-11-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -3401,10 +3401,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.29.3-eks-d-1-29-10-eks-a-v0.0.0-dev-build.1 - kubeVersion: v1.29.3 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-29/kubernetes-1-29-eks-10.yaml - name: kubernetes-1-29-eks-10 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.29.4-eks-d-1-29-11-eks-a-v0.0.0-dev-build.1 + kubeVersion: v1.29.4 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-29/kubernetes-1-29-eks-11.yaml + name: kubernetes-1-29-eks-11 ova: bottlerocket: {} raw: @@ -3619,7 +3619,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap-snow os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-29-10-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-29-11-eks-a-v0.0.0-dev-build.1 components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml kubeVip: @@ -3853,7 +3853,7 @@ spec: 
imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: upgrader os: linux - uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-29-10-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-29-11-eks-a-v0.0.0-dev-build.1 vSphere: clusterAPIController: arch: @@ -3947,7 +3947,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-30-3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-30-4-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -4179,10 +4179,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.30.0-eks-d-1-30-3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.30.0-eks-d-1-30-4-eks-a-v0.0.0-dev-build.1 kubeVersion: v1.30.0 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-30/kubernetes-1-30-eks-3.yaml - name: kubernetes-1-30-eks-3 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-30/kubernetes-1-30-eks-4.yaml + name: kubernetes-1-30-eks-4 ova: bottlerocket: {} raw: @@ -4397,7 +4397,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap-snow os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-30-3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-30-4-eks-a-v0.0.0-dev-build.1 components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml kubeVip: @@ -4631,7 +4631,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: upgrader os: linux - uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-30-3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-30-4-eks-a-v0.0.0-dev-build.1 vSphere: clusterAPIController: arch: From ce336c3d3d5e8e66e228d08aa6c9584027c42bd7 Mon Sep 17 00:00:00 2001 From: Saurabh Parekh Date: Fri, 17 May 2024 10:26:28 -0700 Subject: [PATCH 144/193] Document steps to expose metrics for EKS-A components (#7970) * Document steps to expose metrics for EKS-A components * Add design doc for securely exposing metrics of all EKS-A components --- designs/expose-metrics.md | 371 ++++++++++++++++++ designs/images/expose-metrics.png | Bin 0 -> 316838 bytes .../observability/expose-metrics.md | 175 +++++++++ .../clustermgmt/observability/overview.md | 3 +- 4 files changed, 548 insertions(+), 1 deletion(-) create mode 100644 designs/expose-metrics.md create mode 100644 designs/images/expose-metrics.png create mode 100644 docs/content/en/docs/clustermgmt/observability/expose-metrics.md diff --git a/designs/expose-metrics.md b/designs/expose-metrics.md new file mode 100644 index 000000000000..0fb8b41b29d6 --- /dev/null +++ b/designs/expose-metrics.md @@ -0,0 +1,371 @@ +# Expose metrics for all EKS Anywhere components securely + +## Problem Statement + +Customers want to scrape the metrics of various EKS Anywhere components with Prometheus in order to understand and monitor the state of a cluster. 
In EKS Anywhere, metrics of only some Kubernetes system components (kube-apiserver, kubelet, coredns, kube-vip, cert-manager, cilium) are exposed by default. Other system components such as `kube-controller-manager` and `kube-scheduler` are configured with the default `--bind-address=127.0.0.1` (localhost).

Below are some examples of customer requests for exposing metrics:

* https://github.com/aws/eks-anywhere/issues/4299
* https://github.com/aws/eks-anywhere/issues/4405
* https://github.com/aws/eks-anywhere/issues/7106

## Goals and Objectives

As an EKS Anywhere user, I would like to:

* Expose all Kubernetes system component metrics securely with authentication and authorization enabled
* Expose metrics from all EKS Anywhere components, CAPI etcd components, and CAPI provider-specific components securely

## Statement of Scope

**In Scope:**

Exposing metrics securely for the following components:

1. Kubernetes system components

* kube-controller-manager
* kube-scheduler
* kube-proxy

2. EKS Anywhere components

* eks-anywhere-controller-manager controller
* eks-anywhere-packages controller

3. CAPI etcd components

* etcdadm-bootstrap-provider controller
* etcdadm-controller-controller-manager controller

4. CAPI provider-specific components

* capt-controller-manager controller
* capc-controller-manager controller
* capv-controller-manager controller
* capx-controller-manager controller

**Out of Scope:**

The following components are not considered for exposing metrics securely:

* Snow provider (capas) and Docker provider (capd)
* ECR-credential-provider

**Future Scope:**

* Securely expose metrics for all other components (kube-vip, coredns, cilium, and cert-manager)

## Current State of EKS Anywhere components

![table](images/expose-metrics.png)

## Overview of Solution

There are two general solutions proposed for different components here:

* For Kubernetes system components, document the steps to configure a proxy as a daemonset. No code changes are needed in EKS Anywhere
* For all other controller-runtime based CAPI and EKS Anywhere components, implement the CAPI [diagnostics](https://main.cluster-api.sigs.k8s.io/tasks/diagnostics) feature and bind the diagnostics address to 0.0.0.0

For Kubernetes system components, we don't want to simply change the default bind address to 0.0.0.0: binding on all interfaces could expose the metrics publicly on any node that has an interface reachable from the internet, even if the component has authentication and authorization enabled (check this [issue](https://github.com/kubernetes/kubeadm/issues/2244#issuecomment-763294722) for more details). It also goes against the principle of security [hardening](https://en.wikipedia.org/wiki/Hardening_(computing)), where the default configuration should be kept minimal to reduce the attack surface of the system. For all other controller-runtime based components, it is best to implement the diagnostics feature that CAPI has introduced, both to match the core CAPI controllers and to keep behavior consistent across all of these components. It also removes the current dependency we have on [kube-rbac-proxy](https://github.com/brancz/kube-rbac-proxy) for the capc controller. Overall, there will be no API changes in the cluster spec for any of the components.
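As context for the split above, a quick way to see which components currently listen only on loopback is to inspect the listening sockets on a control plane node. This is an illustrative check assuming the default Kubernetes ports used elsewhere in this doc (10257 for kube-controller-manager, 10259 for kube-scheduler, 10249 for kube-proxy metrics):

```bash
# Run on a control plane node: list listening TCP sockets for the
# default metrics ports and confirm they are bound to 127.0.0.1.
sudo ss -ltnp | grep -E ':(10249|10257|10259)\b'
```

Components that show `127.0.0.1:<port>` here are the ones that need either the proxy daemonset or the diagnostics feature described below.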
#### **Kube-Apiserver and Kubelet:**

These components already perform bearer token authentication and RBAC authorization for client requests, and they are already configured to allow listening on all interfaces and IP address families. No further action needs to be taken to expose metrics for these components securely.

#### **Kube-Controller-Manager, Kube-Scheduler and Kube-Proxy:**

Kube-controller-manager and kube-scheduler already perform bearer token authentication and RBAC authorization, whereas kube-proxy does not, but all three components listen only on the localhost (127.0.0.1) for client requests. We can document the steps for configuring a proxy as a Daemonset on the cluster which forwards client requests to the metrics endpoint of each component. The proxy pods must run in the `hostNetwork` so that they can access the loopback interfaces of the corresponding pods. There will be no changes made in EKS Anywhere to configure these for the customers.

**Documentation Steps:**

1. Create a cluster role object which gives permissions to get the metrics

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: metrics-reader
rules:
  - nonResourceURLs:
      - "/metrics"
    verbs:
      - get
```

2. Create a cluster role binding object which binds the above cluster role to the service account of the monitoring pod

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: metrics-reader-binding
subjects:
- kind: ServiceAccount
  name: prometheus-server
  namespace: observability
roleRef:
  kind: ClusterRole
  name: metrics-reader
  apiGroup: rbac.authorization.k8s.io
```

3. Create a config map object which stores the proxy configuration to route the requests to the components

**HAProxy configuration example:**

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: metrics-proxy
data:
  haproxy.cfg: |
    defaults
      mode http
      timeout connect 5000ms
      timeout client 5000ms
      timeout server 5000ms
      default-server maxconn 10

    frontend kube-proxy
      bind ${NODE_IP}:10249
      http-request deny if !{ path /metrics }
      default_backend kube-proxy
    backend kube-proxy
      server kube-proxy 127.0.0.1:10249 check

    frontend kube-controller-manager
      bind ${NODE_IP}:10257
      http-request deny if !{ path /metrics }
      default_backend kube-controller-manager
    backend kube-controller-manager
      server kube-controller-manager 127.0.0.1:10257 ssl verify none check

    frontend kube-scheduler
      bind ${NODE_IP}:10259
      http-request deny if !{ path /metrics }
      default_backend kube-scheduler
    backend kube-scheduler
      server kube-scheduler 127.0.0.1:10259 ssl verify none check
```
4. Create a Daemonset object to deploy the proxy so that metrics are exposed on all the nodes

**HAProxy daemonset example:**

```yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: metrics-proxy
spec:
  selector:
    matchLabels:
      app: metrics-proxy
  template:
    metadata:
      labels:
        app: metrics-proxy
    spec:
      tolerations:
        - key: node-role.kubernetes.io/control-plane
          operator: Exists
          effect: NoSchedule
      hostNetwork: true
      containers:
        - name: haproxy
          image: haproxy:2.9
          env:
            - name: NODE_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.hostIP
          ports:
            - name: kube-proxy
              containerPort: 10249
            - name: kube-ctrl-mgr
              containerPort: 10257
            - name: kube-scheduler
              containerPort: 10259
          volumeMounts:
            - mountPath: "/usr/local/etc/haproxy"
              name: haproxy-config
      volumes:
        - configMap:
            name: metrics-proxy
          name: haproxy-config
```

5. Verify that the metrics are exposed to the monitoring pods by running the following command from the monitoring pod's container

```bash
export TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
curl -H "Authorization: Bearer ${TOKEN}" http://{node-IP}:{component-port}/metrics
```

If a customer doesn't need to expose the kube-proxy metrics, the daemonset can be configured to run the proxy pods on only the control plane nodes using [node labels](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#built-in-node-labels) and a [node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector), or using [node affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity). Since kube-proxy is itself a daemonset that runs on all nodes, exposing its metrics requires running the metrics proxy on all nodes.

This solution is also extensible, as the above steps can be applied to other external or custom components which don't expose a metrics endpoint securely by default. It also avoids the security risk of exposing metrics to the internet that comes with configuring the bind-address for kube-controller-manager and kube-scheduler, which is explained below in the alternate solutions section.

Another advantage is that it gives customers the flexibility to choose any proxy based on their preference. Some of the most popular proxies include [nginx](https://github.com/nginx-proxy/nginx-proxy), [envoy](https://github.com/envoyproxy/envoy), [haproxy](https://github.com/haproxy/haproxy), [traefik](https://github.com/traefik/traefik), etc. We will document an example config for HAProxy, and customers can configure other proxies similarly.

One drawback is that it is a poor user experience, as customers need to configure these additional objects for each of their clusters. However, the number of users who require this feature is not large enough to justify supporting it natively in EKS Anywhere, so documenting this workaround is a reasonable trade-off. Even kubeadm doesn't support it for the same reason; for more details, check out this [issue](https://github.com/kubernetes/kubeadm/issues/2388#issuecomment-776073834).

A disadvantage is that Prometheus associates metrics with pod names, but since the pods are behind a proxy, the proxy's pod name will be used instead for the metrics (see this [issue](https://github.com/prometheus-operator/kube-prometheus/issues/718#issuecomment-776360908) for more details).
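To make the scraping side concrete, below is a minimal Prometheus scrape config sketch for one of the proxied endpoints. It assumes the daemonset above is running, that Prometheus uses the `prometheus-server` service account bound to the `metrics-reader` cluster role from step 2, and that node discovery is available; the job name and relabeling are illustrative, not a fixed EKS Anywhere configuration:

```yaml
scrape_configs:
  - job_name: kube-controller-manager
    kubernetes_sd_configs:
      - role: node   # discovers each node at <node-ip>:10250 by default
    bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
    relabel_configs:
      # Rewrite the default kubelet port to the proxied
      # kube-controller-manager metrics port.
      - source_labels: [__address__]
        regex: '(.*):10250'
        replacement: '${1}:10257'
        target_label: __address__
```

The same pattern applies to kube-scheduler (10259) and kube-proxy (10249) by swapping the target port in the relabel rule.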
#### **EKS Anywhere Controller Manager and EKS Anywhere Packages:**

These components do not perform any kind of authentication or authorization for client requests and listen only on the localhost by default. We will implement the diagnostics feature to expose metrics securely on all interfaces.

#### **Etcdadm Bootstrap Provider Controller Manager and Etcdadm Controller Controller Manager:**

These components also do not perform any kind of authentication or authorization for client requests and listen only on the localhost by default. We will implement the diagnostics feature to expose metrics securely on all interfaces.

#### **Capi-Controller-Manager, Capi-Kubeadm-Bootstrap-Controller-Manager and Capi-Kubeadm-Control-Plane-Controller-Manager:**

These components already implement the diagnostics feature to expose metrics securely on all interfaces. No further action needs to be taken to expose metrics for these components securely.

#### **EKS Anywhere supported CAPI providers (capv, capx, capt, capc):**

For capc, we already have kube-rbac-proxy implemented as a secure way to expose metrics, but it listens only on the localhost. We can remove the dependency on kube-rbac-proxy for capc and implement the diagnostics feature instead. This would enable us to expose metrics securely on all interfaces at the diagnostics address `:8443`.

For capv, the diagnostics feature has already been implemented in the [latest](https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/tag/v1.9.2) release, but our `eks-anywhere-build-tooling` repo points to an [older release](https://github.com/aws/eks-anywhere-build-tooling/blob/main/projects/kubernetes-sigs/cluster-api-provider-vsphere/GIT_TAG) version which does not support the diagnostics feature and defaults to `127.0.0.1` for the `--metrics-bind-addr` flag. We would just need to bump the capv version to the latest release.

For capx, the diagnostics feature has already been implemented and we also point to the latest release in the `eks-anywhere-build-tooling` repo. No further action needs to be taken to securely expose metrics for capx.

For capt, it does not perform any kind of authentication or authorization for client requests and listens only on the localhost by default. We can implement the diagnostics feature to expose metrics securely on all interfaces.

## Implementation Details

**Diagnostics Feature for all EKS Anywhere, CAPI etcd and CAPI provider-specific controllers:**

Diagnostics feature - https://main.cluster-api.sigs.k8s.io/tasks/diagnostics

```yaml
spec:
  containers:
  - command:
    - /manager
    args:
    - --diagnostics-address=${CAPI_DIAGNOSTICS_ADDRESS:=:8443}
    - --insecure-diagnostics=false
    ports:
    - containerPort: 8443
      name: metrics
      protocol: TCP
    ...
```

Add the above args and metrics port to the controller manager deployment in `config/manager/manager.yaml`

```yaml
rules:
- apiGroups:
  - authentication.k8s.io
  resources:
  - tokenreviews
  verbs:
  - create
- apiGroups:
  - authorization.k8s.io
  resources:
  - subjectaccessreviews
  verbs:
  - create
```

Add the above rules to the manager cluster role in `config/rbac/role.yaml`

```go
// Add RBAC for the authorized diagnostics endpoint.
// +kubebuilder:rbac:groups=authentication.k8s.io,resources=tokenreviews,verbs=create
// +kubebuilder:rbac:groups=authorization.k8s.io,resources=subjectaccessreviews,verbs=create

func main() {
	flags.AddDiagnosticsOptions(fs, &diagnosticsOptions)
}
```

Add the diagnostics options CAPI flags and RBAC [markers](https://book.kubebuilder.io/reference/markers/rbac) to the controller binary in `main.go`
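Once a controller serves the diagnostics endpoint on `:8443`, an in-cluster monitoring stack can scrape it over HTTPS using a service account token. As a minimal sketch, assuming the Prometheus Operator is installed and the controller's Service exposes a `metrics` port (the metadata, namespace and label selector below are illustrative):

```yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: capi-controller-manager
  namespace: capi-system
spec:
  selector:
    matchLabels:
      cluster.x-k8s.io/provider: cluster-api
  endpoints:
    - port: metrics
      scheme: https
      # The diagnostics endpoint authorizes scrapes via TokenReview /
      # SubjectAccessReview, so present the service account token.
      bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
      tlsConfig:
        # The endpoint serves a self-signed certificate by default.
        insecureSkipVerify: true
```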
## Testing

* E2E tests for the diagnostics feature
* Unit tests for any additional utility functions implemented

## Documentation

* Add the necessary steps for configuring the metrics proxy daemonset in the cluster to the EKS Anywhere docs
* We can reference the [CAPI](https://main.cluster-api.sigs.k8s.io/tasks/diagnostics#scraping-metrics) documentation on the diagnostics feature for scraping metrics from the controllers

## Alternate Solutions Considered

### Using kube-rbac-proxy for all components

This alternative uses kube-rbac-proxy for all EKS Anywhere components to expose metrics securely by enforcing authentication and RBAC policies. In this approach, metrics requests are routed through kube-rbac-proxy, which sits between the client and the component's metrics endpoint. Kube-rbac-proxy authenticates the client using various authentication mechanisms such as bearer tokens, client TLS certificates, request header authentication, etc. It then verifies the client's RBAC permissions and only allows access to the metrics endpoint if the client has the necessary privileges. For more details, check out option 2 [here](https://quip-amazon.com/II8XAy90Pq2v/Expose-metrics-of-EKS-A-components#temp:C:fRf4452d35522194e5bb535f4d14)

This approach enables authentication and RBAC authorization for all the components, but requires maintaining an additional upstream dependency. Some Kubernetes system components already have authn/authz enabled by default, and CAPI also introduced the diagnostics feature recently, which is the preferred way of enabling authn/authz for controller-runtime based components. So using kube-rbac-proxy as an additional layer of security for these components is not necessary.

Another thing to note is that the kube-rbac-proxy project is in the alpha stage and may have significant changes in the future. So it's better not to take a dependency on it if better alternatives are available.

### Configurable bind-address for kube-scheduler and kube-controller-manager

We can allow customers to configure the `--bind-address` flag for these components through the cluster spec to allow listening on all interfaces and IP address families, so that Prometheus or any other component with the appropriate RBAC permissions can scrape the metrics endpoint of these components securely.

Currently, we cannot make the `--metrics-bind-address` flag configurable for kube-proxy through the cluster spec, as CAPI doesn't support customizing the kube-proxy configuration. In the future, we can either patch CAPI to enable support for configuring kube-proxy or [disable](https://github.com/kubernetes-sigs/cluster-api/issues/4512#issuecomment-1267092583) installing kube-proxy from CAPI and install it in EKS Anywhere instead.

**Schema:**

```yaml
apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: Cluster
metadata:
  name: mgmt-cluster
spec:
  ...
  controlPlaneConfiguration:
    ...
    controllerManagerExtraArgs:
      bindAddress: "0.0.0.0"
    schedulerExtraArgs:
      bindAddress: "0.0.0.0"
```
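For illustration, these cluster spec fields would ultimately map onto the kubeadm `ClusterConfiguration` rendered by CAPI roughly as follows. This is a sketch of the target shape per the kubeadm control-plane-flags docs linked below, not the exact generated output:

```yaml
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
controllerManager:
  extraArgs:
    # Listen on all interfaces instead of the default 127.0.0.1.
    bind-address: "0.0.0.0"
scheduler:
  extraArgs:
    bind-address: "0.0.0.0"
```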
The main concern with making the bind-address configurable is that binding on all interfaces might expose the metrics publicly over the internet on a node which has any interface exposed to the internet. In a cluster with a single control plane node, binding to the control plane node's IP address would solve the issue, but that wouldn't work for HA clusters with multiple control plane nodes, which is usually the case in a production environment. Another solution would be to apply [firewall rules](https://github.com/kubernetes/kubeadm/issues/2244#issuecomment-763533964) on every node before binding to 0.0.0.0, but this is not a good idea either.

Another thing to note is that it is tough to validate the range of IP addresses that the bind address should be allowed to take. Even Kubernetes does not do any such validation for these components. The only validation that can be done is that the address is in a proper IPv4/IPv6 format. If a user configures some unreachable address, it would be hard to debug the resulting issue with the component.

**Implementation:**

```yaml
controllerManagerExtraArgs:
  "bind-address": "0.0.0.0"
schedulerExtraArgs:
  "bind-address": "0.0.0.0"
```

These flags will be fetched from the cluster spec and added to the [controllerManagerExtraArgs](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/#controllermanager-flags) and [schedulerExtraArgs](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/#scheduler-flags) maps in the `ClusterConfiguration` object during create and upgrade operations when generating the control plane CAPI spec.

**Testing:**

* E2E tests will be required to test whether the flags are actually configured for the Kubernetes system components

**Validations:**

* Validate that the `bind-address` flag is in a proper format, similar to how Kubernetes does it [here](https://github.com/kubernetes/kubernetes/blob/f4e246bc93ffb68b33ed67c7896c379efa4207e7/pkg/proxy/apis/config/validation/validation.go#L274) for kube-proxy

**Documentation:**

We can add `controlPlaneConfiguration.controllerManagerExtraArgs.bindAddress` and `controlPlaneConfiguration.schedulerExtraArgs.bindAddress` as optional configuration in our EKS Anywhere docs

## References

* https://kubernetes.io/docs/concepts/cluster-administration/system-metrics/
* https://github.com/kubernetes/kubeadm/issues/1285#issuecomment-459157839
* https://github.com/kubernetes-sigs/controller-runtime/pull/2407
zwu4|lgN>(W25fh@3&r)3u)tWACy*lMnCe@H+FMZgJpW#JVzx!^5zHu^A(`js(1vCW z@*3sNK$k%a4M24UiUw8`4r4MHo&>1%bf#aNFPpHgfO#i05TViG!H@HRN1M!CqyCsg zi+-(h7>5PFaew0o7`EQuM`PXvqkO5MAs6mxkN$GQi_ZZHvs6oNE`CSqVl)l`B2-G? zEBz5Eq-Frs#|OlJT~<#o@z0ZB48LE3guAS!-@HI>=g38YMkW=6JNo$peuV}9Y5CW+ zvK?@t)_h)G`56&`-mmTTSgno#WcF)k0@ zh(azCO~WgYIP5^NKI@m*+=T11CcRzEiPRxLl^Or~tRE**65ARTPnN-0JqysP_kY9h z%lRh#Lid!z?7}oTLy!y7Pw|=T7%g1r>VuNiOPuv;l}!pO8~p#otK_^>SWuP+&10Xu zKMZ8Ei7a^?;2C$QHi8U`_LP<@%U`KWINYV$dbLB^>Qe*~uQ_4ucj?8=5FlY9b3fC< zzS8saz8uk6PN_}MBuvvcW>{<>oXJhD)!Ymabx)`sU5Wtqf%fPg-wBq(0(d4IBOeF% z_4y7|ex&kX;^$hHVS!xG^25plW;p4^uSA4qSRu}w>=9WjR3(4+IKcfCGw1O`JwNpb z&GUFa5({{2yroq~Q0mR;NbwNRSXR1io*;o$w^jVL~2^>se$>=Dc9tZe*KkGE+DNw`#*pB!9zx-6&lzCvdj5h4;Sow-IMA(u060eP)$OXSwLPG>g*+&!X zbBIgWY`SKa;9)f8EplZ3Y1Sqy8=ZrL$D9$mCc;_s>d-$m2@>T9rYWSPppc}6QoMIY zK?smAZ3PGXwFq28kEo^KA-g~uPsdWBL^B>c?l%@cpomYA+HYkQsLd(WIJC-a+{7S& zA9CG8bC&yo8@K^7*2{Uyq0|f-Fvy&W9>9g3E}CbZJpQ>Yn*p_-a(JxLlLf)0YJ6!f zSOfmvR_~pBqpj^~7v`AorGNB(Sa8dfs6OZrFro@m)-aEOHmBWK>qtCSnh&7XZx0Tp z?a?wED_sr>Ph@jc-0=SrNHQ>T!!Ey<`;$tZeu=ZCX)FUehE4a$767y|+t)9Jc_Y!? z;9)Fa(e#7~E5g8-kyiw92myXNQUukJtJPH@i1}9{aBkOI<-K^KDe+O9$+yg`_dc!Z zE9PT{9OQ7t$Hr$%1OXKIAayxL@d!^bAX8TA_-E-N?$1`*t5FSERX%(^C(?IUi@AqC(VE!#7$QRV(1ugrETbc`0QM-+D~c)k2WSR98|v*# z5GCZQ1(NckH2)ka3tn5|k+Z|JSBnf3G?;yTeW(KEXpqu9W}`~mFCvOJkd{H~#1LG7 z3v_K;4Ux1?T@fKLYDJW2u(_df8oL#UyZ#jM7j9KCvl#M za;=x^*D?|3tF{IT<<&$fky^2}av1ybL}huQ){Ieq9D#CCC;$L#%8uP=?|#;v9r4a$ z_Mq99f;;De4JGlz7jswL?8r4eRkEDbug@_@?XCbV^Uz0dO^-}fof+qmQN}dw!2q~5T--j zC)_RnU{d+XruTkGa#5_EF>ln52?Dyxh&(G8ZbrTxor4v5flGDSTc{VYhjM^HlJM50UQU1(RKJ9!t3S< z96Bt=>Kr}yQe)I4s?U$WAk?~LsRU3wqwBtRv_SBgS9c;YYRscicHp4PHUZR%e`&eX zEK$%np3v&x*@T+F(Sg=Z`Ae!%w|@0R@;02Atr&EL>Nvu#Yl%LB9i#Nl+;OHo;d5nCMkjOygC2lA51ShRE!dWexb;u zVG*_6>#`6>vu-*K6-r->_NY8tTcZCnTpk_=fQ5Fa$huad4SKK*ZupLgX#)L0jWx{^ zl(j%a0WqCl;Ae?Ln6NpwuAV1P=>u6uz1}VE02t`enGFrq-d>{$3dB@EjWc33ZDG z*TWi11zu?y1hK!uQAsEv%2Ecc{JIFnm05FlAnIjDoOvBjpp#|Ly*bfS=?C=C7WQZ0PlQ|rg@*!PPMu%!P_EmnBnXv z=1^r4%$({jr?tf_`tB%w63a3#rsVxTwmcOw^7Loa>QSFRtWuH#)I5*2z=2F%g7j_; zDT8ocWbr|CxQ%O#!XFOUcDT^RQd8YS$@Q)^O)kNeG@t~c87ba}vC@Pno>p3zcLpW6 zncO*$&5b^LD)S6Omc{6dFoZC^`iN<1_`{J4-8X+AI!|!ZRHRZTTiDd|^|tljddRew z#O7$^o5z@`IiOMtIWw*;R%9Z&ks|$cbLb~u8y|+$4>=xGkW*N`O|#Q|e}UUp@SS?R zJqO+T`ZzRfwm(au3$68Q=3&G;h?se28!y~Jw1EniN3!(@lp=2cN&*AoUvreNey6q>bpv--K>K2bl~IF;N{fm)8^ID4=ZOPQ$QG961V_LKP# z7X5N!6GaQO2kht!YRpn7HPZB_p(&wGb#y1n^D|9yUGU8pLQ+e#StKbDx)nDu3Tw}*FO@xDeQaa+K2L36 zv^{u>yGdyGF*W{vSDsN8&oAoiyOxoSrk=U0MB#w^sh%Y4iFB%h*}b|3944zPE4SK? 
zf-~95D2~H%pO2>K#1djnGkMXT^{~J}Jk3Gr&+|k@tLont)Y;Sbl2clQ2p5dcz97Qv zbtyu2r__aJ*&KD}1eGJg0u+fu~Nu z%zZ&2*7&M8k4JoURbkmiTL=rPb2aon{W&zpp59=IE{qdsFHiaM(P3>xjjV?uiYHF| zDH5^?8waoQe8(8*X;Q-aNJLRqfY`g;C3q6Ykr&heRN`lTZgZOj;QF<*gf3h1bC?*= zd<50|jJlBMF`EGlnVUwAFO}kVxwd{N=8hZ;yB7D@0u zV{HI3trLGRcihmCnDTk+-rXm$7xDulVPH-15x2a+*vCK1=)VW5jEc` zfa28ZaLYpS>uBJT4CU!SpXy($QN?Fg!i|62X1xA@*Sl{9xN`|=u*KuB!q=8@<5lh# z3Zul5DYy$&kq3L`M%kHClnkA#d_k zGDr*wH(Rie^##2QI?S@X*0AW$S}~sTl8fQh)COgXJl+er^YU+ikCisB}O@D_1X-i{Bq-JVQbuvZE3Fj1pyz^NuVFy<>|?J0a}o+Qdi=2 z%)FSO2f;^C;Atna0a%KT7@fZ!K5}9DE0|aDOXiV#qCCVIj6Y>DWg&HT`v;XoY>Z^N zOLO%5cWHge)RUb-cqay5Q=uhrsEI_(D3IEak{J_X1>giy_)3me*sc&A;}!&n7NC=?Nb6Wl$OAx)Yp4O8?rdCzY!{Aqgmeg`Y;*mRsnpc z6#HW&76$jN=QF~fm7%z77Ke-jO<|Kag8Y$p?xU##NQ(?YUkyW3*okem3%~ogFu*mk zym*S2WoRVtd`Y1mndrj?vg<1B&@q=h4=VZhPgs5d6w`Du)HIM>AkhOvvX3jVa%TfO z+p*x=kDN!;aWcX|0vF?)bRJ9*eB@7Jz7F0!`N>e+(eQ;rGp5S_LUyH}3-fE{aYQcT zKgGEan(+Y=P@K~-z>SlbN0H-Jge?AZ?GOL~Fpe92} zP*D_O$x=NyBC3jkU-}nl7_{2x+{(>#FdoxO^Wj`o<`{#P33hUv< zYsxmtphwm4jsX)mcyStvGdl@4>U<`Vd5bj!JJXy2vUgK{-6SJc+@Rr1whvnh&O$`K zV}Sl&X1&jFC6Aie>TCM@h^^pBF#N$&J6m>hAoH6fa4;1q)0_99oMns|%my6(C5(il z=w*b1uOt}xsUi%)jcr`bKs8 z`t;4X26A6jPjRLJb{cWMmE;ekF}ZxUURGC3qiS}M@pfuy{&e=h%wyOLKiF=d;3F;c z>U4jEBcyM=7he##k211 z29^P4_S?W(#AI?;qGFPe;Gq1zO6P~%$@J6U$=+@Vik~E|Q%uuCTjJIh>fK2h`~?j# z;WiElO4AEjcNpGfEg)D){G&>3U}@-gn%?PCSFZt@98cknfK}Hv1GZyDm}HzY1DUpq zj;!Qryj<6@^t}VBaEAMw&LjsI zN$3z)XGd-=P9UL%EEY_@nj9s{)_Bp9klfXpYkm4(^^%rI7$OoppsE0Yxi7uxQ;7_%`R6qL77XE(@1%+j6nP zmLr%S;S_c})=}kRsp*(g+Xi%-6Q@A43c#xufWvwF3m7`aiA$@{zEw8rpo!&uDX?U?`hxT~O^6{! z7MY2M23WrPQb=RE@AZQECirfKI$CzMUK^#a>&pH0gH{WLXc^3*i;pqJUVB)9ZuJ%C zBsha=t-ph+@E2oVxe+Zr1H-|4<)z;chl7MKofTN=yAyumAFV3$;%$_PT+hqC2cD*@ zBY^q#G5r$)HZ0F(K&-4|&ov)q?p1flTSSpNkLKCBceZfBlj2r?hM2pEPw72#2`w;d zBQi0(S~Z4l374VYW2BbH|20MDn^bAMk3bH z>*}&RlN^`c0`#lgMs9sJ87Jf9Tdr-C>`!^75s#aaeWD;A4M1Q!JF;OYBmg37k5v4* zZHEG?l{DVI;Jy6u!9~6kVkH8oCtz!v0kq<~{fG*!B+?GrN2<@-Y(&v4Tw05h4vIQT zRUc?_4_QndwC(Fhm>-kzePvFt@SE^ogS3mb?10HLz=v?PaTtJfon#;Gp93>yEynm1-4 zkdNiQQw|aRO1o1Z+I8^})upwwkNTDLNiftzUcC%! 
z=ww`h1Bnme1`)m87HLf2`)pyal~a$Xa4Y~1^OV9G0qx5DbSkn+YStFK*;rXQ4UXfl zj27<_T-W)6hGuoQkmmtR;P~^29gb>^;clrrP^UP14|R2Bi=vPKPjq{v{EzLI*w8xh zj|J}*jt`n19FHlI{97l=t9r0f+UL?V{7MYSCcP-)%qt(4Zsf0!43ciTF#do+g6}_R z1nhHRT|wE8DswceaRi9=^1A_6SS`PSKyu3Wl;E>+SHIk6E#yn=3Pk?H6wUW@JeErl>GIkrSu23^dT2$XwvFpAfHDVT z$RQ2zO?!>{(iE zp%O3U-0e<9qml2+E1SC7CMDQ!f0u7hTwMZU%jCNWaTQqfX~mZpPe8pt;c0+`P^5PF zFm;s}s(EQfko@dbFA&w^pj`k*o9i9zM1o3#G5=+*$=lF^j_|Dcw;nm7C^hvE*7<%t zV!ZFq!l-QPROBcXfs=XwxGnyKXRmwQDjVBnayBesD7(<54yxU%07dvDo_=C)3y7H zYStQ`P=14AEz8HTX>GytP({J)Uh6juAVy|$fAs}eZ!okqY<*bQp1$vnp-@IklDR7{ zm~}1=a!-GyN8L#~JXa7?^XiAVO;bsYY)Od>X9QMS)~ovqC~io@myps|Nx}Ef*fzgN z%}$AEZ*FY$diIKMzU5=^{Bvh3o2UDAxJwN3vT%9WK?g~2k>+;Ibm{lYaC(3Jy>lr0 zAAR%&3)Gq43P{-ezXl12#1O-k^C$}vN6Gm63|@Zv=qAezlH`8IVl{H5Is*X$tUxx= zlMIIDZEoT-N@=Sv*I&uZM9?aK~LsxdmhI5&B>-`8b z?UhaC+Yt2f=?p&21nT%D$U$h9T@$Gul2JVxL&PI2>#Z%b&&uW%G#!TjDots;g3l1B zqfmKFLM&Hv7c9}O?~bXOJaaDC*ZZB#&E?;k_eyRZZ%DdzQ{*=OK@IfH1Z)(}`dYI#I^HSULYb$_PrYOF6 z*DO3;-&yhT*<(yf`E(YBdy(xlt^>Ik7ete-9mCk>&3qXyU6g%0n%(W>a_lhRmUnyV z@EhE7Ol_1F1aaiKMB?)xTv9mk&Y1Mtf$owX0`)GpoX$(-^%c6nPd07neAHA>!hh|( zeYZ|ab;7}(+V!8u{p)lDZIruIJd(EW7I7s75rSwY0|95#Afnw}ACQf<2^>Z_(}4{A zo(OTJih=0}N=2RMKq>q(%t?4U89RBdzgP*z1M) z@a!A}Y3AV2P^l^(`d~Hoz}^S(c2!sRB{6eMapcJ2M5Eh-Aj0BMT~?<;!A6@-`wv;M zxxUrIurCQUJ=X6@>l`9kk})&vWpEtC<(^p!d%h8y4X~l$pD`F2Ddr^clnF{K+TEIV z!L}yMoodF-bL6prs@Xg0`LT4bSu4wU!pH%hem;DDM zw?ej*RtOJ^gl)WLwUr>6QK{#LaNn&qZiYQBp>7c9ooE8F|22~wf;RIL&p=<|ntOn$WQu@r zJu@vI1w07W3%=v?=DB+VCFnCJdYBg7BrD}s@V1cw3P>k?1Tq6!ZbJzK*s%WlmVy6> zraLHw!a-Em|`{kn`SYN5sXxpz?8q>LXWT)YfcviJ-M|I@jKFl6G*)B@2CYIf_w@a^dd!{iZwXOg|2z<+)0P2$mlI|)zY0)bs5Vu!Qf_3tHQzm@ z{sNXs`bB^8W81?24`D z0-a}`CCu@mZw`W=IVXq4O8xg+Hz<)8;3v45)-;T0_W6Q<_$s({bwfs-+pqrr{`g4b ze4}PtB%aG0&tQ6$Q-*FWxkQY6eQWPWhJbA{G7~l?t*gpP=*W3&G%h^^r@{<+Lj3&T zkx`}Q7Cn{AaO8OSY<`-K$XRrc2sSK~09`z*6`uJVLH`VkFIpQ@Z~x(FQD9gEy0HnR zz8`v!uY{l7i%QAV7ic36)4Ac( zuL|$N#~@gw5b5B@BW7^$piN3Z`7hk$P}=k4_G?a{|6AG=A_ottjI4BtoB7L8X>jmt zqJ!)5jG==!<*D)1DZ`N!+qT>q4nB~8#IM|E1pIIUDs7%}m47$=@jowe3EF zh}f8jI%{!oMCb{gHVv3T&NZ&prg>3@+t-vkh5BCb?Q>*)bNh+Zfo=P&$6&L%E)aSn z$4W1N=eR>H%hO6k0kM&M@n^xr;BH7l>P>t~hg^8nN09p(Y?MtAly)B4zz6?AS{?)* zt^T*uX8tXXz3-JRIH%ra&^g_Px+6$8^s*k>L=SFsN&)6x%)Gw)8*GXr$C1P8{8+G% zMRDbBB@qgbE1T{dPiHD}a^P&e@MK7dA zFbDxW(2W=W>lPKA6EsU|iqlxvKy?Q{`m09%reCVf?PX)1_wLWd?GY`yf4j>|3Q&Jm zIEqLJ+qX7-V}^e6I~p>lr=4K4>l`4{JU+^F38jY+(&%{+Tv$s+bm)zme4gzc3*QHGYLYiy-LJ?1rRznoD-1w z>zIJI!M0ef`cCS&D~ZzFr8xV)Z>bd_RFVPY?vl{Vo7ru^n}-9(x%)FB_{K$7jev1I zv(}S;uUd7Cdb7D=Y>5RRY|Q=s(lT}k$j?qI2Kf6!(8=@}1m@}S36Q&XfTIQ<;6nOm zKoUt0Ij+Mx$&9$v01{qOfxx~HZk&wK7}QA(&kxo=PrkMApEANM`;tJMVSjQT?woA7 zz@XrhX`B2Q1eBZg_*{H+gJC4Z>jVrlJoov(SpX!OArLQ*eessFj3QazJTey587PCo zq=3KJY~E`fZ_j(`n`nWPVG!$x`gqsGr5G95Nf2qe1t6|xb6<{<3FyHYI&FHvod!G* z2(?2#sW4Ov$_&PrS`RP(1pNII&<+Wc8Jq_(x;(=o%bdh7mo3Fm+!a7y)evhhaH&J; zZ44ZfqWB|FQwZ!oU!fSWaDcNlS$J#mHe>z-CFm09=WrE#wxGBRD1(EmX5ulb(*Ov0 zD6%XAIvps_jNbD+n5%;0$T{9GtEkc#jDEZ=g9&f*K%n9iB9O2@2Y@go1mJ`U5Enou z%TMEXJQ}(w@k)@$2GMu{o-^GUGs9gFMIO}-L{F0lZ_bLh*}Ay2kO}+AU4CLfkDM?d%*Qn0Q#gYK4JF~_z+j% z(&1cIqAgzsZmUPjYQ0W@ler%PDuRSKdS@?SyfGpV$;yMZG zfIO+Sf!BR&q5|j&!@)0yL3;Y3FE6G>PzT$ z81Q%RFW#+xxfLIS8l;)%WGd0@{{WdDs0$o(Xf~%nV?DyP^GReNksO`^;z+WVMWdnXrxE4v<4ZgE%JQd?kNEzFJE< zh(73Rd=n@G2Q$jsLE6;gM^EzXF--m8tewC)LIO!bD-FUE%D^(SFXHBmfe42Oz970} zBmn@B>pOR0LgGF%5EsKm!$qD!iXZ{b zL`+=B#@*_(Gil^RJAMuUhD*@`LzvL_DYNG z#^yn`zwrE6(MO?p2Ahz%5~K))0UrY&guHUJJ}RU~%gp%%&8ve1hXk9;Cs`tAsl&dk5E!3&uo-@v z(4ir=`+*A4F$Y+~uR&FR&$Itx-MVr>1bfB<#l0`5&Ocqczn_|#C2okgzPZk29>KU> zu1Smt^&!#GppwY4@?MK=zt?Eq3b$AD$=jTDY8JF&*zZeU`Ipe 
zFvT|D+AR*hd)VjRW!`=?bHrN_*tN@ z&F%TWh5dA>979E+L?HiF)BI%s<;XC(iExq{3@Y z+aJn&v&k^%A<5uV0@-2G>WW`!2xZ%W2LCdB%{o&W^G|JEllpeSb9WpNr`8aY`NtY0 zB|&8xNDxZkhTF(Y%1`O_${Z+)p~xxzd)0a|G+qQUXpwq9AkOHNZXH=%A!5Y}jO)~( zFLd#5`Ar{qc2Bghp2%YjT}|R$m@{<8@7yZ^a&JPX->w_P0%kTc2&Axg$@sj(l3*Wl zy5ivVmiz76ZQk=YVZ2=_)K6GLybr?}kOf}Db5;mdvbt&jxJ*q-ZaIn7~8Tq+;?r9k~J?K zc^y8RhBy^#tSgLEV#sTS1Uw&L?V1e(;Hdv*$b|<$oFrPqM|Qx=wl+;P<_Bh}S@Edv zH6;9t%I|)bFQF)s)P2POI0bemFudY>`^y1!kQ}&1LzFYx3^MH>5fp=9B} zkXy>GQ(5qeysZz zePIVGuCY|=p}a#F*1%dAo}F%MNC%i{SSFOqcN9O5`{LJub!@BWzaF{^$BgGS6HyVi zL<(s}AZ=l|h|U1jbZ>Y+NT$feO@zvg#Q+Nk%>b$fy-$oAuVa1*-nSED`&Q*k*Sgr< zg25&~`jcZN{mscS$mE$Mu&J)^`QczX7&;@JzK^F{Fkg@F`+)|K6c?CQMx(vDl4l}1 z8YUu)QC{7RBY5*4+@HjFDGUAa^4+3wcv}5_`cuy{O8| zSAv|QW2Pv~_o>gP6Rfb{uWzx$;D``Skl0jwJ(6`{@)`hO z!TOq&Tl6Q^AjAv4-e@-lH11u7gK2|4JK)7Nconuj7s^V7U>0;9?cgQ>1BVOVe0tHm zH(GoDkU5O?2N-eu0bz2LI5t_*+QuH@cP5eFWII0RLN`Bd8W{J9iKJ<#-DBR%dhjhU zGRu6F6NVQY#cuh1-)zLxn{=5xDgmNrydaHbR0{>!=WBts&&L!!eGHaBNCo}~(*iR* zwpQE#NO)LN6X6ntYW`IS5YO|0btf`k{0Fo3$I|60unS1L+kZUc{?`;6t_bm$4wAeu zKs_(A%2#T=;iYFn6Vl7TodHO=j=B+j!A%o*xe)LcRw%?73Nx0)Ju!!^Tbfzq3eZ2W zmlUCVU-PMogV4NZetHKyLU=8tv~iaW1w|$`7TwISfLFjFEwm#SW%=& zra*I=n++ssa`%b9`Q!ooNJYC=JE83~)oa6*%nqAMj#n?&uKCHz z9*10g;$lh4Nk?BBcrtGbtkUtJR{ncS#}ke?7KCOE--u+(TB5R#GcYB`PSD9ncHKWO5=!P%xlpO zl;tg>uU(SqYaMyjQzVN2fEYcdXMF9E1*zzh{x%8!wy!xI-Es5PVZYILhnAgf$fCU^ zFpvO{^(O8t>NVydV^7Z!7{r>?ZXkrkdr7oHkwidp! zM_!Be%JeZL_5Q8v+;tI{6WNL!2?9JsDHSP{X*|sE)_Ae^{Hg~*za=T*95iC{@x3lK z6jaa+$KG;}si`4x<-iO&I}6QKp&pTDtZ1${1(aZr5ROCSRBy?XJP;=F8GH7OB1|RJ ziLknJ|I>b<)U`_gnnmhEAS+s*@VV)<;}179#dYZkJIO%hq>=xk}m3UwUMEB(K9Qg@|bZ4HQ%`Ux%d0E@a{LV?(4euAp5qL;(VehX}p2a!l} z@K&B^vk-kaVN9uD1&?KHe2N5*&*LuKQvAb;b*mf1&$k3IZD;0j1kaf~*N~`jA5zSS zTlfp;ofkwL*#=*kEMN+n8K*AdHe7-GysV)9Oi@m`Uq%iMV^fX*lKM|2He=X@U>8x( zndT7&1_1LGBk96-c)_nlCu7sSpj6>g^D}D>Y+pNzL$WOCcWe3Y0|N}JUt%gOEO*FF z!Hrhu`kE1^UL7A%-7WIsUOzxY=MmbWxa8C({t9GLi`t_6SPv}zM728@OT$ECK~*v% zSZ-ZeY0e`u7{A5{d}cxUEIWSiTO&PTJ^hV@0I&fhC~Zfh67El6f-89qX+fN(q<_3} zeBf}nS>bR%$EbnhMkq4tGfZ-R4aj03O(iZ~GM`Mfvf-7NGn%wCSzWUIy%SBr9%8MI zu3E{&K?_wHUbf*~Sqeoe9(vAGn}KrPEx{Q3iG=$4W)LXb!a^@u3cB(-l}2B9T?lf~ zxlIaOS9!TU9?wCQOg4u6j=rkKRCQ;+Z4#LSYdP=4>~i0HkIM7G7C_g&G%%IuEH?hf0)8S^XIUZ`3^Hx{!=5Na{GoU*cK^kqhwvc4iT4z zAf6#&v5x(kw2Hg;!X1IdR^8cYSGn}!@{XBCjl6PvN!*Ug@=0T9PINjHtH3>JLoLip z>TsS6?;C#=DiZ(h^R>2=57q2T_b4GRpEhPsz4uu9)+!gi@yK`S{iTp z*`(m?!-){abn%rRJhb|pJWN656P2D@@m=(%QsFRN<1d4xc_~cj{^~T+0p@>gg8ofr>=Cfadu#~lgMR|hM z@ZYfrvIZ5^eppdqb0F=FIrs#R+q@RFjWJ=NGUNUt^l@hC;jCDeQ;^#tb+{+dy~6z2 z->FfxJSpuV4leHXw!{)sW{r&m`B3uQyx0MKsaxBOO$?|aS*u3d4pGDBmM1&X6MbVH3EE7s5@ zYaDSkR2;r6-ixsTjIlN~!6mx%=@KN1;gIL%v7foIa0&T-c3cxg~_j zpE(B>D8UqO+@$pt15BNJB5&NP7ld&EJVrI?u;-`_)u2Dp9)lb7+t0%eHImW1M-9Sq9B8S0R1lbLV8N zPf}OHDCWkEmKf6=FYNl)3NwDw1#36*gcelEaydt!zc7ERvYCBY6=Wk*Z5khWCmy@E zYV53Po~WqrV$z4es1)R(V9@K6c&=AkMNGajBJ~12!DM1BHi!Qc;f7769_AYpBO1}r zWY`E5{l*}b75-p+@RiRff_O+Pp zIY~@a?B-=#wZ#%iQjVjWF`Ci4MSJnhdyY@IkVpnAV2{cj=vtLt%o{`;f&NflRe!T~ zJcFcP4J+n}(wuDK##cv+^=nE=(50K_ref_qcNV%jpgbq*P-D>uo$khxrC&ZoFrZl* zq}l75H14|;XSMd)|Ja{VaaX1PD+#ibR7CmaD3sne2(1Odu;^SLOFfVtV0ZNS&mp z5`~5>*b!ogSE8AIS;Iz1$^K+LMfXBxEv99Ul$u<5XrSh6X=R;lIvo`W9}vu>V^WqL zk8V!)$avq&Q@A)DU0m>lp0fAa=3Q`uWiVW8QhbM?4{dz*{#Z`CEZ**79BO4IN}6MG)*p|; z5zd0lzRaF8=so2D_RB#5$*lYwO&{NdEMcZ(#2{UUNEba~9p*-yaWH6VxHr(kwIqb` zri0vWEDR{mdCKz0ra;ebhzoyh{sAOBC$hlOBa;ib)&GmqOBpeKgxtFHAuITYB`lEuPfP-r*8E<_MDVNSZ2U}^9XRk z0zJ05wSv<@U95sPhA-rZUI@^wK0Tli`@#a>nv0GY3SS0W2f$-^PQtW9ne@|khN!H$TAuq zE-V+y4t#j~yHlH7@8_hPd?;OhyD$NwE>F5Ug@q(F4>pxeodW;5iA+C%RRS@OHDQdO 
zdiy2ssjG34HvVJ?5H4sXYFY1?@N?3;YfYI)ag%k>xa%e`474yLn;&03$KNe7IT;A~ z>rZiM0L2d>z|e8sByqPWTQf7cwz_x%@bXTpJ}cMP|)SV{T+|8aZLzX z`pxGaUFR?5^W*QZEq*2fj)8-WJ#ydt-azSJ$cNX#PEx)FOHWAY$bEdz%mz}yFc|$7 ztRAD35s;eS5BHK%!~jsff#eWi%SBfNE4d?69|;~p5x>|4t4R^qpAX@$ud#>D>brVv z=DXg-`H3_YYjh57I$kY3CDRhoDaWaSvtgRg^9lUmsv02nzZlPEb?K6BjTF`ReuSdo zX%@ujX+aW=#Mwo{x(IURN0MogQ!?IY%cRtla?7|e7tXq&5|A0#&R{a?`v5@yp)m+=jQg`k{&6AR--<R@u`ZgNk41kVNp+w&x+gmLSz`>9#Xrr6IZB_S-f`# zKi_4~tOE0?rQdep)WJ7?ZrF;EGS{1s1t8D%o(|ee2O3JPme*&$4IFB|2h}r-Y?WpR zUR%|0+e!33()(@=65=Xl>6%N4s%1%d7s2$?X$%?8@GvoSu@F(qzPl2pKkqknGCw24 zd(iZ2zYSO`xHyhdP;oIE+-g zcx#HH6)m;FcWK6=7Opp|b()-rCG^~*8|U()ZVBs{-xM)!{pJP4)Lqc*tB9FAhJx7Y zIj#ubodP*vrVuK|{A>jbDQ=_Q!&qb#-dwfIpShe9-X?;i2*O%-Q5MV4S6lLG4>*G3 zuM7f(BCta5^}z$oFF5v7$n}%V(7I$>DTcXNX|I>uC~(UGX~H`i-CrCmnBi-Y$g@^0 zdvTXHTE>N{CXv>DO#5rA0rB0}61im!As{S|k}RoiU)fcLo(-MyAR23MI%x` z$Tj8_3jXF@^aG=EL-!9d%oq-}d0KbI^RDsB}eovYPEo5ado-91{>dSAe}n}c%n{+!c_Ch=+{uW6++xXe19%INC)v9c*7T;;`<4tMyw9I6e#ey!~aBj5p7Wa!#rQ>XiO zj0Bod>*V5Xq-%@=!;MJH143kcPm;u0r;|vW(yjq~I9UyQ_C7JiOJPfqo{{p`@>3%X zlPz~jMF+=RI&CpP@_%~X-6$6$pTlu%zjw*MXMvxje^H&&HyJLDOe)X7rtmO;3%30l zKuZF1bj%ILz{FdVaQx(ITwmM5-|S6f9FIDu)w6^%e!Ew7K?2w*Nc1_rlmsVl|wMOB!QgKA=%rFv?oJZNO8pMWX52zHmeKc;=rZb1+so)Kb<suCT`gNN@)5XC$B+Icyicxg?XsLD|O)=wMq45{%?DUjGZp`owG#%9| z?vB9+?hfg8TSQX!sb0W){^dhJhGCUfNa4pwVIK3!Ub?#Lx5JUEFKF~T+BC}ABM3>J zP@xL}HZiU?lAxOl50!HTIBCY+GCGizE>b6Ls;X~(sDb6Swc2Z%a+d?kJ@5_Q0%938 zBwUwMtARbhF48Nxma*!|_htI(-LG>wcuk{<)V4RCew)zhyT*gK=|&Jj^3n*Dit9h! z>BHVFu{Fc{?zAq9HlaJ*QqR^}t8h z_rTI>$uy>khe$06tBX3Y{BTT_$I=Q?(^v1B6F0J0Jr@}NKo4eVXowa60fGdk4B*#Z z-A>UU)KBfaj~|n+^|?sha;3x9mtSdyx)4>+&J|wm)pi!H;EQN%)bX4Lmp#JMlln6N zMt~7-M$L`I!EDT4Y;uOoyJ6TrJ5KuDUjl>rAYW57ktQN2QTI*zFJOx@!4dnF^aKIs zL4TS;tm3@#=73V}z2lxQ<;O{i_F&Y^r^oR-E??Almj~GoOe|phL9=&@a#k)s7!MjZ z#;HaZ>qFzyDT;LlzeJ-c{O1=}o~VLZ&8|9woo{L+@6W9L4VB+62#c_kOPX*NHXAjGcY%LJ=vzUQmwxGPkdRPmL^_lZkPbmmx4eoqO-hZ|45_`7!4`=gGa-UVE+JGh_yPc|O+t1o3TP5!7N- zW#QER=ve%cLf@`K_2Cma;gtaSuRKQDZ-w3)F5awiD+xr9=~+HwBNtZY+dt{1hdr;%ph>%LmoZ&iA~Xh;B(tQq?qGQwoo0Q(hJ~NCenNp`_!^kaB%a--L`!& zsgWD(T;381?_&!_Jq)ql>l$zvJ>XBi*ma1{&=~;_o3_C+du-E%SE!%tP%(hG`rL zIy^52c1a?wFGgYDW93EsQ$e!#(MiGcFuIWPG}(`8+x4R@jT@ zqny6zw1g_rbzl_G%NuF?74587ft45AK#mSgFL+b!{!lf!ZosH}-))$!iH{N~^~a{} zKXgT4gT?u;m#lIH?y^oiyBqw9Bh&X@ssu}_!-Rc>=$Fq|rXs(IVJY!w0HwtkeREpI zmj+7=gJHon8aLIxigFbD;+?OQ%i%6aPY&o+nm$RKTftAcu_*t+KlCGTG!}(%{`)2$ zW`{}u!~tIdmT2^a$K|92mwt_fba?xjcibZs=ES^t`yv zY8Kz_1u`a2+c88U?mtK>r?`=6z(HKsl3_-`;_QieIm++R9)e?n4?W+OSnv*qY6@PaHU}iiVgdB(iMD;LO`VpFw>J zMV#tX%6~87m*`@v{J7TT3-~$4l1HMHG%!MFI{AN~ueG4Qs_&Joy@UWAAtE_KOn+Pc z$bcv+@IOQ6PAIpK?u@Vlim($Z;VaOE?ffM+^S}Xj4mX?kt+l|FzJtA`KwFa;X&ynv z8K$kFw_1sLImG$0dXXE88?D*TX zNwi{Qz8=~zj>(TLy9+;@lmeylJEvk}z1VoIRNq`=xz*=m6Zy>xHPp+#n=8pcD^3jV zD*ETb%X>ivot6n=dDqk=||aA>`YmE52{<8Asgyc*J{U5_l|~Yt-Gv zpFiOC7NlR0i_@BTKwrCRT5)PVS;-N-T_bw5e#vR)+a5@lsgL@;xUxpnzVz0%&1ful z#|Vlc+@1u5^Z>aIVGoiQ5rNjsV61~0oS)AXDCpZh$<|?L@!rDzo)UxOazgRT&2)MM z{n17aIWK8jsAnP>?a478zhoDk_u^m0`MW{_3^y-XhUq&7*t*f3*HxUaT&dhG=l}a) z=gr5{`JL!c&Aj<*H?V`gXXgE7q{D6Hz7Rn)*AHvLkWjIrY<_~*pQtT;c81W0jZmaL zyP(ADMdx3o6YwxF2}N4*|N3N;?l1=`qNmiT@@8VaJN|gpfslpv5rG|^%S^dArc<@( zpew_hq8&Fty1VBW92nKk`LcptZ-`4;eKGL3g6Ra`)DxhZUU|;pSco$}k=57Uljp{l zip(Tnf7AH6?@o!oEXD5xQgiEw3StYbFLz31F25l3ZEyJ+%4KWGk8Pl2OhIeCB45-O zn&Z-FDstXGFk&xWpt6~++E?6%($m)($hcdUaIM?y$&!syLKn>h|NVh0q1tp`gS=|m zNmTwAElBU=UP^H91?H8JMXU?Wb2+XT^?bD69qE2#UOldEV~1C6+)rGZm7kM{tk2>o zCMoAvhd zQ>j&|l$)Bc-HW?WmzI%p-7cO-_pG71pE@0gM1DG zwwT12^p}8VyEL%d6en^{yqv!6^EXb0NySgdE1OoXZFwhw`~pHv4vBm2xGhV$uJaygUO)vWgWDQuCg zm?S0-q=G})Loji3rpx|avU=zAV 
zXsE2j1pYMh>oU2u$$*)rH`Vs8n?1&)=DW6!soApklva`l8g@(XnA*(XFhkNC6@5*mGkI41w=!~M#QXT|` z8;QcBSK=UjuP19Z4r3X6_2=?5gvx_(B5_iWQbsc%1Q%F8Q? z*~J*pNGs*vXaq#&Nl=J0+8VL6n+9SbdGI^Z+sQAkf_>a$)QgwAwfBYz;Fe<(Et>eG zSM_mj$u_>oTZa!zkm!q$iLHYey1$XTOO(7Mc`iLCE^>U30IPjzN?ON)kQ{SvDO=WL zR~O^#`*h3%hq0NNutKFVps5FH_L%tLY1gb_xI@ACV8BGJQ96NWs=&aX!xwJ5 zvR=fTOUGy8y0Z);mg0{Z4nn~rQB?J0m<}utek*~maFEu2$Rh*Mw-GKOPVckrfhqVG zf4))Aw>87^(!a~o<_k>-cww&PVgd~8D1RimEevhAqtThQAr9#Fcye)R^d zR5sD8JDhJFWTjE4Ad>U3W~UJp5+a8`;uiE~!txG>#nYFis~{vR{ku>xh{mFxcgCx#4bsFfkYVlN3Jbc2EfmVqo`42{lzund$X1LcN%w|meSWwt)i@MdqEiz@k8nfy0y zG;^Oh9@-c=>)L1!iJgxf8uR=e&j#{;_>Z7*7N!;hhX@mT%Ky(_Vw^uPlBNIMj4EGU z&epOa^6^X%Cka@Lh?_HDA4pbm|t+wY6D7 zMfTn7x+@kF0fXuJFo$B7CP-4m_rRp-o8+le4!_iuJ;r~l^0Pj5FZv6%n&13(Mfm^f zIK7zohpbp;ao1fedPXi6@%tS6Uj5R1*cs4(4t6jA}ud&ohX+E~g2ZSLDOO}ZPtNQvEoZIADDA<-^ zRF{Qvexj@yMlvvlPD+`wyoSm3ubLJdd``Vafe_%50!1}E+ z>aS$BHlJGH$kK^Oe7%*nX91NJ>9udLrZHiy;bUSre7%hEs*-{bw$^O8JV7BdW?F<0 zovYjXRg|@f8~vQJE>qLnh4MnJ>+n0*rT^3n!MahopnoO6j0Z^kQ9~^R1^(1w;2bH#-&Q;6Y>dA}kvZxzR5W3={n)Kb_tqDp zpjns%*(&}iA%6qD907xj#fS=J5@h627zyy||JS@(D8O1RW_{yMNe%DKl5p2$;z|?O z#`gk?zi=V;5ve2#{(+H?@W&qt{*UPzU2WXy`eRa`+BiSc%eS;*tNz1(X~>0a1WqE% z@fXR+IFG>J8$HY;lg*ldp?9Y>OEy)4Q)xwXC1kVez;cVKFm#TUNs|8`)!VQ!*mG80 z%3NX6IZEko8@_Pj~Y(@9Zi88{!eh;C9DOT z6N)!xws&}m`#X!Dx0#IH9SzF-;j?)QsN(zYK?Jrfj!|k_?Rp29nVRK&6DWaAiZpG< zwZ=Q~kA4Ij+|O~eB7EdAQ5>&CXM{ojm}qQz4!jssXay`)t>+=zpGR~PhW4H>%h3Nc zOYM|%D7g$fdzh-rWM9^I(5_wQ2do82Mpz40wMdkKXOL{9gY_hP{lgoN2~n3!%8WSN zspF5QM<4yCPR2)@eqLGmJ+xVR3ROQP`5)LZ&ESQ7Do&VtfRI+Pjf}q>?|Tb;0fw+( z-n=(3mF-gB{K7%nAl?JC*R|~hT^Zyb-rJgX5ci8~Zex@h>=%CR%k)2di>TmD0v&M$ zUB@4B)r*@ zr<1h;IC@vn-gGZUv}Pu`i;AD6NvPCIj3pWiB_~4{U9N_Rvgx?gPe>-gVuT<%XAi(PQmwXBk&@jv%a64mYPs9G=MF(evIw4a!H%x0`~s?n=_wS>%(nGArdN54?4#Efa z8+#K@j2b4eR0?KztY3I#&r*yWKJyaSZSTVe}SIh0TLgE(upaP>$N|o zXKk-3>*vvf<2s>7Bzk!i8Vs$UF>%r;w-}s#>l%PKIO8>%*U%XsK*Dw-Ki%;^38l!+ z`mb(jH70@q45JVj2jz65F?Xf23RjJ8DLVb~=4zrAU(qefd3iG%LhCbi{s?n2YD(W%sRk+mjTOc5)_kdZ0yYhd66z zg7NAhe-?JDi^_~j?*wk`)jGyT_}9bN^tLfo{&CAYDZ0T`*1L7hjX-*CD?!wf9DxBD zFS#*ZgJpF;ME4S2P&c#2@NUU}7j?=BqnvPsnQZUs!Plt=CV%(`K0|Ye8zYmT9J4)W zqNF`(HYFCu(u+g&!NC+ibNLn40{inF=<;JUmO#|s;(Rq*3`SgaE#Iu$Xq!~wZ9ne0 zY&_LNex7CE7U9}q2j!r8Md==~l=s(?CWb07oc3l0{PXIUJO#LQ`tH8ZD+;NhKZ&A( z>wl`22z0_;pGTa^H$!bbv4RSBs!Be9+ekg_b#wW=u8aI${?HpGHc2|yv<>Y-imb$T z4{lOefon*Z5BRW60u5o^*+LY$(=LfTjl+CMSz;{I`Rt8(WI0rA#WQJ(J8 zVO_4u%gt)3=BQv6s^ju6ZDA^kxew0S3CNV_U#gXDCfqzbDv#I$PEYoyz#t<8ZFh;e z52S?X37^A`r|>vjeiKoxS(0r16Y1&8Q+l#{^y9Q?JF?jyWH_x4dQT96d7drxTf0d| zqaXi~Zy*gpj?g0O3@3&KE3i$=0#1q$sa^WNXhoUCyW($+JZHnEkbb0EU{7^c37*7L z^p-Yk(0n@vvj8_*`*IocLy#o?1Mm$JF!U%n&871>on;gg)@a0ri*57!7Yn!BOPG}GyP*z>3kFEm2u-pK=B(fSMwUUt zkf~U&E=(rtQ8X7c7r5$U-$}3jXF%<1t`=dWZ2pdzzXes>3HPk2^K0vUL%(;dqHTUV zu=%N@AMwka<|GaAg_;6giCl2MJbRAIOkcc%V7D@de z)NU|UZq_yl=6Mu6Jv`K#_&+fvZC#!V(J$iX-x`Up_lwS46`oshdIeRyr!ze2+B&*M zs63or6je7f8k4&?7fq2}gGoFXdXf1T(ieLWdn$AD-;&g+m*^*-gV_k$gj_scn(I8@ z>71Egq=QOLh@yCeD>_Jx*t{c3t_W@f$pyKCc9~%^EmdOGe&TlGFQG54zbLAeGqSHx z5S?TVkL`FBwe$n}bAwZl;UwL>EBSX}OBAQgQ_AJbF7R0zpaX;NPon$y>>`LnksS`k z=le9?Kjb@uq1i+Q!dh8b?;__1lsAWaCO^z%<~=D3)>);ettbZxvJMkh8(&fyQX!DH8v z^FP=>L@vw+Bub&|#GRqrf?gVapm zMH<7cy!W%=w88y&J$pz4Mts&=gQ$y*Fm&x|Fn4B;Tdk7%`Fl(Z`Z3Q#2 z|DnZMtgG&vLd$jNCah~cP9m8ZP${D;xjQPG#flZ%1$Qq>(t?``Q)khNOy2XaAGp2K zMppHjvxhT91t4{id4q8U3cWae=LGtj!o{_yHPt%B)Z(`j_El;c3!=+;i8TC!x~5n4 z;_j?6dJ#)ZLEp5QpCs_aXjWPi;k~Ry50TVav`gMzY4ElyCYfeMViqH97i1=RKAc z6DuSbOawuNrA{^{!zaD~b}fYDDsu_LZrtnYYBYF4x~T-l2fKl?X*lgUQC0adKch^o zY}6&xbw0*q7KvA`=<%N;RQ&zA^iimGq7R7CB$FWPWn9DWb?(uO){DSMbsH}hq5t1n 
z03!w=SyvfWLqnROmfmx+$RFT>bi2=I^-Zdq`NvZqg74*wXuk@J;38s*E?&KF6QCP? zXcv#S#yJoA0jLV==8Hj;<2xF3avKVltgNeZvmLK%coMn9yQ5YC9Cd$*T#d% ziqRi3yWiz~1oBv+bKkFKk%cIDRNv>Ud?3caVXWu`w3ZeERz6A>90KNF@;2DDUr<6e zDwE1|9gN1p=?Xx>a&g_g4=@%drjE&voRcB9CfGbX9a^u+Ninv)eCGcHnyg5p4AQ7g z__~Izn;ZQljqfethJ}N=;lIKuN+~@D`Qc2HbCk^_r?ehSYGpH#h;BO}gomf-+Ir_O zOKIj@Qp!y1Me)b4$q+w|4Qrl*b`}GW5D2l9h`5J#WY7a$0MB<@^bRgqPH2ZX#?mls zt^jKy!!0`S)uen4-Kp$!joB{~Rux#=o~Xx=Qi4Arxf0zAHd)$8rm}tr(WdUH@Fpjz z?uSZS0&fA1Wr0V^`VX*Hn)~Xu>2G+5+E~n3_R2!G#n8!cC`QlvlGyYAKe*$iA z&i-Jj!I4rd6)~CC?T71Wu=7L8Z zXarZuN4jdkke8tg+79zudKodc)&9LGY@b8&XyGfUx?a)w*sLKnLg$A8FI~gCeivg_ z6fbiMD6hZcNUQB0om=8gC_Nl;fMNrH$qsos{@j>X{>W(jfdeR1l%5TdHKX0unoG@% zP!5T5afeet#8|L%b>`YSVs%sJ5TTu`7>A2_m9U}TJc^OYklK19UAh-Dmj%DwL7PN# z8rsDe81923&IUApI8nsN|ht{}mxStBo3xjHg%b zF0|ahyK0xDt#8EKEd_j(Blb;s=jYW|_O4d9@xu)12hMJgQ1(L_R9P!adC%)#j?XU2k%#a4CjW>@klbhPn})0)m~}@tY$vUn6wAo-S5;1_nd1!E&V&%WEA{hM4x6b z{fmRI02+@YoKaCm?QDlgj*UoMY*`;4Y|H$9*})idv-&u@sI;EjcS(8!0zS7os{njC z$$1K^lEXXH?aFfBmFUu{g$?f?l3|vdgW$Tim%XUb1vjOnAVg&@eg6JbK!Jp|p*UE8 zr^3Z7wm~-Zrhbl$b>5sXHM1c@u)v&836Nfyds9k2j*PVUEAC%dQ;!uXHXhk2$ojhR zd+ct5^OwjN)$$riXl457jJRdD)d-Gw*1u*UftThqUBcW}%n{Y6U z%c$6Vf$UMjZ=o}emQsS1cm%687t1AaETA^$Y?&4~U0b&qo-UMtlfi;*I3XJ`2*$FK z3u;=yN4@Be;Meygo@A&Ewr-qfcv@#dr>MXMf_l0ps3LSfJ8{Hax;a0{$u_X14rC%z zWHX+@4atU@5QY#o&;H^@3CNH;!kLB_5xk(zGTw}lLOkLQSA{k@3qc2F-%Y8II8uk- z0cr3%#pIHMkp}`uyN(>=a?x!R{$+@0UC{zmWXZLhDD9`2UM&k@Np3+@#wC%vkwU0( z5cUqa5w&Q;E-Hr#jghV5PPec8JPEHr{g1a{t#Tva-%>!ZIVw0C3^|I&Q1RA!0v=yS z{<%x$1`8Oo@KyKArdHJs@{PyO;IBm%)eqjiDP~?cd)F(?CLhO9XplP|cZy9^Mefc% zfT$OOxJ#XA#x_`)nwBLy3(bB(<$@!j1w6pkq6J7SNj*I3BDx_%)fp_9%-Q_ z@_ny;LT`9;ZmBT7_;45xwm%zakm%A&1BxNdcq%W;1ZLZfrIG>8Beme2yZ5&(N2Az- zV0ek>cdH+xioLXs$fUEJr(%=f4j+*mjg!y_f0ReS_fM<$6?T0nPIh1ci+uZgNlap= zLXl=48U#NoBrbWcKP(LMZfq{;V+`H}qfj3L36NMB#&Dw!2jIna-k>@YG-TZ8{f}FX z(u58dc*o`v;6ka63ml1h$E+2O$*Q5?zo$$W_rtF&?;DBV2(!}(-Ohn%-pRM$u{AFO zKK|`-I=nUi1q>@DJ^g|8wCCl)ck!ENw!!T-Nv69ax@E;A0c1@aB>_k##Mx)m#r|{o zPkv2e)1iKz17%{({b`+t7psuGVyNxVUbM>b%~KD&soPzWS=`zkUi!%uno`|R%G2@c za^y7PYob>EqvKHqDKnA0csLUo{7_pp!@`3P3 z9%L%-Ob-D(fC;)eRI~nWV($y($()d$Tv{WR71B6O>AM0<0coGX%-T+_9a@+YrO+VloiF554`KK8E%vjrn;W?JKP*p2{ z?mC{%4c~>f)hJtZ2Et^deWwFBb8r`s^iWh{Ba>yKmIw2?vTZV7+1?{Nx4&9ZnO~&B zS6cePyq<$waGoYDQp%-DL_jkUuT=EL;WQWK9mmUDF)xfHe8Al9a-Ds9M~>73JRQVY z+YQWPY(B1hZUA{G(T_8iMG`2aV&!2GIyqy1-i1!WO~`obFjNUlVBP;dk|{{n;idCM1OmzRiE(=I=bTN?IYei8~xn@!ubtQa{% z;LZ|QBIl#KE)1dzy8O23>xOfyMM<~WNbDc?Zt@DAg&^_(v?p`vFtnpqbY#%_zxh(v{ z_xFr>;=lef(F&!Ol3cwVoP>qK>+w&T@GzLs3R%&sM%4lJ8-!3m-Iyu77ZV21V5=wk zTgV6T4N^cQyt<|(mhtUnonYUzwbo+5lr`CsMUZ{pXGZq4@aiXHY z!2veLy-pg`$nV$?YM=0`)&f2cvkFz(6o18bXL z0TSmI9^fPx*4Fl<<#cwYx4|3VbUq?8Nx4J&sekkRZs(MZ=km%+L@Qeo>@G%IWGzm_m(^Rsd$_NyUbT~`_D(EEAMfW(g zE*Fk;2UY^gcCsT_d~Hie=SY$wC-x$7S@KHbqE=*2SPm}cHP(^;LLH{Z#U|6hYYIU( z9TVD?z_jx!;`kC_w^=XNF8`atIa|W7aN@!feRLYt5aJ` zZ4u#^tK#y7kZ07`QXWx;dZNBJ_ck~X;z}-rST8~PPf?od!{W$4Gwru6_lI!g9VG{Fc0Zw?97_lBUXLK?@eRup_g@y&@sfwuOKKHu%p|QmSQh=@Sh%DZPiOOa z613}AEi+scu?1K^83K8~jads0KLkovgy@(M7lfd;ojng=Tt>)SM>PL8c|jlS%9ms;%B4;KUdh;69jq@3sFzu&VQ!7ZL(wB+9cv#5&QC~Obf;xXbtVtaZFnv+-( zpLnnh;75(rNr6iG-vm<-qWn-0vKXKzbJUgTFD^8kb}T=6(Xef%`WPLWZC z_tBs06T!p!_qgs*DFWY9DS>dr7DX$jFM!D(YODY0_3q%e6)n`GbLhB|CEry+F>B@c z>$nFT7?)J|NwXBXE-H{2V?ms;C-a`|@M|fYX+0X9P>~`tas3?PqZI0lc8nnYDFKo3 zbN1pYeT&@YU}=K=PRR{x{oLCPplv7g?H)z2DC2K|NpK%n4^9?Dj?^!aLN60kemX*W z9q!!!Dtj{c^)BE54{8nle=>{aKC5=GAViQw8O|5^Ju!Bw#(HxmgQ00jQP-Z;(1oR& zwS~JB@n%2xq%k9526WT6Hd7;a6G*P{H0Ei7Tx(Xt39@gz zpGyWMWoY^9LC#`Aa1-3z$xW3SA|uamiA$!&cAW2w!#geSmqVx4WXer34x2KZWx>S~ 
z5A&brvubeoU+JZ)s6mI$HW81;7TdWLtqNA_s~C{>__;{Xk4z@@)I$U9Z+!i$9}nj6 zA{3}rbx4SpVjAnN+ecWBf_dTw7#D~23$Nsx-f@wd;1Wrw9^ng(h~a_e6VY5wqKa3b ze)gqBM=1Cm_toYh8-|}Pdfalb3#ET^<0&hXDoOj2LUFMu5=bV|xrRR^qcT@-?ydZt zIa`ZkGZor6vRN7^2xxJKrG@e_zcRzoKd&Mkv#2XHu=HD}WR;HXD3cWk;5R@546!JZNno|OjrYtYt2 zXHTt(NWX^{)QS{*W+MtJ396CDb`!dkh+pw99d<6Nx`Y(UkuqB{4HU}n5&d`~Wa2CE zsK02AfrUwOllgmL;VKlq8we}73$C3_f6*3Gk=?HR3|uE3a9FzhI)`jlB3{r46wKds-sTFGZh(laCg#PrkiY{)Ukz-iX}D1>9{$lh zHqNZjRsXGYc8vdhSVVHh+^CU1I~Hxlj+0lb58nSLS4fzQ21*bKVOpB8CludtrR2lx zvkvl~wB;x+0(nfrN?K>~2@7xw?j7iOeLg=rK4ahOIb-;cA*#2)hU$(ipbr7&oCMXM zBt6tZ2oMk?MGMIwz<~LW4lk`@VQTsuo%Ai48bGB~C3S}cAQ{EOaLY2?JR5>9@u~OD z7!#q=q~{A4=eD;t4SX-co~VYflR?+hdANpIPWrd)r!0QAj=A)EM42?1#PC4HtfXQx zg!OS<)%KCh`76p`-|jghG9;aI(ymT?di&uH{86IDz%Ax}BVeWZ#MIgrVZ(;qrCMtA z;cEI!tC_4P4+E$`M_%(*3x4}0%Nt+W2eS5hK+hsLBSj$etnn9%n7O!I2*oTIKQv9# z4xVLq>v+XC1**;~B<1A%y1rW57`pqIm~)S>%l<#~tTzrVVfWR{{OyJU?}nf+T5_D*;ByRiy4WN`+zcquR9SI;m^F+qN2Z7MIF0 z`kLhEzmKkRZ!U8%W%;>ca@MnB-C%jJ8iZv*fXB0LtRuhTQ>HThh6Wo**87!_7*U(G2aess4H8CG?|8BfVVzIi0% z-6gB>2PrOf!7(l1sg1Z^D#Gk-Pok1?tmNGLi-FSxow z^%=*PVrUCv1N|A{`q^DgKX~u|u#{m5X>r5p-D%Ti$3dtvwusuccXp`|t#y~}(B2)5u7L=D@oogz}u7A1+FNl!H>dzt9`u_qCc@bL8C+f4U z_=Q@ZT!anQc?6cb4mZ{zJn%aap&v2kdx#p6@nUPP%KK_HFqc_GbS%}-?RC2w9vhEk z>vVYsp1q#;g?EK#%&Q@cv&R*+0h1wNZVBPe==;KS#7HUHn*BwnoIhTWXTvUJi47j_ zhl6PeIhgiI;O+Pabnok&7f5wz4WN_=#hGjJXJ0#pCI~Eq0`vCcQs=^J%>Bf`g`#iC zSC!4;f_DQ`i3#!m{uIe#LS zs;iC(jNc;t&6-AWFvH@Zu{nJK4#A6qXk>J=Syhdff%W(G8DGD1ACIpSSj%bZwS*<^OL)U>UgU0KcF(C5()9j?v z_|}FbeDPFQU9?c7Q=w?#iWVI_5GVxv0&uR}NO(0aIdoQDv@cMS-Gy9@2@9mrk()s0 zk+j`30o3ZLrKk9@U;xkPcjic9ZZGXKM8B<;5U498hj2V@&+09T!AaXotwFAL^$ai8 zEi%)yiivCn>dBIc&$^Jvu(sCwVO^t=NHy*BQ0XgYhc6GQv9h)^R(EF~C^|l)6>&6X zDI`^ws7QxO;UtoFH|^DwvFCHO_JB!W^9cxT5vmygDm0a(b=M%&@-2Z}r1aR&S+wly z!Z3j21z`|bc15mDV!Gq|p_jEY7o+n5NhHmhJP^BX%nDHp+eGw*7!6>9L@PXH~ z?Zc$UfD-{FY{5;96o)T{%hE1s(b0T_jKndTD<8E=+Q0a6O;7wN^D4Xb?--zXMdEqrd(qji=+@fuT_Bl~k zZ2#;@o&HUjH%+22c9!t~x*GP?>R%_>jn8L_eSSSIM@-klaFN8V77UQx$|~tp_$DO2 zR;+R>BX>(rQK^j4a&U)8GJ}cR##NXXQ%s}~+xrq~(i=#-0E-jSYq;_KVS*ddj5GJR z!3(Mh4UoKjRs#ucgnaJauM|G0l#MX)`f ztQk0lQ(OM>_{eA4-seVbvs*=bw!n*Ne-)}S_g}^&$*942{)~d=Jf!$@?X3$nF(<7Q zRA+$^5+^4xh|2y9M7Yr{b1&!#tGoWNGgJwH7O(Pq^RFvWY%K=+iZa1f8)ik%!lcL0 zoD+w-!oEf0CfjN{O1dY}5PNxS0L1AbbvTfK$pfOojT4Z=w)u2G*>VmVG@`aPwxEmc zy_4@e?FPc8x7j25HW6ymcr%2X$<SKK&Z^S$wz3{gu0s z6zlNtFxXPUkOzQ=lys0(zsxgYvj|D~n+;ea`1#*qs6BrA4(#-sq&|9fC-}13pQif{ zOXD9jk6Z2=`ORI4@l8^b5O|zB3=q5h)|4vk7@exr$)A!!f^-I9{(jK2WZ2Q4;uW|W zgtms*20~N2i-PRDJeU^q;B93U*o(IUh!M>eQbVD2Jbrc=EBgzuP!=TNzq4G%qdKop zI5k?&v)tef&~2f%W4=!8Nxc+CA2(`(UKORa8u`YS!jEwJ5au4`cpFXJm#?~l`u0^D zc<@U?B;I^jD< zhEQUw8KbYrKX>EUcz0Xpv>uTlf#ncWrR;Pu2ym;xY@8eJYx)rvzM^LPA>aK8zN1A$ za-7wwDC2G!nuy!|2)cBQ7lEFp0J*U25Rx=_L@aAI_W=qA^`O8z08qvK4d5NPLhjcj z7t;}WKqhYC3ZLg5DV*Hc>F zUV4{y;uc=^qe#RmK*r^@X>eIn7P$|?j&dsAE$$ldUu!zr*f^L^@PNB6Yo0w*oYKht zx(BEOhStB2VA>%HIFR$aD>JAja^LxJV*ah0XUie-bPK!}kuHD{#kwf_s{trl9Lf2!_;j^O8!xqK01fi6zXdM%UG$+P_8qu7Hldo17i9 z-A{iDM(&3vNFEkEB?!A=EPMs0mm%PId#3yPvO#WOC|MI~s zPB}AQ$7_Mhi5u&Sv0N8NHfCkWL)4hW#WZHZ8(tfpMFT4;i*@b-l~G$^-!T* zU@^IX7>z$?2&L!95duZ~T1`zOwh2N<5eJgZ-2XnS`;!=Vmp?yU(ff#7Tf$gZB=bpO z0^OheT~3PW+v9>j^wV8(6<3}H^cJ;3jr02Sg9-;L(`Ns+5%0$-7bQXM-N2d(e4mDZUUH-59uJSQ?iI|m10SWuc^Yq0lM;?3IYdv08Ei1*2 z+8+L;6P`sM|DGoizAid8^i0jcojEa2R?n)O&vCJB(Xy|Q>(-2U@dzTt#r|XAxxh7` z9}{6qcQN?bOF1MWfct@<;z76&6@>-rRjhrsQdNdra~OpoC*1{>>b7;nr-1UAVe>t~ zd9eqEP+KXEWRhsJT?1;Q8{9eGQnR#%qSimQ@JT(e>>#p|{iI?_?9N;~Fa31WB}TxG zky^BP{z77i3M!jk*Bzw$Ee?8HX22+btn}~0E6et>{a4-)MhXuQR@pZIy(;ap89^$v 
z!aO=hIc{L)KMGUKvOk`Tnu*=T3nHV?)>lYh`~$yC!&9f9t{o*mQoCdc+w;MB#Se?UjyRB!B*kV#PsOo%`t$hH#oD) zHy@~lYeZsj>|s~ONlHR1vCNB3Kg?&QVC})o8?e*-{J-%j%1=711yPVoTnMTxaiXa) zhjf*zQ?!753c20mv7-?}0;&XU|C(5N3^~9;ZixEaGv>XbDbPawih1jxJ(c=s3-p<; ztmWn|YG5CElPydz@6BcSY!kNpW$>Gu%KM*}LOFCaL|nXyrG!P%L7<(n2S9dKPfu`^ z5P5{JzR#?Gq#QTJqsgnCI7NKbs-l8zuUR4-6x}&HJqU+H$zn$7OG4y6vz4s<3Sx+O zNcT*}dXo`|-Lv9%<}e_&yy=^&Wh>ZtH0+FM14Gb=oe{=MSO~1;=}pbmEPeL zi27Z#SwwaXV0%AP^bH!QE=L4zTi?gBIxa;(UgrF#9lFLq*`z;3w=Y$Qb(vpQF*X-A zU*}$-N3Qz2P@k77mebIFifMuGNo0`#+H}{5U4z4_7`mn;#_Q#;U^(j7bSw`jF*@kW zRNOU}x%3*R*FLj3hJTg0wOEvxiLUk&YW8*;U8sJFK}f~D`9-?8QO@n0zQaD>#rFn9 z22urf3!k2zgoZ5qfcul?xrP4MDng~tg6^8_GLJ_&iR>q)C^c%atb{Rv!6BPvznZRC zOa%mr=<9^G3LeOAo^PU4$6tFxEUb_`pYutnuU>d?JCgf*zLy{JcVchFoWvU4uhQWQ_Hlu&F-Aa2(aLqCmKI@p=o zVw@W$F?$4wk>;MZ|2CwrdsYwqEOQC8KL!d;=NV6_LN(}`WnP3xfjZ~x7U6m-n|Vk% zgawyHw15q?_}r-XK1|D`CPvcEY-dE42N^^H0t;N9<w9@WTwdoausxQ@dZV zsG!w!E`1lqEq-c>!6!@YmhDqlTmP}OrbVP&FlW5yp0OrUFuZ9jk=aDVfP`(@sPM{g z17jgMmU`W-|Dta`G)mD_Y{8Z2`R|!n;P8#m#WWoMIIG+W*~D?_uh+Xd_^9|&v)Niw zYa*6P_Hce0Ql~^)f2K)Q>9)ofcm)kplBfw!8zq^rnxRjJErz|~^c+PUw=M(#1$K3`~72pH13su zlYP+P(~NMVB?{3?`w&;+IZj8(DB6QXm4>R4%&n+zSXQ2Qyde&hrD%OBFIOo4rG;I> z$`f7jbkxwUZ|VqgcI$XoB4@+p2XW?_X~z)#_WnA((;i(*zkTuw73 z>1QW#+&C~a-adVWx2iNwV;?Lqsg&06#To<3yl-xTUS%6EX@-Gm&a&Zs4zu);E8WTy zu$e}1ZEjIw?#jTSXym7u|NR#E4>BL$p?V+!X9h&gL(c+9l9VQH9I20Kr~#_x`$c{z z&C3uAzKY+35y`8*v%l5f{+{tjzfm5aQBs1-VD6T&@Lc{j!0ISwBIkX55fZ6UhxXwg z66fdOtAfJfcf{p#^KbLhy5qJ>V=GBoONIbdRJ;xLOwLgF# zfQ_&l25?cu)d~9o^CdW4VSX5oua2qiiei5QNa#GGK?&Y5LeNedRFZ?A%4)>MJ_V#7PTRT;a!^0=#oWlqBu=549RnndV%m@R z**0NpCA~@Vcc*!hm!2Q*2`TaX-HkYO0H)PGq~CQ|vK$uA)wEr(%W^U>nhlDlmF zrGwT*WnO@6aNKe3BpSG|BX!O1m=q^Sf_SSm_OP+2eXH+?-Lc3zAd-IPOhZ1ck1Na_A)9lYt{>$Rc+s1yht;GBB$kTzjY_z~BZKO*ZXW3dksj|AS;We(8 zU;m)>NV{FY?eTqpB=-kiA#xK7ziC9HY}~X^uEEmDqc~12)Iae*>X}fz#9=tC9I*PR zh44=qKPtHFU@vDI^Jhg$;r>YU$A0m}M09K)__Ke!25Xqmh9 z>5?7sZj~<0-<02qQzlvDj#P4^TEWpCS@ z+@4@9-)H)$7i+T+Wpa?UJS%4NLPW#ldwxxn__w`FUjxQlr5FR^@?K{_7I`nQP;rC- zFud_9X{8I1PU698smkF{usaI$QYbAi`{UxHCE!)N#T=Jl^*k+q_~Pa)d(!OvvWUF1 zmZOb_6*b-COCJnFtJeY$waw?tMb81Ft|8V5@QwmJ!AX0N@gv$z=20` zgYk%!oPf~#L-?J8eNYPwcb$HAbz!kTI%Gj`F7Oj!Ob+&F31J6kU&C#fpNz`tIjv0} zwWBAssTwivP9R6hOurs=@A)1({S&XuA-{)R@#}W(&MWr~!o(3-;&`I|T!-Nr^aVg} z3d1{rATd#kP~^m_Ah;Gh`4F{#f;b2MJs^l?aA*AQ6SE&`P&M*HEAG|M%}E6PrRn&- zd7<^FPx9!0G4&Q~QGV~&xAaI3AxL*9rF1A=f(VGf(A_Q4p)^P-ARs9rqI3`4Aq~>q zpmaCh`||tyKgaV1^k8Ps-e;`!Sp@`=S0H=f7hBxx2Ao>4D}6h&NWFbKU~Bn(SQnKS+-)d$nr-N)O<52v8sAKM!?2)wGN{I4Z@V z@LAT!71*_Gc$6MhY<+M&kIFmR_Dv1g%eKT&T0T^N!7(or=)Ajh-$x|_SPDOjIsPOq zA`sd9;xV#6cS)F5Jsgw<8#rtgC{(WCwl-|`y*nTw$%Gzs2l_Go%DV?PJIaqcB?~vX zP+d1%NO-3#4~sverV%`YpAK*5H+Ub8N!F%&<>KSJ;pOO=?#G+qkr_I~sSWiC`D-adea(+JcZT|l+!6OC|y!Xua_fOW_T736m4VX|}Zkwme1jYUWZWj8&PvpC4K!Q#chZ78DSuBC1 z!b-@vM(MfTYTJOW73Xsn$CDH@Q_iuY^!n$jAbm8F6ZxYSDEx?59vbJqc>Bn{OP0lf zQdY_c@Pu8{!aC&{EC{(4%7}U&aBW{l$cz8o>ze}!=|bp_Si&Cfasd%$t?D+A?EW4A zV+tqxCbaA5(p$0H^A&mz9*3=Zr~2VY(0tB;(Uq(5yr9D+j$Wl#FKyS}D5MC2H9mm^ zK90T=dZWrbKbh$9SAgG*tbg38I+Ri`Ks@IDsAGcFtaC2~Y&E^zE*nlZ*^JRu?mYij^7ph`H&2W1?Mr zKR}r4im=|6D`Zaen0pY(j}sm+<1%la(!j1Nmz*y*Z{7b@v}g&G z$%t42I+v&cG^tb%6de!w(tUP=(qEomf1z9;TL9i(3_S4;Wj|SPyx-32{*eKdt4LJk zWwJP!_-@a(+t7&t@o7kin0LS(EUSg9`Lou{0bu0&U9VK(7us~Ly#663NfP~R7PuxU zca|U7f@K1I!-U0Mf9ahi0G**aq&G74UwvnLd$Cd771h>Yfu+h+rQ8C>IxF>+q1jP# zqb(M9jA^hIVBJD8f%QxxR3%MduC(-pa%llHtoGW}p$AmUYM8)!(r5b@3BGdv_}}_3 zgVA_cst9)&)+zjcthnk>_+tCN022GJHrS-35qF>0SLQ-4?dbr^M142sd7JSA%GN!N zcn&}H{QH)$nv0voGiS&(EDKM0Uqzo2nuQ8$^j0c=G;y)B>W)m zP&b1cQ;%UBcCR@}?m=zzmq#E1o`hqOEMDL5yp1-*6~AR4%%MiQ9qQZgJZP3ExNGDJ zp3WJePM;OCuX 
zG<4ZwX#hx_u~=2s_F31UV;Kxv#{{bLe&UNa~= zg-A_6l)!l z+&2lVFw)#r?iHMf0r50l$q`q6D+`)Z1H>*K*8m4h5PW%f${8Z~LWdN}ih<6*Q-B0LO;8ScqSPiO<@wVABq?;fBln#E9g;E~~Rb28DU zIsj(Q_?rY_XF@NS{%ls9z#D@Fx^=4`}SEaM&$o5GqM0q=Um^lBykKYN0p#E z%att;L6hNIQC43_SI3&>dA6IU5V%ip=5Rb1R+afVFc+8WYssr#b*IjyMQC)=BiBsE ziZ3cGapb(R9CNG>dlLyJ*5-tKW;<*FF0@+@n7YQK>r!^}BEpe44F#a7k#@3eiV-F) zFo53Q(0%mQ>Ao%%+zHr7zpO5j@Ztz)XG0b=9p*e%q9h-`v%T_NWe!>(a$)>gUaJaf zmCuWjt|Sq*>f_DJVf^3j(Z@?!_4KlCy(ip#Q3W=U&7#AJh;+)mFZ}N*6=t zs|K-JD2%NY(5?q2KBb-+9zi*g^%6WWYR<3ff+R>lHOfX&I{>5yEbK28wE)Lv!!UXt z+OL9bW7*;@wIhbwasxbb3_R1xKi*mXPPsdvAU(wvPgut6Hz4PMGS94f%&Wx5o~Saf z3}zl{9uu+;p1xqKi>3WqlXHD=tZ~itAA6$VKhZ#gl-`M%+?{^-ePZR0HS5XjR!K7b zMJe}!EU>X`tunbmh%kMEO8N1Qn5nZIV~B_q@s4zy7|t*)$0!?GVdA;5X#&r*a-ix* zv^RXW-)Xma@$Z&XB~|Grj^IEwD2ou|3*sVN`T~Qp0nZiKbsMqD-CmjkRp%&+cnv4H zYnf#tX0;JXVQxaiG=q%9Bice<9n7X`Qd?}4g%aboR#Re%MH7OKAd#0E7d3IEkwv}8 zcQX6I_jHKJ_hf=a5u&E#(Jv#l<5}1Gy<$$`-}c%F#Bo~%$CnFaBBf5TVR4?rG&VL^ zGxgIwVnJSU@gEO=Fc!b&BRGkEh6KXe@~S(ekk*C-f|o)c1}4z~UMAT51f5GPDRqjoC^YwDjO(iPJQRw+ zrq8;lc|nxQUoQ53qk0ecalSxq@m4KcUJ5Tn#n?ySHlL$WT1m5bGP5I)^GV#IFUbg{ z%Y~yc+kxTN;pXiSxFm!s&ht-gAn8-T^Nw3FkUjxT5g7^)#t_O$+IQms-H?I8k5rNW zwiY7Mjh7sd(qLf7@u$bc0?%~q@@D4)U?6rQwXfVkS5GB+hBxkqb| zoecSP63|^P$3CA1Bf>!EZQ!A2wj{4}b<}96;Wt3}&BIf6g?jlqv+z9*nI0oC<-L4c z)c4;y+#}AOErj9&!Plw=_P*) zIoHGerD$Cty#k^wr$67n4FvUrxfbPhTipG`sCCj|9(N@tC`pqqfIo)jX|4T4i7c_- zeS?PUf`{VnlxBk_ux38$6#2ywlYWoDU!*TTZ|$j+v7STq>#0i6e<4A<86R>Kh>0U21f8U3vqbjs z;43`4;8Au}d2$eR)oXaWd|s$$63#pejJx+p`XH!Lv|N_%S{Ay$>{I864DVfGLEuua z0sE4XLJ+s<(9b`?@k&?J=6MY7Sn!}XZvGES26V5E!C`(uTdkwlDT6gw6+kZit+Qxg zIydVpBNxB1baGnpfoKz0YKM5N%i6x3nlXh4d1Wy<)7;R>VS|B_!>2omGcsD0;$N~2 z*1QB4C1R#7BkxP>=+lV;s)0mgLLZnK(5_p+$Je?1g3g?cqOL$AWyf3Rj?LwO#zqXb8rLKYh$PH*bv=cvU`<5bF{w`vPJ3$hMz(lMk zxki~?8$kN9(iu%nc{|J#Ut0_^-k*=3H0(X|w5@*+I=5W2EiR zkB?1*w11ujB9vRw!1!}%xU`FAtE?8~#Y3gD@Wbdg|HlFdA%)KCIXz7P+Nl(k(ASUX zme$=qo=tyz0xiRr+CoM3q7S^o4k@w9kh`mQjsWr!=gQ0DcM$OlSdmQ%ZqfDa(HqzdGh70|0Hw~gHaY^>WqZ8Ag)s}*am-%?*IdUR%RoCpwU9cTTy)*CifuYL$oF3+&yrA=mG*~5wjz0g)) z;wx1ylssdcW}0@{!R_~$F3KSu3w+}2qU~*kG^l8^Q%eqxV0rdER1rKE8%sZq`ojnr z13JZe%s{2^dQw!LP_m6y7R{vZ%YH})bEif!L=i0^geaacXD@js!&_Hx#$+poPz z{|j>2f39MDp|n><|Jp2=Kd=G0Ft$*^QMc4}`5l068Az*ex}Mr4!6poTR@2{uiK)N*q~`Q4uFTcDY_ zS1=$j?_pZUzetRyS)m){Wcx0{KRa6F;Vg81iBr|?Opnz+ccD}Ch944s+kprv6wh-c zMep_s?oyh6pBT|RDaXIo8!J_B`T>dL9Xn&&H1M_Q3wj_ijo&?5;G*9JszJQnkYZ5I zyvnZtKWE35s2Ly=94j%})lzLG8y|qsQ3b{5?^(rENPEMAvfi*WLNAV6Sg36Sh!5(I366eA z)#Kcsr@N+uYwpK>fFPV}lb@0HS-aY662C6G1-K^%ij87j`>3jKxDy)dM?8zg0_zbH z#phGwdOC)#E#%&J27*+-C@>T(!hL+f@xvb!*CyduUW=sQf0)YXOeS|OG#Mh=u)H$UwGqy_x$NGIg2mKG)da5O z=)eR3vzmjf>RluuCY#8aoiu9u+YSGTT*JCF>sr) zz&e{MnC%d+gv%3uxG1UT3Hgt|E1v`X(O!u_V3_30^UuZL2V#%#r+xK#5C^v|d7K$~ zH>=i1qTImo;53r$HHdQwXEHJDcBJuEjW+~dAB;0dzrX*VjV4~a7wxv2N3 zpN;;$l6|S2HeU|CHQ!(lGa#w91ngKE+I-;*WTnYvhbg0^2;$!jfBXo1UUG3}jTV8y zaztDvwUYH#<(uWh(q9BKVoU??C9{$HiyXS}5Y@7yhd^cAys=7Z5ahXg6-K!L5k~Zm z!cj3p+mV6cHUn8}J(;MU!UYJs(he%q5Daopg!@ozNAUCGj--=lGy?n#&x>_cW60Br z6kVsW-2!|=IQr{hp9bmdY5prex5GitR)Z1V{iZ1NY>2JQ7d@;Cz)rq} z|MXauD=-$_l~IQ0{X=a!gtV}~?;e_?6M*ZOyznnT4cC|l^MWOBy_eQn!7?>Gyb z{prc0BWqETHo+LaMm|M1Zy`^U4UWITPIk5}m!?*K{qC2FfUHky-vT%fv#mO6aZ6d5 zS7lR0IK_cfc`^tUpY+veXfqZ)ytNbz-uV$}TT+ z%ADw6=_NWV3!u^>3sNrBla)W?ZfCQwvl0z*!X|4(1WF=~mb6>hlB?$$V(t!EV;D@%6fA%4SInmM@2& zQt~B|W*6vs`iMdUc|CZ4^4{UPa{AoWLSP+c`;z|$&jQ`-hEt66a3H=&-tQqo*89D% zumv6t`jI@!H3P6frT2U6YJ&HHbfIrrhQl1+sB~L)C{b?C%2r2Tw$Ty#LO#krv+sQ` zQG7ZKK#KWQOEq^?Fyzv(tGXB1K{rOG))@XUO#A9!T;i=J!Z;W%IKL||&RNm`Jec9}a>~!Y(_E(HhX}a1VNSj>t14t@?)7gaT 
zIOx;Q!;#GDz>FG?61-DR@dS{%W}%T84Z->igN`4~L={a1r%?VOrYM1tM#w*BOA+V~ zvs|-^r~n5Sc!2rS|0oLs=m4cCW;0*Qbi0p5W%1v;9?NkgB&=O^6 zPwM1)k7M3mn~HBe&PS`}M)pPMq@MH^aO+Ec!9RNF6=q3l`7RuTX;*$}2N2;a>EA7C z$1dV3zb@$hV(~Z&-jn(J7VsE?+Gz3pf5>U9QJQQSnPaq|wZ~b@4Nvm66G%shh1Y6u zk)k0UIo-Jg5Gy8)xjmY3XEp{pBk7?88PLSM)R%42(h%bo*|jlr|F3$U)qMIOdoj`% z&7p6FWR5WvRNHu4)gB_V3_90UK%o2>q22L|?+S>-jm56WPakg!XSMH%w$|h^$XD3+ zpK=FLwd^nj!J}^c#_O%UX^B$_rK1p?>r%Ya7Ddi$pl9Vior%E%pQpjCQPT`45ba77 z9ya~=^q~JUV?4)I`#=7F<~C?VPW{jc*|x#bjDG%prOT#GK%kZF_E!&Jny_o12^Xuj z4YEO&Md*mugG!8E6V2JZghl#1gfa;d1Hy($-u@ce8n6WI-!Yu~p%9wdEYT9z-S@R_ zphAuU^%Ihqfzu(SuY+)5#jGfJaRrk7-6D?{@_Pi4Ev?hoTiOLygr|bpR*|Iu1u||A zI*;(5bsAg_F7nY-xh#qRkO30dQurS{bHW(M!zE&8Kkxr>=-Fbd zbyOP!9tq*vbprr$bO2eE&vpo}8uKDrco3<>O-zH>Rq_&3xDiE6`P}d5PRSql(CMw? z9~HTXGM^5>M$T`#q_#mK6}D_CDO4@sEu~dPg%+N2A5>Mu30m)&?!lhXuxGgvYT?a7 zi9Cw}v6kRKpG8iFsdv$&brDIW=E4QDAeD<_%a^{5TDI z`CXK83#+8%Th^k3>4HkCSZIS3#e~FPF$cQ11&7&{fVNPe`(>yXI%@5A2e&q;w`2(p zmIHXvDUhpepV&x0A@=&f9!b(Dz?JDhnyc}A`A+R8;*EtI!WJ(gtry7q9sSm0;0-PZ znPxShJmW^zN!d1$(+5f2$M{N}t8?oRA7TEk!%yY`{U3+XKT&@F8hR=JbAf#;3fJsH zYu9@hexEMtyOH_!bCO@QuI+4@8=93`zyV3Akv4~?Lh^bCK=Pm(`exOH1y~HuppL4g zcAKPMv-YkIFZ2TDB)-}GWz7Qs55o&a%#>{#E>~9l`ygA3`IMgK?*XF;ZmZbo<$9w_ z$sU=e8%+jOD)*u(fBGM9o+vdO>tQgsZkb?3qa{ChnWZI;oUAYl0CKJcjk^l*ZOLM| z^Sg$vvn^?5G@W?p5Rww0kL|*q29JdjCQyNc@|{71)nKF@-Y^(h2$ z&Zx_)@W1VM^dp{iJEX$l;;4ac+Scs-he2!FFZC#LTKy`T zj@p4~N{R8ENpY)KPTCEQxy<*pdV$kn=o+20Jm8Lk9~%32`TfoerNRDr=+UH*Up)6; z$3Z6i^}h@Qe;KIbIFx*D(&ftQjt+8*qg7e3%0S?*W;yh*6*!B67tX}2z}-YuQ_4r&8ECKb`e>2 zX>Zx9NS4C89rq3O);hxZkk(8r=%KAt_z{QF|Giy_EJA@zAX`xvc%IIUDy5&3E(gnC zSZ-TCL!-BXrXWOE0S!2zPaQ$z_mGIA#Z(+Z;;w_{XGa>}V>S6m(M@m^ToBgGgiXp+ zt%o(>`L5M;psv%{1tOJQ$WZ3zY#cjndvu3A2yB$Ue&^Ato=zSuVpz!M$si+luPhXW zX1+VsAqExfLZ(e;pk=Rvct1Npjf3{CIf|>ujNU}Gid|@{;+g~8T@Rrhy3`CNWgQ2} z;aCQ=&u)L50(OT1QZGhzrTNnICTaPeWECR`Y)ph?CsII}worro5IcR;qpSExTP2wMC#H5XH5Qs=!fPZ_C_}8Yz26R_nBvIns z+J(f&d>>YTjElir0NPn-NXnf9JG)sz(0a!+9impt39E=*n7xc0S;!YzIG3Jz6at<# z*99)sD7xy_@1BoxnvN8d^t4S+!ZCS86I z&*!?!Z*W`b+u8y!gGG6P@tQPy(E{sE<$^M;D8Flr(@K$G&RWj(dQhGZ!xJw70 zyQAbZP93T71I~tns-qQRky8d6u=*VH9d6(S`13nrw7SK8EJ-hi1lfZ?=3FKmA8?{H0C0O?u z(V{M~IU_rb?+-R8Wo>3F>T9Jr`sgKL($X=3#!8EFr7H=5fI6Z!#0*SV$jQLKQ*r?+ zTyH%iD*XmA)ptQFxxfa6MOwx88t=F_UCj;A1bNJ_S!GlQ*gk=_DH-k`@1=VSTKF31 zHXan{+S|WUJ&gEqZj8^A5HgddD9K!FKi4x)Olm<7c|xj%!<1eyX0DXMHHNLfxW!d&YhR zpxw5#0Wbf^stwdFZJ*WuI*afNUfu6lS9xXC$-G4%ty|fNi#Cza&!R0F^C}<~aST4o^UShXh2s z&@~;6A_3y=m)uPaw;$*V%&&I`z&^-Wzg0i&ZHxPLx=Y46!F=+!;aA1>lzNBzSN$$} zD;J6Cf`qmfv4yLuHOmPoA4`H_HTU|u8WjBm^vOQLUU$HwI_n49v@G7^2lMz00_*NnL7 z^B;Pzar#o*K5lp1f&x0g&b`nHbM+H?0HrxM3FYFCkH^TP)7A%Elrr@kqK3c0eO^RS zC}on94$6q=a(X}aDMmQ)J;9(;@4gy{|2D~|`QuX;>kYHq6y$~Jv>)&l+Dih?q^q0p zI6BUB#KXJJdTu98xu=or8~s0Gy7VLgURQw1x`$p6R1rme$ur>X^5o>`%4E9DnbMO4 zrt-M9ME;aWKE>8Rx6-@xw{kD%tuiKGd1+}6=wbv#C5@E>P&xW@Hr8;s&TIHFh?5IW z!p~V+OYNr>%M&(wug-c>_MInV4Yoliz}#;BuAVghh++YJ!A3t>(jG?;r93M!JA#R& zy_~2y8$eRkk>7ufk5eD))Kr;lS&#zg0q#Ny>)!DOV4$^63GMR) z@N;&ALs=j*bsNfEEP^Z_!o0Tn_cY6ZX6w)8%^%dP8oL;IwXHsh_4-|a>LiTDoItJs zf2Mr0{Gso-zvF#we37@ON`Vb_PTKYt=w?kj+|93>GJ4M+Pto>J>q< zucY%^4V01_bIdHj8o*csG7D4P@7@o31zunCu=37}_iC(b)r>;fG$hvN{c_eb0~yG{ zbkY{O4Xw0oYhg#IvX=5$+)S34f_sxblCoxu8ad@IxCaK>zE+e-W;LC83#eyymg~3} zfPCNk;R)gyT2*M=1M#tW;EKyB6KSx7)~qiGU;Phqvl$8mFrKghxfmL1boKesr&LlL z$i)E3{a{UIO`{#-%B) zOmUu2QU=Ld)iXYqvI?!FY*)&2Pa#5=t4ElkbYXT@L_KSBDNkDVe)Fq?*D;H>paEV1 z>8&L9O;hMl7Z^$+nm))M7zGB(J(w1QEqI`W42@#Xr@Q$d04<{ z)m|o+`@l@byF(IofslU?cmmrhe}PYywf0AZ8i;D%*p)poR#}~+4VHd_@3VL!qB?gT 
z;=E25@4E)1uAlSXO#DQ;XY2Widb&$SWcZ5d-0XOTDEv_poKAkF@^N=8YA_a}7#)Bte{FAAW$U?qpj{Xi;JYV~LVGPE5hpqoxAAs+kejq&x%@m%9gM#zu@n z6x0mlnB9^RohG+nhJ|HJaR#!KtT%2PbxT2a3a*WxlG#1`QbOHlz+%k4nwcIYIFI}& z{vPYZ_=X<@YY3(g{@lh_Y1TSA1>Hyzid1bO*}P-JD5!w_$ek5ab#NsIz!H0Ybp5nzORQ(m$pv5f=9n zta6u>b%%&fOKGHDP747Oqi$7|n7t2X@1VWLkM@5%4R9oAsV*7NUqjS1vir7LAg{+w z+MpLdV$fE2`_m2pc9lu%ZbnaO!!uZf*xv-IQ~dj#k-U3THw~lv-t`jgu!SWM17iY_ z^yNYX^Z~(bj=PeG;`4XGOU)^PZ$5$%78(Fdn3n{E+?*|lCrxKN`+?dFtNPu+v{PJw zD`E0O1TDzg)?~|FgsH$~j7Ml$^^%R$LM*PpVw*BZHKCK#1Ksl6Lynk5vEA}+%hpW(lh(WtP4)On@mGF*oYI}Vqi4Z+gUg9%Zpzbw|t8+bzPvku0H7Q+0L2Q z4B#~>|4eHal&H4EhSyAc4knW;bo*_rmzb1xLsnX%WFccl7d2@jqR%2U=&s{r5O&Qt z2!Z$SgYTH0g4rT^|I{Nl&3^gs##6#@m5Eg%ji~f=T+j_!BH( zdS9#-5xR>jq^9(lf29{+qQ64qKLzGWG<3x9!Y9Bu;R`YaEK^P0_P>f*Rz7&>?n<1M zuO4*_24HC#!%jg@tEk$R$j|zpJWmsFsUZv(1efLYo|5dd{+U zTy?lTb%EY4umsoTEoucX7qYl7rROlq->CfFV{xaw@GC_RNxll$V&GrE(ZD}YAJGY5 zp8^w}YSW0H6|ZaFLgbvT>+z}EZ`zlcKAH)hLh<*%JJ1ht0g}NEL+&v!Clk_+qCl?5 zy^{Kk!y-^=J7UE!8%Y&Bh!@f&+&xtz>W~WLaXN_M;n-t`c+R=*0`bNN>#eLsOR9Hr z@BnW+To;UTbouGovZBG@Gf2AG1!oD<3e9`VxGkiz&+sccuRu-voj|zI=ZH>@{+?P1(>Z^qyUY92sNZ$`1WsUa z%QE}+dX&qT1TA{0PL&F92N-=)jd~==R5<@w<~C^z@d{~i!r;^Xhkz{ZSx~WUK`q^W>8ZO8_nH}DYUZR-z%Q`D5kufJ zqA${}5XTV8*X+Ecxt?`MOUn^#AvZPGe6NX5nxMvj7hr)77`SUzUiup`%~0kHsxISp zh>GXOE@sXQSYsC?y;0+BqSpdnV$p$1SzqzB_nc?A`RCBIGgaYWi9NtZ`}1^NBTvmo zJy`7(tq#4amKZAy@tziB*JF^W7u>(~*~|>ZSEZ@Mtq)BHY@e2^(76b|Wo>^|?Ed(3 zkG*z0@vAW=sgG&y6HEjg?aZ`bIHmeV`KYtSTl`{cIV!v8I<;N2c0v?g^&Rd1qF|*q zW9G+F9hC*%0-Ug+o`%jdxCdBw2a@9SuYH)E&HbvKYW%DPs`E&`MT=R1SLd8P3{85f}4qewVjkb^{$>Tv!eORnJlNNF9MbWLC zYr%Un_fejP+=2D(fDlvHNRD=i1K-kl;ZikhB5G&x05ZNJJ`utv47_ws0LKM*7w0`_ zQ+yq34}99>n%~=$p`-)5OS3FNY|Guw?^qf~c0^>&%;d zJWaKs5?9QL<}_6cylZe&X#n~f8!|RrPwo&jr+HfWlpWP|)#Wq<2w{~x3?;N*gKx(j zxzzP?3&KLNB_E~Zl*X)4a}?z$B%!vFN_ae;0@|~{Iu8=*3y%Y?)GWlsk>dO? zA@$(;?dbs8Hi#x($tG?vEXrRU7H!FxU1YI=OTw~3HnZZwWWs2q-q3sPWyypq3}?C1 ztPN+MDZp2+Hw1r9ucta@%)5DRcsA~Bm!FNg7Yg%w&uiO)&0(1Tw7}@PR?wdL=mtZ# zvVAuu>iAokRC8&XAF^`rJo!}f9IqcC@NPa-fs!}UsgFP2?l``s?6%XBt)LLQIaU{R z+n$uDePk`jo>y^PPbHBm`Q`C%i$i5AIUZM~)YAR_&vH?u&CX9@H6ULW2qV-H2*-N_ zjCFL>^aCE`R^u$wX>zG)ph4ONswM&^T8GCvck6>DJx$XRl!SLUw#(zfc^gYcTI_QO zrB~7XVJt|tZKh+X4ABSGb9_-@bR z%YVxL$CCV{Ca>&O9cIkC6Iw#cz%B#kQ$iHCFNllu=%8Vo?*%efN3{2goE{CedprO(KHv5=aS%aL6K&_nR*N&B~G2g`%70DQNf3+FETxc>Y3egqJdp>Hd9oo5e01;nAPV1bg{ z;o>s*j=ckrHcGgx2Xaz>3j5n$7ZN9beA+-a0ErLVAF`8B+|+?>TH79awNqVpjeOy9 zi;2)ilH{9qZq)v18Exx!@;EfVGgxIEc3VHoUlGnkyA;mtBhhov4`M}>fp(d-k_GYG zxS@MbY}|c^NYHE=$M;2{UW{XvbgEj#331U>< z&QCxY*Lq-rOZa?omfs+NT$FyxCNQcPK93A(VZ8)V4WtG(Q))eOM0%}5U7o{V_m+D; z*rIOG#R^)|@Ts)Qnleis-|4?dGy@BQR;hKPx}x=DvBPl=PiT>I*sQAFY1)$PpiU zj-Ykkrok|rEhYWvMKF##>Yj?Vi)-8gn27E{X&{f>S8r?}qg{-LWfS0W%z_~k@puJ? 
z-wtaK**qy-^CZb>8}02Q8BYL+m=$cgQ0~fYm^^uFe<`=HGZ$w{;B{Dh**-LY>6`rl(938709B~eL^Uh-myRllXY?HC?D!5az!O&#{E(y z@di-;LIagO0ZGd-$gBM`Ub|1HC9UN9QtyW0T`E!#Gc(EQ5TWUO0V!e-NDJe{;3uTj zcE-^dwxp|UaT_jPO1$a1bxw0Zb%mN_J5r1%DzM}K-7$H|C(@*S;Kkj}S?C-S1A(M% zpOq1QMxSKgy{ihg!e`0d8^gTB$C=;|S}8gJ3T>6F(X9>AOtSVVh6s*WWKxOGP8@HP zlghJ{eP%~)#==E^<{5BcXv^OJ{9s|vAW7le8G*;mml}m3+z}Cbs2LGDq9y=h&h*5#~2%}%Fd zqELkX=h5W|QTYKZv%X|dP*HPdAX4VQ8rmOMS|F9bSq}e~N)|?h$b~Mg~vFRqFObyg9LQp zPBYK`26$}1-=K2uBWycGh+>BA%KUMI?xX^!Prw+3Wta9Om{Jj!E$oh6DAy~`uB6$v;D}8+sF^ApADT9ce|{w z9nPE`K%(;nwbEZrOu@;n~)+hjr9qDNB;KQ1YL&S5vp) zrz-7~u!pfyli8{1B+T6 zh0U)L>1i~Uy*x*Xm**UUu1*Q;8bn)Fx&I7XZ+;SU*YV5$J@nxA$-(qXB1isD-DAXE z8rF8W#|~2Lv~RDS$9Vt7em?F6>KNyCUdjk^R@;*q;?MW~22k{zDt7vpMq=DlL`UxUXacw=)BC@a#hNgp|M(x=l7?X8GCd*WzM(WzB(WGT@8 zjJ?vAG_^q4PtdMIw?4QQ=u;_Fy0yMK9(U*_u-t?kUypmfu3si2kZP=Ka<}%Rk9cvA zTNK}$!MgdYY;d%p{O9kD?HxPWWLyr!^}DL;8!I*X~mprXz1h*>Tn6 zc?QnU(uec5{+>KXicREw4qyFvs;k}Q_Z~2Zuf$NmerunZ_zl(-i}TQZwv7|qW4Vni zGZRZ^a#(3~WKf#^BREy{k*7(mH+S&jQ@l;OEPBD;l6jK9Y7+dezs3uhCG8tQ zPG+s75%{}Z{g(PXgYg?bOc5GTmSCEOirhv|wFF>D)u*Ix8SA7jy(Uyj5K79h+AVFP zu6A*rSx%D#un^tlmz)FA6H-P)u;bUMi+4;eF&eu|Ia-&4#3Du2XAT~N=?_*uFUK>E z5_UWsWxWnU2skwIT8wU7JslTDN_sA72rMU@+UFJ2PClIpUrE1{ky;`W7ptk22&UT| z30&8x9-xxaB7^cPvlQFVddc;alg98!^AMd*3sU6*gxvkheIusUAsRHI=OwFO2Y|0d zaI8SnsjVxz!GqPPdTkjsW-in@Y%r8JpErFaHc>yKf>Mb_r2yTQfn&Ag$#J_}s&el{ z!{|l=&JLYP9jRq%nmLzhsh{2|1KPN;zaZGzzbMvBO@H|6J%C1uGD~goAVRE=xC9`b(r&BZ&US za$k*_rCOFFEGBWQfXRnAMczdr+wd?l!uJOIyQrkzl-PDltD|`f15*f7w%8xALfq{R zaI?6!clzc$|GouFU7{GJf~$iU(cg02P(2qZVo&VqIZGPH9cXsq6F(tA15{vC?6pb@ z9@eT`D^8Oewf4`MnS;+67v1An1NnC|_ajnCo$R}_cu|O=Bt;BJCf2U2UvJl+-1zzI+Rg02ivo6)XU82LJ7z6Vm%XupM{W`7k z4W#L6WC@^{j&^X3CH1Y`4(}P^aOrb4`c9*YW!x|fI8k>gq20_6SY`QyIbZUGa;Xni zs1gvs*#GqH>Do_Q*k@5O^3l!Lo9~8Y(QnY<3oSgD*J@M3+U5En{XF@jWc8XIaFZLK*mi^r>5#lypaQ3nR0x+uiT7QahxQh6 zeS<|LUN!+GZKeo(pmLRyAToynq^pYp%Z~J{2geJS#QO{vZ;>ihc-7Rrh5!`FAD?k|Uxj_#S^HQ}ui4-z^leRexd ztiHs)RPhNjjYC_|#e-X>ZggIRz^_JG@sHUrEepDwXGB_b+qEN0ih?DC3A=mGHDCNT zs~#Ik+3u0!-vZFMQJ3{WyJSYE?UsaFs*>}xSa zC8Sl@sxpUUFV5~!i@Is| zfDhU|;_D~K{f)F(H)n(Lvlp>MNX* zB3mj{n8y67u5Fg|^gNPm)Z4jp3QR3NaA4}rnMT|xKq9?E7UH2(4~rLq9|HWEHY!Q+E>YNTtd(I2cGW?_EZ2izCT zo90V9fNN;Mu9@*VQOc+*)Ux+tv=hpYNSwAn$?pTg=BF|%xbM)eFAPply%cFp+#dvS zuW+^0jl7xHS438oCXuNXJov?t6Bb5AA9}a`Jo;#S9G%jT{FleaLOhG#;V1lN9`^eV zjMSEugp&s%Yw_wKUM3ofVO{4=+3U2LS_t0PZZq0`i5&F1No(i-P-f zt~d18&~fU!3?k*7j@tzKW1AIIhuOj-<}mW@0`sZz5tZ zaccY9#)Dhk+1~c!y?#1+w-Wx=dWR6pfE@?^N1bc3rfmvm&AP`OWjxEci4jovBN%~A z`795f;w(f7m5WsJh&PMe+mf5!7<7YORE$`9E2lE!9g-p2T4W!}Fwr~IRdPPDIxvi^ z4l5^>ROh2tH9|k+X}pI(udz!$W=S+r|2iY{rNC?8dLPOVO=n9A?ey<=Mnse@4a_!t z^L)D4fP#wOTRz>am!Rw(-{Rv31}yHkeHCy{x0V=p?0JDZ$B2Q*~5( z-mWdm_OJ+^C)bQgN@zKT9KCAz9{tr(Tbhpha+b@v(^C&A!Q4*TUvFk|V6JJ7TyDpX z5I1me-s%=Mf-66E?o?v8BsL^SZ&_kEq``#j&{HT<<2rbmbtC)U9cjV;fg*K>5g~tyn#g|wDX+YFJC#SFiX9UmYHrb zy{i9Q{VrktIVq%l zB4={o2fo`sibfIRHnvhPuhhIfo9OW4Ko_+QR7ER3UD)x|*T$9iUN5g~9EW!l9dA)T zp}H3}H|V6|^zHgI>t{y$%>5;Yy5ifAGEI_otMe6Ytj3UCp~9x+9D3$FIz9Ywd83^C zW6bRzdNYIAk1u3!swHkY!0GbFo0>y~_5(RPjTea2|9UFTB;aFB9qW<2bn#oQ;pI(h zc9m}%diOZ?x^%C%d{B-&K@2P#wfB~dxi}V%(-wZ0WE}=e%kzNxmT98D;c}Ik&G%!x z5H`o=7{IyISFTm|M%JDsI=0CP@YL*rsTWhWM@*gRXXWi(3D4r5-q|$B8h9*T z-#Q=c3t7X6ywW}$3v;t=SbX+nJgjA(0F|SD4|CGx(q`_dtBQZ4NU{$|+*mF;Tcu*S zPl*k0q>n`#ay-p<0ypngwxLBxr%E4nj(%*C=(@1;Yg`OH#;mG=L) z`L|rm-Xkz*FXc(ACptK&hJO9(O}ban!)2W`wg?);C7#BH5Ab6(YJ`z&st>I3rfuVbA}E1$<3px&Xt@LBZd2f$qxomDPWkugwIc*7cxQ=IEO|+ zs_nkxAZh75PK>gL zR}$K#?Yubw(Wp7q$~s5gi9wCn&QeEM(+7YZYk&V_N+0i%{7h@8uHb>sp6BJ7gI%tTBjCBG_C@5Q@d?+gOaFsHZBQ=K3?uS0DNjy!a^6_@!<$bMFPS#ex!mon 
zr<(0kx1l{a%ikpZ)z9>IrfR#;=$hU11D^k&JNI_hxH;0hqLX&8`Nu&XDF|hyv2*=J z`pUa<)aRt22dVDhXgyk6DYitmg-G{dS4>hp!osqEmw`Ls_9=JfIvtRK%Y3wA(=iJ`>XO|BeKgAp--!Sq8^Cf z?t#<4gm0i*GY1ULR+pHY$mmjvRL5TLE%|t7=FO>a_s60+)qWx8Ib0B{!-ILBR5|D+ zrwTkY4wDhW{rdJfOWu+#p|bOk#+T+?77Zn-B}swNBE5$@e5CEPqrk6Aj!O06T4J7i z8SM5HBojL3GmIVTCOtRc`w&f0dJpOWs&f6=G0Z!%ru!HN?k?UFF;?#-`&TEwI2JbQ6GyqVb(WH8uJKmBFnq=BDxfcB%te;}hv z!oxc7lx488j+Ot&p+o=rwuQeWf+&kg89xBE`^U_vE2=1VLKd88r6-H*m)qhs%X%DzSr|t4gPCt*6*OJ{FRM9;Us(K1hT422$arogA+Z~*7B}10? zgsMChVLMA{5h9U}1!+Y8w-x~1sp;8LKb(y{JQs5jIv9xzZfW|~8SDwVZ{Hu;g;0}2 zVaTvFsGvGKuK3zJX^KjehyV89F%82`(A9hMG%SYq$KqMyez@fI~e!ylR_gagM?guXv`)ps!7aMcPJ{3Fk z%@vJ`A{G>L=*Vpl+ZRN)ppA*YCH9UU^$1adIJ$TbqK)Ihl{?Lb$!1)5%kI!rif|k7 zCg&*gYEJJ6Pq?-FS-zb}uu+=#H}(JrMVH0LYO&oUu~%r{9h?=}F+Kk{>C*#L%E|!w zta8>Rn^b73Pt4rEeK;TFA?aMP6vO}CJ6*oxU|i^U4cB={b02(J`zHgB*MsxgZ~X?C ztCvupK&Vw##vt77edh|E5(ZO|X?(E5VpF6E4%)TGWqbEP2DhhV2|4bNVssKLO>W3l zf{CJ=-$sMY_g})ZGyj}i?vI0B+5#U4DE}N|ju-8}IgFdz!-SeGzg0}FCpu@0!+&L~ z5!Y772-WGLBeW@sf~g&yrf5bd%I>WqdW04H?PLhQvYO2WL-zM*SLz#Kc|xS-oQ_@X zn6=xQMzpc$pDGkWFDMtpxc#QzusqUF-loFNP{1Ut8I=EVk3exUYSSh3F$E@CVr$%; zAv~n@Mq4(_VkRNOa*|Y*D#9SBfq%Wax<2&Xoh)WtAC~UzIB_{DY%1US_dYY>)FBI! z67DG3_V{rf`Pbf6%(4b<#bb6pp|QG)FuX)->omZ-;ZAGv8!=c`dz+wjTVF}WcZ0rc zszPusAV{2R$6I8uhPIs8(nOWfq2;R7s-;mKZFAym%s!_ODIgw1gEMd0@v?su=@g4bD~ zHZ`r(z zR!;DdB^~!7?_e?wX`Tduk>oz+7lA1>59W(lhcb<#Ra4f1?;nL@TkJ@PR6lvWj zL&`25PV9%m7;cIF9n8ztPfK3;b^&{6Nk>DIp5?PC?$?XQF>hV@Iblq*knC4inmXkk z^I~WT3!cZAmv^i7Y0z$ib{?fVVXOk?2S5A%EB=fl=pKYQ+-{vyR}qX1+7)+WeGq;( zD(J}ke0UkhJ_EGf-^&|)#Lg29r!1z`4h`CE!Y2#p7lyxW6r-NPB#4=vljxIin_w{g zl(-INh`TweEY`x6>St@g8I)3r{MHu^&~sp&EpbTXxAw~)>`X2; zNa2S!x5LgqBEkeG;)48r{o?#x4*N?F0*)Z|nwSc?2hCZS0~o2cx$6iqPt(D*EyCC< z1`{)z;lxB$5fjR5|DwfOcKb%gjl=R;7=Em=d?9gI3nBbkC7E10US`P9A(}r)VGr$0 z#AuL2!7sUE_DhqnIvO+gcEo=D3t=8(u_VM})?#Su%cMX2*@3hstsfS!HI^9m3hr}YWSgEN(R|uBCNRUj;;A055gW?ES2aX~uTrpx>$rrHH`tGuCfD9PNU0)PmQ{hMp z@zob7_V%_1Bd#$)an7s*uGiaMY8m+oQGVC{6+|u|_3h+IOG|P6dyuSfH#O5PV3%#_ z3ayP6QY%-@Ww9&3ER-57`xmh<94=3@RbXpmP{wd;3&KjNa<~Y4F~drc;4ZdZW^Lv| zE04UOgb3t%@k{4_!VCW_W^mdxE@L84Q3ew;iU|ed%mJySguycq&SL&Fh4|of_qWJf zkU=3PPc&3U--?(HYx%I8{l&IlG{j5VWA3+=n1Wq@jP30ojDXcq7-ofzaLX$hu0ae0 zx0M2?iyIdGP5G|^vR@Y_91O%T(Zom~)%?>UjP3;UHIIkqtioL!gw@ABYkl52dp8?Gpt ziEK#h1V(CXI2~9z3YAVSVl7u`$r8H9WAX)k*5*$s^;%=35Y1kM=tQea@KKJ=+`9|7m}quo70|}u_7glX z(VXg`@+Pp9nK{4-kG+!|`!8(k@(%~x`>x2*dcVQzhuyMT9U*rBBUwWBlI-0g%VPK~ zhKhH$!djHix{uLu8yFfum<+*LMC1HUW^;p232|Q1E({|qWJ=ZeW#>8#zjsBVBb-OZ z_n~q{(F0*Z~X^nmH@uGlaV6_ zWI0I03#mHgV5{hD(`(toR*^GUAf|w=BJf3btXWCsXu@70#WVx6?Mm)x>KbC628)e6 zEdWeu0zX2E0E8dEJ$;p-jz~4>#k|veT5l=07gnZJ@N! 
[GIT binary patch: base85-encoded binary blob omitted]
z1S{5&k=ej8x_KHj7G3Q@)17r@l(KAGiPx0na2<9 z;qGD&50ezF)pWlN?ERNt#88rT1=?JPYJkWU;~c3>y9hu)vN}_%Q%+F^UU-P)Zcq<) z?cSg()>O(#bDH=~CN>m|WGLx|8S{8@k>+Z_Caq^B>fj%|IYgXXBNv&iSSgF5Zg1H6 za$22H_rLEvlA*_X&mb69zTi6YlpcGCVowAy1k3R2emwMWeHk=nb13yT_yvYt7yCF0 z{5_5Z4TSZjs}b^Ketv3qlW+d5ZOR#V`5Y#qWE~8TeG0qdMq0gc17M{wuDL!#F%e!* z-ro2K_N6beN~14aO@@wbj3Ziic7f_QdvOz@+h|jL*pb%6csq(g5(?iq{cXY==qx!I zHR^kYm?R$2?T0a>nuh1pnl&oL2uyO`ui_dMKL3rPJ*F>&8NpMLBq}^JoTJ7Fq>2QY zIHvh} R33*`@~`hOCTIsT1^9#m;xG)mZ@i~M?K_5q4P$V^K8B_1wQjvpQ6YVb@x zbaE}%OwuO04jC{D8_j{MZ#r@{ueOyxkEA3z`aI&oC1*$nxnomZ&KN|)cBZV&;w6b_ED#>!vO)_Ie`ZE z^vlBi@^keIkkS{Vh~6o}6p675mJ1b&y2S-?$`904i|do#wmU@iBG3A5`bg6|cGjIR zW7mZ%NceE+({*Q`Of#(@CZ679VNOSebF|V*CYvK>Z&Mvd7xjf|VN& zG>$hnl(iDzN@iY_;mqwrL`U@07lX-PjBq(QM4IXYk75QQFCG_+uPolxMk=mS*-kq7p%kc~w=~D{x%)rVMEx z?Tzl}27z9ERVm{1(Vn;D;SL`5;}6u`X|xN4j2%l0>XSG0i`)c@ZBZ|GFD6}v8s*J6 z*6pq-8Fn*>RISwomZ&-wK^;z;grTw?E@YQL@>8`qNqzEiq}}H6XEAB_i@iY@s7mR0 zHh46Av(v$~w@uAf3+-=3Ia8|aQAD92U7n|m=qhX$5Oh6(55|_#IOWz2wNIO+doF?0q&EmPbM4p=7tD#Qc!0gpQy{@(Bx{+Q*L7a8`ai?&E zOYqBzF5fWD_~>!xv%y$rYojusL>?2aFbDW z5y+)0Zt>8C-ibbaKh zZ^|}a3fXrGMs*Jb;=&L^&K9_4>?@i~Ni%%?ZS5@zZ}fRsaFY0{T^R(rSH88cK7M3` zb~y88cZj}6+eY z*-9Cg@k}MtBhE1E7zDIp%gV4tQsxF~fiO{0P|&kAF$x?F zea>5$I$h^uS~H0jd1mY0xKd8T>QJCHYkI08)6gus>)k`nwlBEE(?W=I*SqF}Ofz!BzQG$D z?DNQ;`8i|rT(ZBi(vIg@Z}^4#t@0j8@6#c)M?CxDQ8fQ*$h2h9E>pHel;d!PrR@-r zU%O^?AtS3xIW(-lqrxyyyaoH=(?Qm5${ox68k7Tj>X9LwR9=Xzk;aW(sv!;aoDBFf zytp8JejfgKO0%?Y%fe&e!~W`?WABC(336OheWD5dJPmwB7g{Rn(_fWr?@^N^r-^_T zNyv2v>{hx)jA8uKA_Nb=P2$_mJydwH#dsfBcjZ{(e*33?cgkGzljzsLj<(>z`0T0g z|4@cqK#h=+-7I%4G^D`)+GInkLD#CK(L?Pw{YReJIY!S}lB}vR?!n8x9~MmQ$ySm+ zZ>z3swIX{|8gh~h~)>R%qK4Yys7s@?LBH-x0`D$Y=80)Vckwq zv~WywxgFp__|0s4V{H>;F$PsvPsn|?(2lu#@)|Wj23@53zNC`~F<8N7B9?TfYZ&Uh z2x2@qQ?gFQnR>>Ur2|8>H*BU<`nLXT%&nc2{D7upB($AFRn0VkukZN^C<4i?<1Drr zVH(A5aD;z9vpv8oy@-`ctBtA?tVPpSua-)F( zLO5+gWC|)C?RFpg(mj6v2o!?xu)om==yMfK1Pl>cGOe^dW(vOF<3r7fEd1UONoxq3 z$rT^-DEl*rWQv<^FWcX}d!eNB9QhGJA>5T+`B(2|<+=qrHRpMX9EL~5v`bH)RP#G! 
z9o@82<%stW5cRO%qN3>0Xt2pVHN%$P;HdM3wl2bzdkfQX47&SnV1ZUY0D-$UwGXa zN=WLA(onOADNKA%clL-_dXL`}Fo$tq6GyU|BayS(M&I4)rNeO=^bB_E%OAG*l$|^F zONR{IndG%a29RL)oR+79h79la3P8wv{WLt5cm(7rctr%1`W|`@vL1F)dtaA=u#}f; zr`(6i@ay-ys*MM?BIkXuZi*ox4-$6XZ>-xFkx^AZj39s0v;Znjt{t0>ZTCd5&W$;m zWRKh^4KXZ!H^>U_`QZhoEUc3br5m%M9VNZg zDTe13r#Az$^A=rDDiZjO})3Ye()VF zf=07gx0R=J95!N$b6~j^qet9{BXRd9^)HF$&VoPt7A#%6J#sIcW7g2UBlIU2gL4Dh zz14r-{k;N|0&Ix3bXyPfLahM3XQPY~Isf#`#`{Y1h{_LFtufvLZRRuHg4dOm=U0LA z-+uP7PB}%j@uwdoW7Y&=P9r?9?9(D;gs}UHV0rSBZib|00Ux|_B|1DT2sa$zvV6Z%5@rSGuP;NKyNz*3ip!$+dlRT zoryPn%IkQrg_pxi>%tnp;w!Q0w86}3Z7IqoB{>1OkPR>R?3XR^0w3$lg`{^*6_9Pr z!j0_=k(_|Mt!JPWB`i^d3eR@dEllj;^j539LRwj~05BKfn7-%q3@ZFs)5c#TWLZHx z`R#ESU&T*!uI$bLc;QhuOXw1sZ=!D@O&;}x?F-w}4>m^lEXA>jRP2Y3K1{NtATIUA z1g8N4O+l`b3!z$~xH=Szwyf?Dlq3;&an6$+&Zo0{agVmWom6SVj2c#rm*eUiph-+> zux!cJ$yp-g)gTNG(Px+9;j}GXjP3li3u(HC2pRfF#Po*OeMB^l{yG<;J%|fvmYyL$ zCh@)U$G_V2xVDbtqh}tBLEr@h$Ie$QfzFn2q938j1Br6HIo2bJMd}Vq8||*C!(*~H zJ+f4LoBBtu#Z22buX#;9&lWS7Y?Bu|yqwWV!&j^+f-1xUqfb3#q+?XY8~&Cyv#hJP zg8l<3=p6|T7~D;J(62|d0tfro;{-If!->yFuWrorn=f0+RN4_=O9w0nJqO&4k6F|$ zR$`aNExO8u-npUOx%s2)8v4rjv!fF?0{>c6Bv<;`OIfZ3@{X;*gq{X*k3xqp9;~b6 zsJ$GZawNcXD3w6I;O-)Q>@`T%DM_dp2cz1Tw~SnBNIu)pAfNrI%yOo^*Mw>-m1RNW z8bPGu__HVK+>Jct=g4He)9KMENRf&6q&@H%Q}kYH8Rb@3Sn`m>9$X6bI)676ot(om zzbKCs+)oy<^dJ#&N64A$5KlUf=AL|v>#`yCPbf1N`wk=ZwA1+2${e0MWn}~FJmDyFDPo;um@ikvDCN2Y|~84 zM!CnFH8I1TqfD3!N7musMLRjGV5Ddxd@(LSY2~XjSVZxB1|EpFTt(%ei!aK3GVRL= z#A9fcYcBUc`tLlpW%!$|T0E>k+Yj0|FYUw}%3N5@i;yA&oG!cCX8<(hOBmg0B?2od ziH|(|aD4E*DFKStn((lh9glt~?93TDQ=B23eubHJ1cnkeMfs*@qYAsxVviyzmH|08 z*Y!b=2$udW55!Ek9O>(_5mGD%ZJElRcC0kDA>?}kX!n)Uo?!(r3{GP|&l^5zOEd{Q ze>j0ne3C{YacJ{}W}M!ZXfY(?MkXb}#3@CCm@FaM_hN+_(2>P3(!0*cr+ z3pzBaavO&t^-)`XVKXxjo% zvl>8bNbSa)3W{op{+j+f#_V`}j+K=7Aev+joGD#fLPcKTZX4Z;C#_<=DI2lKj%b%l;)3+;8+z=M%(|A?bVjSX9&(YYf)w&+Vh zFjJ}^iT-dsp^%@?KqhE<1?(ZUSPI|-AHbMtX#|o9u6{yr4Hcs#Z2fDC@dtp}Fv;dU zyCUE zm~P*Ed#9lO#1kCs2`Y{iG7%!FeB%~`W)+4@fRga)ntLe|Ij9`bQcA*H^H@Vzzp8=H ztB43P1Stk(!e9+t8IuemUO}SC8HD;8EXmILAa^=M{&i&-a4CUO|$03a) zf#)|*&4Sa$-G!@<`u84Iy9z%x^`7KR-3I;e7Mb8TET>XW!Lmt2my|u0)G7B!8&O-2 z959%vkDu{i5Bk!mauCG85W^K=S@ZHbXUT+(={NcK^=`Mgjv7JJBs#KDG7v>rVaiEQ zX7@!jTm?JE%{f~g$2^9eIo=SF`_Dh+ZM<8a5@z}xHMf^@USjjuA*U!}{Fl60D9eUf zVH2>yKUho@9O_tm`Qqo6^Yu;Y*<4BQ!EThB@L>7Tmy(Gi+fH7l-3@XLI8AN^LsNjI z(t_sEg2<`&~a`f&uY{4w_N!@`F!R>T{U3lctM^ReXD|S&UhOtc6yMYyER@ zi8DS5j>2ckUNWFp=hjefDAz#v=UCGxMF-W>tQR~CQo=V`ZiBaQzw)wGvpgapG$|M0 zug&=JEwu1h4A`-tP`|(JXaCf0%Num8W!!r@tpY?J-dY~7gC>$vXvBVMc0_{Kt4#F; zxAqwzO-h7oh4zHz>!ge*0S&@!P}~ zjw!RTWme{hY_Y7vRU#O1kE6&Y)_?0IJ%)rS_m3UD*W1`D9CAm?-GJNU1$-N| zwfUzw*%LYl;P*d$#WlJ7?WMq4`AAnU(XoqMZGBiskDwLnVt+P>Pb1PFXY8Z%e7D0T z;v68FVa{xKEGUvg^YnTHTIys}hQx@aSA_ytG?6j93soIketAnmKa&+V51s9cPWuY0 zXSgRN%rezW%H?~M`xDjL<9$0T*dwd*9e$JhoO$(rh9CVu8U%b}r!P)twmh<-zQ!7O zS3i@=B^3E7OUJ^--g(&k-teP=$%i~cQh41RA(E~*z}XA%kd^Odd#r?xB_2)&f6oM9 zLy@u)D{RyDxAbP@n*zczhDfquomgb+!Iz8ec>fgn*aSN<^G(081vfz4?yKo zjSWEX8KfeJTgD%g8DsT(Xwa*hNnu|Xyi4$y3y^22eGKSAA|nPrWfi_BZ!QBB>a$_?48Bk>{3t{ zs&>*DYo{d`K@G$m^JjI+XmE(Yb6u}KaUkngICmN$jJ67Hd%1*&uR(OsWPA8$GvMct~1}_bFl2%>Uj4V zYPx6+JXW<)GL`0rFq6U|vldETF>|nSaZ2JE@BZ&|;Yf*swa!KIvTc;!^dPWYIFJr2PIf`8B`2f-ley6sVUs%t@jznO^yIFsel3R z@KsbQGV*%-q0|s(-3Sf&B|$~7;No$kka>KUGurwi#Bm?TqQWoN_axXan9(`&JiLD& ziOPP^W~KjVW1+xR?xYs@TH2MRt;UTS0|PL*5P4+jeB^2D!z66PMK1T-I*XImMmuN< zbeycwXL^dl)I798ACyaNN-P0`-0(^L>keKoVu>CM_7*>64r9>oRXUo~#G=zH3Lv3y zI2K**mZx_-_^>wp)V8zFq^MB^$LcfHwZAf;1qM`sTVC+|A%|vH{hv2`Q0KR!ESN#y z4YJn{_3$?1ilk3v`Qr5IPeQiS#+eF$Uft<9pZYN|%V8K7P^mI9FLfk!*v}l6Ou1Yv 
zbopFL`MNU%?kimfO9M1&)P+XU=zs#1KY~XHrA>tLudPLGFY&s&!Z^uFP z(?<(^hF2s{S6l`o_~hRYT@DD5|2mozSotkH*eI5>xLQkW+v%Bbc8|xnG{k?ynVz~n z`c1t-a9*&rBs;%eSui#!L=hx1ldczfcOV~#Vm;#Z)~@f!j)FF9JFc%m1P8Rp<=g5| z_FvZ5&mt4s3f_Gqp~hyPvAkHOzc|Ebl@aI?^roJ~jZ9}4G<_v7yB_1CE@8uQ=_-4z3ZY2XX>&_ z80_Q)x7ag}h=)u@ZKGPEzM1A>*K~d9J6s%~f1LpJvTxvrzkP4>tN} z+zZ-C!))sB9h9SZMf<_&j#}GbFIf5ETC=UxM~>j?4FtvJA4UkD5B_|3V}!GgPGwnW zZG%6)X1KjfZ>*q+GDd^xkT3P|*Z)z4M<$dUflZZ7tiF;AtpfD-i-G1grX zU%nY(;tn*N;;tpXJ>M%NZCzn*=c*{MONnqbBNTS3hG`a5wZgBV3CIu{YV^j8YnyZ-FbIzh^E;;K}o1#+0g1k5xdro&lP$XT6(wE)njL(->yVsd8Fz@w~| z0Y9Mg1QNYxX1dQ0znH|J(%FEfvN8dQT{yUXS-Zlp5A_)}djvI~mbpPhxOslb8QVu& zhvC1LHx_CBJ4^a=!Qgdufm=vW$n8QFgCcAh95WepKuiQ z_imE-bFs~K$>;}Zn%KArF?uBgYIHs^+AA>9n%FXIQrI8G{}!lKc-zsQO{T4NCVnAsD5pR$~f8{67DRNLZ6 z+ulvHsou#_irO^`R&+`d)6L~ZX8Y<#nhsRn^#+OosG_$tjl*=JPGhiJQ*s(cJT&+v zkU3AKF&>Kx+Jy3CxO(nQY=hDcify3rliRAkAJPS|aQ4%RQx?G(K!a>Rmk_c*;($az)%FdQ`Gn|R3*4f)eXTV^-(ZE5l5UB)c za&z||HiVbc{p%u>zw|l#h5v{SHJs0ftF0u!EU9C_6#y6~%mgK=t7o7Y%7CHj{t&=I z<9zx2*dQ%@JX_}uO7%E7`+NMWtQR0HZUe);zvqN3(sBjg$LlZ~)udmgmSoRUyMOM@ z4-`%(bGQ7q#(@L0hgQIpP3PRMKnll47XHFQzBz|8t$vO%YTsD>~f)B1&$|IGpb z5L-%e8^j&(Q|5bjSwGd>v*V2W<38^f;L~@ZOS^lDg$|tAe!k^X%5VRBzvK9Sz4(!Z zLhq58m`>{QlJClLS@UJng8Vg@o6D^9dxYA(#*7aH`&mGAh9OTH%qhJB;EXVcz!EO7y8vA4NJS)X*0=q|=J4rKA>lOi^E~_>0`AsJXj5KofcG?llt0`e1 zedi5Q^4bDmJT739AlHK+h~$=~VqUFcp)HIIsa$N&!a41^q&5JmT`YgTz>EaEpk2B@ z7nHufz~x029P<7n=r3aicpArB5ut}h#UjSx-S?PPR z2!qb>`$-A?Fg&w!Pd~xqVQL`F&V{$nK(m!gAweV{TZk#P07-;W{MeIUk;G*4a1u>rHEaKQ*c(|?AP@IPudI@TM|TR{YK;!%LM z!?aPN_P9S?Qz9g&UQe+I1nkHF;ATX{`_~to$U(rUJT$_EPViKJBDZJeDa@7ufBvI&GDG(_|OmQL1v;yuUQB2@gSGY zqyLY~M&pl58vSeX|G2H4JqG`%%O6n5B!Otvh;Gnl9q1{s0roO8R%FmLv(@vH$a)=DYOC~IQ2s=U|U!qk;T z=Cc4mwe8>(>zLO<7OuTRo2oMw-XgkKFF>*_Y3 zfI!e6P*X&qpF}_C^9LO1H~@f=VE%g4+vB1`FU2hHK2YX+ssj^~xMlAlDC1&5gRd9@ zJT$@V*A>fJFk1isL4mE7n~E?7GJa(U*e1XbWB5QJk=JsB6)Bns9k)+(8>Zht$L)CA zqTPnJwMO6|=U;s2?Q^LwjQ@n*en2-NA9C%#w+|V**9r`59dti0Ln54u~C)As3nJ z9%_nCApI~ICoOV^Y7O>kbVD`4%wETzn|I>* zkh#GsP$#5{azNDpNDx)|(s<$6K1Hzc9^R`-OVm%2ACpVm7{#h`u7v%!+hD)lV*Y(# z96tC!02=KaVb#gh&IsepTgbTu)MUN8h~ytNXW(-ut_;S*m>p_2JCqN39Ld1)Qlj5l z!+rvut3*iS)cpQ`pSUP9`s+VNH)uKkqaPcJ){|O6OfzDSj2ZktnBZXwAm@-QW4;A` z%KQg7cOx_aPlb)V)^iDcY9p1fjmHD<$3LXG9m6+8{uPe01u z{ih!;iH!cy_s>ryEb*V7e5UzS*B+A7TTKAH5+Q^pr!~v!JC1JNi+_Qv3cjQ8W1BZ0 zGNl-JxH7tMZCV!IT=JtgPSb1kacw%&N_B!HPNoe8em?)`;pj+~C9VIyfaAYM(aSbF zzrFJB)CFO=1sddzYy)P6iH2VDxSb;{6pAQ2RP_7cy!S(`e5djpoHFs|H3E}}#MDT! 
zO|(-suy2F}`4%ekfQ!?bjAQ6G=HvhNjc@y_|Jl>CV*mb@D!=Rv$K`$_Xq#gpc^5w- z@Nz(X5d>_jgL;U(Pg>T!T(fJt1%0lrU5&v)&aN!W-`q0M|AS3q6EEvTfi1y@jP~EN zCi3`PHzx%6>sX~E`fIA#Rh)zOXF{D`B^NyLfjM!Nh|<6vjFXRV6a$o zmDN3a34(+1p%X|3R+>W!U%}hwOTc1;b9XW1-5Sn0_$V*gIE8JHZ-r}o z9*iB~52IRDLAv;nH?dcmO>&UyL=ViDWeGDBuM`rX>&v;1IqNv^_tJ@}_76 zdV0;b^DDr_lV`Nj7ADM1*1Qt`PUk-0(&|8jfXY4T<4=*-pu(en4y-p-15gYtF+*Dk zi5(Asiqs!OF5KZFx98Yv6_OE?zu>m$IM9rDwKl$yOzE|@*#yU(1VNLDwJB(8lY=V0B}R4Jj5c1!cNu& zh)uaN#2Emqz7%qJKvy=9umXDkC&hP_*iYGP=QJ0flSA?yAk7NlL^*n39C$>RMq_>A-nYVA*v(gIhnOy+$ zCoPaR6C#yIu>q9uGcy@)D6B!6(fED_#8=pwR#ou{X#6rh!Pfw?{&j;f?Sr_wX;#2V znz!!7SCJdJE_m%z#%L4Wi;B#fLT2Qmu_KBfBSYXu{mc$7W1hg+uXd;@&~D^3b=3f8 za%c2`K@5QXvmODPyv_gEt)AB3eYoc8eh-ODmI`!Ota_HiSg9D2 z4L^xlOuU{HGELukBlTBz6V=uU@Ux$?cL)(IgNo!u$e_)ho~bWBWb^s}JT6b#A4~x~ ze9i~R;tXPupv?T9j+;{ep1+UBiKi-Zy@bm>@J60F*K7wAp~+OikIFEwxmlsUd;R9Y zD2^5HzrHHeIeF4e<$0n!;UgjO;7uR$As9|sdV_Vhe|OVV3N#~sY?l8nxZ6zp{b-sB zSm*KQSy-*l3ktLSwKWW$ccO5Z0jusiY`*Rvpw<8G7|azbpDl4|RL`8$akpxa8C?fk zOQ#GK7Z$AuZu-r8yWHK+FeQV9=m=ZW+y#)`2}6A| z4Xop6(f15Vy2wI7Cw}LT=YYLz$+_t&J%cj?&%7J$Mgko(+C(iw$Monu<63P`zECYV zZal(#h9pSPWIi85m!e>v(bJK&Jgl4@PX~Ty(x(LAo{=I`-0#x`V0z$&<%w5dx{cq?A6GNt91%Tq>F}4>sS+YKR>T^!{q4-wpsSJqVzBRCw`pjqzqaN|x)M z8FqixYYPmZkL0842OsgW`!xe90a8U&B{m(#N34iy=o)yLXPfSN47}Mhp+$6q3}7D= z9e2}5VFi+`(lkL1l>rGMfXOlcB+}`3S+l74V)KjKHAJ+uy4flyc?+`v`(AOm@RgQm zPIx(O6DD+viE{1(i|z0taFv&|LA=b3Yj==syp4?zRvrX)HjS#KkM1mFz9)Bp+p6+r zgyV;b4SgQj$QCs1%rOTw#WXKoS`#vFtn!PJL+&mMH5D21OsBQl?=OYQz$=4p@KSJt zFJU^Mf_sJ_UZNC+q!LG4dy~;IFP(qA3)~w}L)FiR3vngvS_Z?5QPKX8GFwHf)tq z!^4-5i9PR@<`qr0nwD*cLp5Yp&6g%6(+8PL{ioLn-Q85d6~O-ZH58L>0Nk+grU`m^ z>{g%fD*{Y^F75YoEp(IEXnbx+ik1}M!Koxw2{lzHyT1acTMCy}E}tEp0Ui{6HW3Hv z?eYOQdAukcws1G*B%=b8-F)ZMK^wC_xHskZVgij%z$1R7By!_Wr{yTQ4ZCJPa{+7% zMq4#wrqjtfjHOiU-51EKMhcO3*qm`dCL8oA70dy(g}kyga}I2Uv+)~wOn0+m46lcUI zfv~X;(`=xiCNqh&9=NAZ2p*GZ=t4OD8en8=g+RAt-|_+;Zmdu!a$=ISLeQ*r3jQDN zb{YoZe8}w&QHf^FA$r(7WTpiLTR^3FGUT7cCt(4*dyhbRZwjRg(qeN1U^C>OPGC}* z28ius1q2{EK_%jKUzuT3IbM{$#@3rQfYpx$(VEo7+E{q>JrG-TDFO#&(@8SQB4OT`% zBmI^2P6I#8Mfk@_S3QG4&9hPOW&g{4B{`*y4B)2!8e%X);t#aq7R$6g@UG^vVQ7A{ zlttkl71Vd@Am+jEX*_iNWD;cZR;pO%FBUGeQS^H;AeU<~4aXAkh!%3gnU3h|EDh?r zd3KvZBd?6^dH~EX;USOo=|fCl;pNLf-lh6glot?4KG73MHHO?EA?8X`CpAz18VS$=s$eyjd-xK^)6wWqHy}YX~`ENh@ zIu;~jQo)wLKe^Ul`IeGg*(0!K!+5xKG`(71XSH=>3n~A6QzbecQGYanZmNICG!N}s z44}$7h6oAs{!bg6sLKqW?v2Q|$;uiURyiiaapn+H5eX81KyHp167-H`Ao3c${;WUj zkalD+f^A!B8@`#M`56qK5J&7?El&0piFMR{X;y`F!evEXMt~^fQ+5js7s+_|HlO#V z$1+?kx*iUEuHJWF=IFdGj6Grb7!FOkwL~^C($x#J@|17Nyl;yw#jas|Y#Uhw zdmemTM%N(ILAJq^>XhtK&v(WT-xgq4k6jJ?t$uB8iijD8HHAM+)D-UHSln&=G2u0~ zMHvOXU?qBl_IqX``rpU<4{z?u#~y>jMWGVw#&WAiAdJw}Re0_lp{vNfQa|2sBMZ6j zLgBtk#`^zq-@W0E67e5m1mjf;=-pDIxg^2&%Zh@aTF1Z|ENYu_WOSWd&b>??SNrFi zfS-g;=mKK9VKTwaRgIz)gPF8@C1&SpwWNZFPwROxe1y!pwEn`Cv~lfEPkRx6!qT!0 zoLL`=!k^XBh{8o;;zX*1X-#8Dep8?fbam)U_K<{VX;>HVcD^B0>Q51}YPU^uD*RG8 zf$8+-TgrkjphIInNTX0U^edE?*{%XSdjAD73}e*#q%xSOt!D_X-sIQRxoFi7-ehD_ zh!is0wOh!0>OoMiF@*5p{80Ke{-V+|GJY~UvIi$Wk*39K24#N2_%ob?De6cU*h|U8 zaWM_j0GM+uMf+mTc2BU1aOB1B!RsZH+%xgY>26tI>s_6=Vgw*+b*j|D3^>pvoG>`l zni4pLNmPG&{d_j!v01Al$CN>k9M(%ntLFr+d2WYY7uU(*_5!yMbf{Yy(W1-{Fc323 zuVhJ8XmGEP_-#n-xSKYxUVRh+_sWf?`Fa6l=FeJWHDq;)eS|g5`qtKVvK;7ymVE9M z!#ol&>7|&41x9}9j0*(yP6U5K@~faX|rpL zGAxLknCx;7v!COI2(w9ue7sauh@i4TvYX!S0^6gnxFZ8rBoETQ;8des37;SdcyiKD zK=#zUt-a(0tD7I|z|pM7ZO%y4cd*d;gyv>>3GowXNJ$k)!&W;Zk>^7!x-CHX8@&eB zFL_Fgspb!uX3Ux*I?yUq9lu91no%XBoFJ6<`#w)|btR{*8 zzKNw#JohV_>2%%yW!v)6a#KOX%jzkRLL|yrS2Fp?E28@Z?^WLE?|EL(C;gshln;#s z?0FG@@=Goqccjb;rp9nx5FQutJ~Qr?`1f_x>^iM*r!m_*?3!Hxm}4})i;g%3Yv)~} 
z#ULDlsYAh;iXY`Cy4=HBxUKoRdAJ6bJ}S0X27Nw(BlMoscd90^33|v<)UR-3|I^4S zQ`x_=fh|MN4}Mwpw%3GvOmK)Qq>s4gB(wCz{yUSUSa-{;hq~oV(~%WmsIht5{}t=i&kJqM3*ijT!+!ME zVmYQ+Dj*o2gGi0asVySxNBAmTqhPU5@zdCUFYeC5@9Lned$qBC7TO8Py1_DuOr-2jXXjHw>f65%PR z+&|f}jmI8}jz=Y8)97cjh22rEDNfi7Yb`zlQ}{0uONgZ>$1}<=Tq6<&4@x69+KTIt zT|@ol)tARVsW{pXZkS4cOUGCybHlcT=VpASH17t#L zY3b~Bk1+#4@X>MAea2+4~hb)qTDFMiBgPNld6w_pwsjMXMR4Zm_+VsG%H6!DkBW_v zksfKS_wg8+TquUQSbrt_CHQ@b_UoRHv@n0n^o-2UmirdlOQ=p=Wl@rArYgPBDO_=R zyb~_n3jPc_f8klVe&*(ABeP3ieiV7P6cqdNc?)=Ph&>xQ(0`_6r6H7iea~{%>2K$g zZ*Vj5C}8O(dd_)H>L_h#ZjlEn>$p`mMcP{H7_-Zk{@<}?bA9HucbxfnpJ>KoExh2g zUb9nXst?q!lZY`+?T#zK!$bXmEqP$EDabf2w#TDqh3g}Cj}7CKUtd&z|MZlmZRffo zo^UtQk~B?{0H&+&E8RG(z)5xVE2| z#i%TqU~?|CQ^{3XbbzM{ANAoz6Z>l^D7SX%vtgW zXi$R?2d${$fXSB#8b!+;Bn>2#DB|2r==X@i{nj`y20742aCcHWyVa(6Gd-%0&t%=Q zjrcBfDA37kA|^sygn3KVS6I6dL)%hE>M!~u{<+t5sa<0H_$o4B%9xJZ5$65?jR>m| zUuR4X%pyUFIxgPR_^gAp5|$KiPT?bhc;-#@|gKz*6@D&1|u+>D&bwk)?^g4p^v(q%ea z2D8X&(bu=6dPVzrzl$xG`U=c=(35_dQhT6N$a>Nm@;<6t)pnc@$LNo;bRkdNA%nt; zr^lt{_DauU+!QlUq(O<1=VVAm z{BhO|h7R(+x*oA$w;b_QcZ)O7pH$7I2K5*ESH1+en+>veDZSJkb!DL@_NuzZB*i(T zskcpsU~Yi*SYSNO0t^9ye3wgY81_Sp8*EzuHSqwGz{diDWc2i zf*hT6hScxZaSak^J2}i(p{gE}75ed}PhTRGap9^9`DZ*bnuBHY7(bXV){drGE5Sq< zvGH^wMead_1m%HQv5++f-imqI(~nwYcCcwJW}Ax-Qc5YR_2_Az-)0V0ZiiRqqwCSX zhj%yrC;2y99nb0K`i-nyb+LO`|C(A;6nlkaV(l0m{SQRDk^aWd5fi#J^^^_;U16ri zx8{9f4XTBkVXpYY#^Kg}c9NoFa3#CFH)vS%jJADrr$#FNgG6V{#G&Y*78Lyw4({_0 zzOnYQw$Vxdyqdf+#QrV;%Tvp7*68)5kEi#N%;@&xPKo!zKQ>#(em+1xpy093D8={r z46fg-T^jxOHTZRP>-C^dTN0_Vj_( zU+4AA%-3U3<(C|y zoX6&SzelqIq8>Y|T|9Q>-Kk%_ny0Patkh5Y-e^WrUjU*z{-Z1JAz%+x!YWVCYN7G6 z$;5m^da~zA%(P4Ln&q>Srn{w!LofcRc=K3vOGO>u zpp7>4uc^@79RG&DN@*=5fVhMC4$2<+%>o8roU)f2;k`k1Mwrr4`bWKcTdE+nxXgE?m{^W!W|{kF5Hs^Msd1iWLLpxno=znwR2Ye}2oq0$8Mb^NT zB)1@U|FZ4pgiGI_#S@$w(Tq`=-AO_v{gFN2xV?Eiu(Um;@$Ic*r1GKsV{m7CNbVER zP2ZM*adFf)HPl+X4g5TBm&!!iQJLry!E*J?#0L8lCB;?V;xdN}e=XPNmI0p>)t>Av zmJrAW&W?WW-edF_An$B)ZVk-D-en)4`w}*$4xTc4vU(>tRG%kMh5q9%EJF+j9=1N3 z&imgp=C1BF`+{`-~3P|IVnmjCkhZ;4zZI=;1=bMhkY3j1|WHYBMYl9Ocbt0*m%CKOiv>HW@IY);)j7y~Fj)3W{L z0ovdfPvE1S3EtGt@{y9M@mRZ*y=dqyFD;|>hb|0zm5PsyL~BFi2co`d_`f8GD&9yv zpH%g6e(wy!OC>PA0gf=t*+#w&gyK7?=k|1JkLs^oC7dq-BbSZzYaa38 zDHz5=0(p#Qa{*Y}IbXRoxLe*4rkSn^eBdhki%^iM&$ss;whR7N=|IT>qg=U>Pq*Q(r^ zhNFMee@H5LqqolGmEsZ-n0v zU&U>s#9fZ%Z1_PVD&S{hkRLUk)R~MiFy-ZDEdet`r>tunJU-#XSc_=Fq^Gj(E4{9F zmwtSnSnDJfVJAmnpkt&*pi?!WFf-oOlFsY+3F3(pu^Uq{QlhrrCt0{?*ZKHq`XYp^>gdr1n$Z3m@EmzE{r)QvLw@VtoIuqMMhMJ&l`f26|gqT4LRr zPRTbYFyzDy^nD9%>obSChImfIvnm?@m~ZB_aZxoKYsNUxn4R2Y=@xmsw=ngYXi*ED z)a~1Pr*Z|ReAu^9FDx?>OdKNDP#oeBKQG>|-9Kb!u-#~=1+QAv6Y;vNK%qsdfO&@R!UCTD+EUvo!EO zAsqbDCMh8CyicKeP(m@wnp~%Z`dE*XHKB&zP1!~>@d=mYvPL9rnWC`)w$1t9PnPav zEFW7zdB^*=^E`*^&syd<4~tuzZo7|S(QQ118{*uvTpx|nEa%8LzW&x*^oR{1k_D}y zpnSh#W`N*tu|ETBEpc^QHy_d4rNPQ1(&;sK#}p_Br3Gf*t98qBovpuFMaDE{uoGKd zSAG7Fu958VOs2jVj#Zqr9bfOA)%O4UbGi9OE-LlGC)2~uP2xx+T}Zm0xu|yjZN5(O z<>KP%`>nCH+2|z9elpk=bQCm=sdTV-!tBNOJzx30IE8K9iPDo$P+d!EvMT$uB@)TY5 zXVsOJ_mQMjdHIEu{r$>zrt6F7Z>vPr*^cRsUHp3SJ=}wxmMY7u*9)h_80;RI`n0mEj=8G<~(&&dehY|u%7&)SUWCcO1M5qsrru7Y%i_y80sTk zr@=^N9>1iWT2Ihl{YdtAe9bd&;$%xn7u~fFq{??}&j!QR8op&+_7N5San|tF(@8wZ zt2Q~&i@sUU+9us+i~CU0g<|z_RV3P)1x-P>%o~Wqk{b2P8RvQP`S%32bv($kGsS6d zPfQqK6%A!{WtG_>f~B{tgJ|5nLaq{oAWfrrh`V3b+dU zq3K`q?LlS~VE*>|8#4eYa}Od9t((met2G>z;R;Zwnt8xinw9 z^d_B&riS{KDK=fPamsAY?b^Yuy7$CeHv{Y*Cf;H{C?`wOQ7=IC0vA+Rq}hFi)XUK4&X{vD9@nP-0-k z9H8>g7Oqz0oxybHRqo_M%`{yM`KMx&z;98^k{CXCBs4bj%uu7pQg)^@EB&6e<|g;P z(F_P63=$5^LLQl0JCncQekP$vj~67^4K|d%caEXGoOUdRkc?qMfG-zGiBU|1XOKoc 
zQXTv{avkwljpwrC$6f_lMl5Pxz!}QfY{t#wIn0nc5%0Ko%$3Cmc(hW{hhs(A0p^<# zYM0o}N}{TLJ?Wo_l!)|>dUr&M>bJbcrsAUvu4E~WWAK5Go*#S?#0($Cq@!HqN8F@%uBNN<(K{@dhlS@U)m=yIayMLL zzdKfX;@!zIir{w`lm###H?rFKQozaZHEF6HAkP2MYCYWau5@+d&UlpY@>xAlBwvV` zq{co2Z>trFZiidN->{y$3(UXQ4c}kG@4ITVD3b#WZGlVohRL$2X+UT(o$}a)*UvZm zHM%W#*N2iwsT|~ew0eGHf*&mUoWb#WxZ$b4+q@xNC}=c*1M}y@&2e?1 zOpu$T^9@V0GGBWaFQla+Dh^H^Ti$=Xf3GG>v$mwiUoLm~_R)|=AJgs^p~VYj8aFVi zfA$IOTaK0jh9v9Z-Z=j64H#7YUFP4J2RF?FEj$|T3DxLYvTzc8Mdp`Wnj>cl^z|Th z9mBVOYY(rUvh}T{WfQX-ne^P)7K+QOn>)1&iufoYofCIy}gB8;7egZOXr zzAWXAAcDU|KAo1yywRfjDrVz^m93*ea_p*az_exrQE#rC!RlCzG*9I zq;Y5sXM`6bH8=2KP0QdrO74=U74r7KQCxVeZPmlrl}>$}ZYVoLtI)?f-wrOC8)4SN z?<~94afs=yRihyC4hLZ8r~QtHq*Bl+BR(N} zl$;Uy!gWX9S;tZogd4TeuqHFtVPRiTRl|KYxf{0&^rzlRk!Qnu% z_~3X9Ar5CkOg6$x!DoIz*p*6&!v~bAtZ@UmM+~K2l9uuPZVJ1l`8!diunv-_g*)Ve zUx6Z8n%+{S7_~Gw!MyG;!;q7KrPlJ2iT)m6u+p`Dm$o1Q}$k z#iv77fNq5w{1^Qnm$rvo#lI)KB^(${B&F?Jtw2CQt{UC z?OxeZ2e%dKO?3)K~o!!dZyY&e{gs?=X@ z=5<$^*{#%aAya)5g#RX?QLV+%4-I~Wv4|H!Heu8%pt`pzV&00z&(bYzOj_EFcwy9r zhaOU_%#;~@sB&y&QHnF5I*l5});G%e24(dRh2h^mqmOs2lJ388Vr(Vmpi)_2RSYlU zEGVemq?E`TvdC@7Ji0^rizrM$aA5fyd!w2) zxZ$k4tC?Pl*-(Uin=$jt899;q5=^*dD zuAXeQMTL>Bm!Tl@b?2^26~F9}3?0VUfwhE?o)~0qc8XftHd2uKDlToWi46M3R6mN2 zZDzwzLV-)gU;^9ttUvObCyb2^jO>t156Anx8kRqt@J~b^YhtB_qOIw8z}PkZv9w&( zrj+G^Oxk<9K1|mRCEq2i*3ATi@{q!gV{j(N#W57Gh8ms1&grT2g8^h6FjOADuz(Lv zN5kBh$pF96gI$e;Lm4)4fyyPiO@c!%#db@*t(!rb;61=J=f&Dx81FFv{vb3YnHvQ^Bjp`tqE?Y*W#E?sA0i6UuMR* zSm5i^Yi^xmYuD8htv=C1I2*jY^d}7)!nq1n@ayn}?@O826}LX$g4Ebw!@z0{Suhg6 z_vugIk>e%$Kk^ZZhrrc7cEK<0LRv6=xG{R`9moMy5iwr<-}66>oXN&ju63@hO4*}$ zQ*lqhn}f6MC@mQ6u%7O66$#t$CHQOO7JEjNPy~=rUcP@xj~IPZ^IBL+CN5Udnom4i zk2&0~#P|Ue!7~@%wbW~yz-$SWg?dDCTAzWQbLzpwng+RJ?HuHHjq;&58;4+u`@K-( z_0oi~ho-pwNdEid7-EXxTy}GD8VWn*$uR%etkZN-Nq4I0n?VPHU-zwkj7Gtims(9Y zGSzk&5nw%fM~8Q3C@q+6ON9^uwnio7yq66YQ6=3*+m~tk07Q+l9{}}tAblpWg!iEh zhoPof9O8mRl7RywimZZxFsfR~Rp{O)II7@9`*skGt}-?^|QX_VZX|9+*GYS4}tCF}^1 zFV-eX*)kAiTODx)k#d~y&3JM;Y9z3V#NB!YWp2aMA8=?sdwQzDgu*xk7B-^O;QSIz z6-QLHLUcwb*Z)5cd4)p#?S1#rLQM}yjVsfD>YfAAX|=&~E2$^~8j9omV7k^E80hT6 z{ra(UwbZ@l9MH`=$oD^ehKT^q;Gt2SUp3?#B{KHh?K8{=vpJlm&2NF5YZqMDJ!AN) zBGX{}6aU+OyS#FGq3}twz_};?&X1k<7TTP{>1@|WZr0*0j5YfmN1vDK;*J21_fJZr z9UT*=XuI78Y(=@nJvZ+=;ks{p$-5by^yy-yvDe%(5zJM+sE4tzYQ_H2xEC_lwC)m%$i0v zqZSuj*#r4gQyZ_~<0iVJ7u5KTrM)5upL@syXH#K&-zTY92>1>R^MltnTfdV=RZ~U& zgvmRepiT`*6Sj%QM7Kp>83z@_EZmUv>M6$HhmIsM+9UX3xELx4pgHVj0&=)K@ZVou zlf8N{p}(T~9^|(5H;$${fCP5i1w(LG(6}ha+kyOWY#-%72HhysNQ$;~4UYgTUIJ0e zm}Y?n7fHE25cvbB6+1+CLAH0dY8yrzdLnkfe_*7+ETRJJOt*~>)V2b}gx7~zcl8f2 zYhHh1q1tqbD1QZRs2`ayVf7eBKit~FRK?8QZN(&g1h)$5Oa&oJ3DO9mnR$JB4)`g6 zhBG~o!BmL-D4)al?Ur(wSTL@cF7f3C$l!LNCqyUL14bKp++ulB^Nh(2X@vTNsPQ(6 zkS_i4fWJo{TH+kHV3w3e;O~AjMpxX|*68l^Fyr|TNLbfxK}8rdal7TVs%P6fG$};h zQ4d0qugx&WA|Qdn4g|4pjG*b{)6x#%qjPZjbpLnKBm1gT>AK1QV%r{#gx#_;s2jx{ zrvxE{*9*Uq}pT9&>Jh`B?wWA%Hgn)?ergS`#Qr$9*Dav8BfIu-KUTvbo_(a{^?~-?_Yf*B0tB#ih~?wnktx5>daQJWLET1YvmM35F16KNBJn ztpn(j6ZMwt803Zx_>^3A5SL_T<5I+({jeVS9JG9iatWv*L}u%=&C+{W-`ELk*VXE7 z91Ms!FSG{F2;d15R)jQw0oeG=1|&@$7fm&7JVZISvt-KchBAGL_ld3NAli0FBv;7d z8lgLD7UWK4D=9a7WwRs>dPv;udgECidLBXyTV#8BwL$jN+$8@@H1l63TGTDlg&_k-yWjRv=!BV-E)jlwcoe=HACS2Ljsr6Z3G#7ShC^X5_r+bD}gZ{X^uy zRf=gahR62K#A#Thp=Voi$H~v-IXLhc$6{m@Wf4UgGO?=;a9#nGjxJCkErZXi5U00O zdJ=l)l@%vzixdgaDl&LOID- zp}8-iW@^$Gu&2ow#{L3w_)+?Tb{R+;(_}dYr7l;3=GTnhL+%rZdZ`6A9pAu4FewXn zou7st%HpIOh&M-rCYGcdj2iN){N5JtYD&eICMQ!0h?JVf^qn|9c|VYS*NfOxJn}bW zuEP%Nmm{ntvnv8msUf`>R$iAZF)y+^>zKXyu0dV&rHSUXVb@V*Wd7|q`Pnb@ZZZd7 z!jQK{Utuw3Mg;LByZ>_Av;O|#?xGdoS==2CSq%DtRQVGOoZS6p8>5@;`WEu5l#eds 
zQjGMnnKOI!;dv)i2x3{-C52zcbx{6AT!n5y|2|cbS%0+6go|(O&U5Uodp8>P;J`Fk z6$Sz6<)5@Nfy8#zNFPOUwd9BbnGNQr$ zpc}mc7a5VKN_a(TFefzzUmTp*8NHdUo@}!WY(Yw)*C+;y+w1RCtOVD-$`GL`N6V)u z^&xX5q3#vOVpmUsc0bPdqQbw1W`h1G|MP!q7+Ia+HKWQOqo7lQeLtbHqk+JB1Uf*9 zoH5sNA#u5oTP9=3?QS|8sE3~R5pmsVU7<&5n6EK zCGcu+L5oOSu+9tw0SlQnN_Fv+%mSt*@m3d(s{FRg{|f5tZGZ2V6K{MUp-)7)ir^~{ z(L7oVVxdjX312YUr&|mcuHcIu{Z?t!!=t)MX=k5c_qgdyYbjPL{wmhCd{n2w$CXcC ztO+459s6RMKjT1LPRHMy@l|bBO;ei6(nSG6gk4-(eGg6%>8IDfe#ZRYlBFWt9%5Bk z>sco*N*kxzU}cRfLn^hNJ;jDd*eJAm(UPAdI{Rzy5+fXL)J??@*alN`-G9hBC|4K! zm@vYyo5VX1b-pmN2W`(^is8SP^2du*Y;KL0=+#M|GTCLd(Gw!h*6w*X<3=1h2+Bpl zi`_Vm@E_)HD?87pqflf>;u4XUmIJe=)Hmd6T0SzGR+G@McW^*&5HcjOR7TfH7s*`r^J1N<2zHtO{MuM%e!PHw5k)fu$2lX% zmrIv=1;(s)Me{aiq4XLr;~FVmbnc{^)eDg$i~xqTg9(mfdM4%cwudCU*B3ug+V!_l z5Tj4EH4t(brj!;>aKCy!ik%;P8B+*%@&!sg_Oc>9Ul*vbu#Kg%Bti+bf00gvp>8g> z!^Fn*K{)P&k@bk(B3&-|uM`=tw0!o99oC~IcBNv!FrshtmPI&<-C?x1{{8kA=kloo*a->9w9P1EMHRx^7OLKH%)2}Vlx5ePKSE?l zPC@LYgdZT5I`7QOA_O6RdHS&1iPriSw`KHa7x`qeB)$S4JqwU!A=cj8H<`sYd4K7c zKbCZR^@TVMLD_^f8X^cT`x%(e9B1}6EqOz7jn?HK;i=DtNU_yTnzs) zXse5LNQHy4g9_*su3_N%ES1Kl%BPPDA0e3+v>bN?V1=S59!&EGpT z^>*m@3(i{!CP;e1D}8hwsqYK)#O!zJ4!J&$SbR%;igws!pi-473doCADAiO%DfIM0 zjTgZY`E-^4!|Yw79s@1gM5^6EFjiby*d;}Vn0tcBUQ2@>7Cu(UDja?Hc}ra-{aavO z%&hh;rUKPEW(n79Q$2Rq__v7&T@;$%F>NGKLOfxKWHLuBG)qJTAEETHmn6IA<@Nd| z$4N3)3D)?7q=%m4>gjZCRJZDK+{`1Yr&3|Z%s3)bMcukKt`0si0yBzh&u^zgYNAN8 zD@cA;eJr}P7Kqf1s7Q;l??&kbs+j?SKIefK5CPpaHc~h*4^Ya_Sho-w#3W=pke+;@ zNiFaG$T#jy%~P9BX0d0m0&YYGioSqKytzfQt$Ordx*b{-YBT)m%00yBtB^(#${v&C zd894m0%M2DpfR+72BXwfE(w(mj>XQPf$-h=7y9QTkV~_{K|zh(B7+j_D&;p9P*xO6 zj%O!7)f};@`>|(5nB99>>8aO>$)i-e1E9>nwlg<@VdBBc_xRzkNrp1)%Q>`oxQF7B z>Lx$cmM{ahez%kI_M^E*Z#~{3P#CNC#AES(PFXOkZP?@-bp1=eLKM?}c`^cMr}~b1 z%M-^WfrT@vUKaE)0X~e5w$fQLQYIX9+qPaA{Sx^I7)YS4?haiQNe!$|V7!{>Aot)1 z$53`h6W)LD02+m<^(8nX!k_k(ui?O@G#i1#aB#?Hz3IpF%w}S z8T=^0({=$Hoc|?DReGjA%4Ip`2DP7fO6mYho>}C(N+>b9wGc|`1!eHduaKh7K|h*{ zYVbJS{_RxJE3O@pM4;bJOu}r#VOs~1$sH7_=6BxH2ZzSGb<&NvcGDXlE_y6=M$O!O zXWINrT=6FF%KZlWFiqNfsyuK`$}-vJX0L&l^hMIWGUIb~8J>Qzr~JnSHq zU7xlEnl8O?(Tt)rWI#utcwFbIo7Z|yL<6)asn0z@tU#OxCD(|2K~I=#)flbi__e#tNcDy0VmK=t@ad+K@)ba;(pL!y_HkQ#vNDG}SL-Y^VYf>w3xgE0!z$cU@5l8=9Or6)m?!8r^wOEGy1 zvkntDb^9QPUd)7smp{c>6(ou+FDdLUQ(g1YpJ+Z=iXmC6X60-CHL?rc$$Bx;aq)q* z@W%)nQ!>t^(hJxJkFJCs(tGI!Ju`20iTA&Q$?g?t7Xf@-Ly5MQi>;0R4#}E5@lJvn zJE!DK&HVvynW)|)ky=OXaIS=j^d%WSe5=%HPUVKv{*yHDB(xD~@7h5aAK!*J5SPs? znErzLR#)%Yy*FB(s1|60$MJlLpD@`Dsr6fHp-nWO*x@oG@kCso1La#0a5bPUOmA8l(gBm{%C+C}kfO>~t*DG3aILTPi-OEMAQt35sx<}3@Q|&? z-5$vtZoaL!ZE(Smh)lS+ip*JSH(8S(*<;tC9!84(pq$XF5DOKIm6O{c>xO{X>5iqN z)K~dk*3PsHwd7WL^O0e#aJ6V1TiBl-2t(r``Rd1iO5s#6qcX5um4Apxchw?;9)%pz z7&Eo26~XCv*cHdrNqUN}aCzIXl6i^9DcBst?EtzdH>V9E717%8^uk-|c1~IGbp!sY zO^$CCCrc=%A#7|WEl&F%cDt8?q}YusQF|{20`THc0+}WKp`npBqD3 zMH&3~;x%J>BCi18UW107-uz+G)bENt(suo*a$oT!;uMm(g`4XFw}CU9yw38s^=+!j zFJN9<_Wn~T1ka`{s@7z?^9)v3lKJc=Rw^VgIwE)YOqs^gmin3^*$XpiwKsn=i5yC{ zRITFeFp8Z8M)zM_1wFYfZ7yh^y>b6soe8c@pSgxm5xmaeX$E8xKdQjFunou1?>0~C9Fs}2eK3~r_`^|OUPub-i=LWFL6w~LZ>M9 z-@W448`E5hAeh238ekzN7j#g;bh$b%7Z5NV(I|1drm?i|3%!O&?sgr?-5w$<%V0X7 z3Cs$@6dY(TUIR^%{f4-r>mGpLGXHaaUa|@!uKB56_404k4?G%hwuqdx4GWx z8*Fm$YFht&rgVZj^Nw5@T8#xpXCA<7H4r8>7Qt)X(7lTOjHQvF57~Z=hNPm*wYri9 zl5%IE)>4_t3)CmcLCC`uVuLLBi1mU)*y%{;uO)%C+^VqojF3y|5vngNW!T~wVNb2- z3$0znyWXT|uHZ*v#x425#zu~>Ev5c~fsWe{+c_n#sQ_>R! 
zj71j0SQO0thqF1aOfw_D3H9bb+DHrAxWYm^i;&BeL_87Vpzto=_Wf=Mgim#7(qm6H zH6wxm^nlf#*R7k@j{yw!!zFXwEQgB#?5|Ccl(>k7KmSGak;Pz=VFWyv=Twy~TRat9 z-uWs*EaWP%eRbSRQx0f>NNF)|D>(ytX6T(`jD+x}2YJDukT_y!!ZDco^zs%f1}rQU z;{{H|TgSFyeKNL|j+fBUD+>n`lVyF4kcykmS{x}#V6Y0AUnQC%`nRYP)C zC9>_CTo;rU-^(Sd|32S_;>ETe;bu50@`7z$Yt>DmlsygaE5@KbaSQ9Q7s?Z8pA$c0 z7H7Uhe%RI13ABDjbP$O){wZo$JggVHn)6l4&Hp}k)PK?6882iNjo{My65ECr2p1Jw zBsna`23*w53Xd_WL#|uxD|aj*Gwf052qS_LeK31?YIW?tL*k)Q`?HG(s*1ZP)NG8N zZJ+-G8nTd!EO-$1(3B5d1@3Ps<9Pywoq8gR#CPOgJ3XR@gZ2$)>rq-1EcELKRpq(= zJ>3_Mp`R2;KpKW!{JL5^bKS4H@U~7*)8fd3RUk^(Ei5?5AK<}e3SZ!LwAj?NP&O+H zzBfe3xjRdmbeir*?h(YVbb$)cgbWm4gub%E#o9%49G*Q(JWV&7cDJ5JG6m@>%iGmR zrr>3rSeSX&WrTzHs6Yq6ETgAEbg)H%Rq(@-nSaXc_efN-uEKZ*P}09*rd{eh+HMC6%nlfLLoBQ5*YXa`uK2c zmX<<^KtL?{-%aDrvk*cYb80LY>{0g?-d!-!(&CNqGr7#~<2Cl9>HH6f=tX<^t9sd@ z@(*CJLOo2?9`W3o_*@1+DuU3d-!Jcdy`L|xoW$N^3i0X#@tO(&JcI;rngDFmDUZU# z@>|cD%Z*e+l&9WMFN74awhw8s?Lh+70Qd3d!`->BarD=U*-fUMKFrM`H6Cze8(f;H zRKHjSWO`c(+2ZeEJ8-HTi&G9CP4` zvXE^Bqv$=<`6|N@fIkg5CSL02nwWilz)d!;W%>hvQqxod9vR+J}j=`YL!vX6ih4bWWJbs-HwZbO7eLBKrYFH?^P3gBARHV5+LE3#iljRCQV1UK1v5IQyoTm8rrNT z+wUA0(@L`7SG`wX%u_%5 zjugwlA|6|on91z5V(x-`&jWFDViS4yVcypnvM-XxDTn`7aZI>e>pOo)=B-7-xwG>h zfjcRlb6Drw)pOuZSd&BuNOh4j8T$e|R0(Li8VQ zPm2_%3b`xArwcXaa=aC!e(5 z0oe7`xkm_1!fPM#TLRkbx9*n9#uN?J(_F@#)V;Wu9eB4LOAU~Og(jgi0#B6m;xct$ zE-ZcPf#iGq#N`z9;E9lGm6q=5%*M8mmj4mgj_ zhi5IPT-wcLPi)#|#Vnj(2zATmmEe_o-Q>`RFt|Fv5=MLcFFg z6=GdPmF)qm4h44Ok1@3!a1UEwQu_tR0?DsKsAv`098z@0zY-AfEWYfnY1;Ck`hIeb zAWWhg;4ANnxNRmog7z}=Z+8Gl9@Nx!V8fa0hp}tP{I>Q6uLddoVtG4q0O=Qa++qHt z2O^F@lnqQX&)5b9x#7!t)QT#86AE_?qMm_g*j*^56^5g;q{G1dcXK2(YskCb0hc(y z!9{{ur1=&Y=xsaqz7hUgSN$B}hAC&TZQY7Vb+$G^VNOBJ3VZGPj5EKDQ6jB)A%Hqv zh%V=6t0s>s+A{<{`U7m`q51dx)w6~#M__PM`Y|F{HN7zebQrUaKLr^l#cQ4)j4u4S zR8^XfTW2rs-c9F~vBNF7nr(*yKAWmblIU zG6t3!pLx+|FaRa*a?ryPIF|gHROSS=*E9FQ#Y|Rx``X#0BJ(9^WI4Ay=;DO-5?Ye5 z7w6%ft1&S>XKgNI0k^8?5vrE2gu=Sz30x8~^i5nC8E~6dh}2b&`?JSS`e0WPabRK0 zMj;oBO~`fdJex%fWqt_h55!fgm8`vHj;Z?5^6#Te|Bz4*qi?$0-Ts^^t-=#^(?gti z#qX}kk+rYYc4dBkjD6}KCj0m2bKj4d0a3q?Vcc$)3DBGmmA&AADyh}FKLP$S!#9Ad zDZUu8JHEuvlhsw5JFjbD{$%izUFP9+EgTT0G^dH$CzQWLK+ejoyRNHvB{c=frZWgm z;5Kf527d)<2D^Stu%EI1JvI)`!HS#V)t4&;qV|hOTPn3XU)OJSNwM%P+osxcIq{wi z?ZewV+6AWGQ*|<`9Tcd3-$)~%_6^<$HCQ@{*wG3P1uY7<-ERFeQ695$T zPiYK$b3e)CxneD1VPO0XbvlkVyBv#4($|HFQsE_2!>e0tGr`yt#Mp{gByp>IZ8d=M zBXf~%PJ)5ZiTIzEf@yF=z^hz$v*+>tO5Y|9`c5hMgY2eQJ(GWrQi@{|zkA{$gP8ZS ziuIlyC^rMps@pWsc7OIPy`!ubj4PJ*h8b5ynkRmS{n-pW-kO0c{ZJzaPOqu9EXh4% z$I7m;bF7sJi6(2rp=Qis4AYx>-EJ(W^m7Ulo-CAJa`J(KA2b5vpZz8j@o$>y%rFb8 zY}32(YhD3B#KRn-hy`b)S%G*oux@J)vzt(MMk10?I)iWFDc#jAS6=f|dwYvHTmu|$ z++l~4zulp{6+@O6$N-<(^Cu)wNRqI(|4_<4M{08xkM}8oSo>1%DuxM4Fq_f_LCbln z;j}Ui*L(Jl{>NTDe@N#i&+iP0-1rm-{vEzYB!jcvI5LEfY0aRU|Ip<9L|m=ztr?kv zV)0fwjvxb+t{-j3X**QTgOpbM7EwI!mo7xHpeWrU|^aPw^X5^k!i zrMD#t%;pI+?X@@PhDf5ji$_azqzX(3RhmW()wu$vScmu`yp+FV)!{L(2toQoc=co^ z0)n)i!fwg}O<@5J)(HlzfOz>==3kh@h%oT-K=o2Uh>tyj`S**wu(<_O7;b6lLLe>S zx4OlJHax62>}`LvjnJSunv=HhS~3+pzN*lbO6%;{i;VKB7K{yzBE zvdO@#O@XL!4TpsB!Dp`1&N#>RF#Lvod~7*J0npZ~b+$bh!iZ!dds?S@+SWtPfqmxH zVMrL3&4ktfH}b>t^(<@_QpQ(=8eh7>daDQvr=oJ9k?#at2Uid~yk`96n?$9-tnF|c zl<&7+>y=u-PG+iD2>yyr_&3zy@pgq6mRnddYTFrI9=F{lWT6_+bA9Sl15!5}OWdk1 z%&U+3^mmSQig(d5AnT)#>LyzKKcLOY2g*0eljew@SafM!u0soo z4Q5x7Y-%#v^JKYd1k9N?UHQ1_L}{jIzzibRq%Y`@83$0LUbizP3r~~sn61XG2y|NM)+Nw}rbw zFd095rPOSJ>AFBo@w~1%nRvKp&zix|vgMm(`6jlse1>Rcq5 z4$Fk~`)>h$CU!HB?bpbZYPUZ?W5su~rE%4l?fk(#QCv|Nd9D5O5??WkwwvoB9wuJt z4+~8k2MHfi<*~J<{#VNmEP&MHMvCY&c;dIuC+{Ukpv1!yPGqh|xf~>%)bmHF*PH8C z#pL_~;#+4lUO?I+0e@{-O9EX63#wRp_b<|wXrGgBfI8YE 
z>w4VG6k?)`#y+4|Q$4WynfbgAS1v5AD@k)7UdTdwgEif%A-{;)XD2UmZPRQ@<|xDr zqsKnf+B4>o`2MN%eS({W--kx~QcNGj7^5s^7eWOSZcGL@&yn@3ABWL`g==in4pCei+#xRw9yopbCVUxG z8*3}eWdm*(D2xm^@Qabbs{L0JPQVIm8R3}A{W7uwmiw&g;0 zVHFvsO14l4T7m zS7i2PWG}Vlv@gCrI^7g`84Zr_f6{LJ?F6s7$VqBW3eXYYnbe;re8kF7R0~plh;Ke1 z8TW#H$6G-Ab4pjECG58&8)k4!I-vbI$Hsd!uAU>@&|^0f-{-m-br&Tc5%b6n_tQ&B z_)qA!QDQ*M6xYjc^&+B(Fkj`%M8ZC@21Z~j zGb-eGsO@t3Gwy3XLazklahJ8F0STi+$9;S>H20Gj6DDn-7ynnmquq|CTJE$!_XS^? z$~qf|)@rn7Rw%j{QSC{7`6GTk&b2yPy2tmUG2ie(-~zA}gN%$lR;w2>90($WLLtK8*+oN0I16xY;2nz zDFdo`BgS;*NceW*pf>j=JPVry4q6H)?{x_FX?(+ehCO?G73pK@xbfiunguVU<^LRS zppbwhzFUm((wOm!k>UDjnJ^vJ@ekH)xi1~@p3)oLIe@0vO0OLXdPnD6itMoS^y6%u z5!=6ql-4V%SLy?`*)KY&>>X#R-rWB7xA={kiN}sSq3+O@Ca@h`rW`3i_Sozh|9mRr z>!8!d;#8630$!E1dR27kmplXm>otOXH7G8(g}t{~&k|kq-+*FQyk^E+Sb!0sZIw(l zWq^}0aSu&%c-KhfL*zg^9eElPU3nMvOT2B<2>Y;Hznm0ssk;|ud2~FuDsY2OSCQTI zjmfLSP&q`e)XqBg*rOsiN=3FXBQRFzw?*8m_fDgY0Gb*V`g>R@iV=+iFVs%n$Pz{O z)~ij2jlGS3&DOJ|Hx#Re5-C_?Im>08Bsnx)mT8{~CVU zGL2RYj}%N)5*Z4QO;SeS{I10#@#8thf0Y^vMZ<~HFWXbcD6ddd1r<)i7R^ z>Xr$+EKO$NRl4Yp`_rxde|ur`jTujB&ncVV7mLFXiwhw&DQk1}ImJUN zy(`rp&WcCDdj)IQJZ8AmyP#Z^u-w}AP5;iRG9ZpPdshqn1$34IP(-ar>(p#cP*eXMx(g9L2yOb&R)&ppF};trxWhQ zl~sBpSw%84s=QpX<_*M+8|;tb^FXcAl@qn7Gl4! za=^U5X<8vWUDnovTEfd{9(z^nWiOU9LX;l0%G+q9tn+3fHORkb z7OLb}5xQD=RKJG-hh=q9AIe~j>qS0BL^{zGcz^ACU$|^2JST~@SNLAuLFa)7u5D@d ziy*4)j`bMvIihmgy6DeL4d>&k`#y~Q=H=-2nIbvVt>rb5rhm&cs!ykCSFJmn2wxQV zkYp6D67JK9Y~#`&f^0v|v^#m0eB=drmcbEfS)xTbk^P&s_CC$1mL4&j>HH9)>3VuT zIOiHR!?T|9U$lx`^wy)MvoSaF+=<(jl zgQP|HTX1yws89F})Pr_D>u>H14ObS7yK{*Ex7|05;V4&;g)@*>7=7(~3OA#TWCJ`I zjhPl)mg$S!8fBH|^zA>{qUbtT^4G8{Ro;h9{Q3GPU5sj$0!Mk{jddCKyQhhTPqVnd zc|;}GK=h6%JzHb=bfVIHtt|0dAjbX}a!Rssw#I_WUf#0B;`Z@zJlw}l^98!GcWY{8 zKhBXUyk&TU17b02_WNZ~V?@r}Su4~;mi~S}ai?Y4fD|;t*>gu8mVLJ<(SsNP)J`D6 zu?iTS zpp?&8KP%hTZnQ+KY2=;!^eFy?A9>k#vqk*Yk1i%Hh9%}WxkjLrHi?1#s&^Yz-)9cR zCt+A0_esLpA2(yS?-$dqE<|z3>y{}o$&4a@)_ky4RVQNw`On}&1!?tfGddraHmI)V zFdo0;s?eft@ihO4ZJ!U0K@;E@l)P@w^WQP(&#@c&)~{++xgLhD4#s2m%#8#+e=kVw zif*gcuE_o8zUPKtQS8BLMnP*qdZt&Jm819gEL1-O`M%RYXEgvZPFonKDTSKQhfDgF zx!MQQR{flM`)%G2Ce+ni;_0A*gZXixjV+4jtLyllC?6V?CS7(+uMbK;*=Dsjz>Ji{ zJX&c%KuJr>c+$W|DJJTtw+|WW!|N&W$yeJ+x)ivl%;-~lLEO1H0OPc_a<_Ox9-|P! 
zJTy`EO}mua2qY$CO|53MHVphNPKv!Zx|zvvZ{M8asn*j;y<5!@-Oho11?Bcv-t^?1 z3Say*^Y3&ylwXClEU@Ze@H9`^fc@hel&m5AKs2a1DPvBAZ-*C{Kgc2R`PZ4>0 zb)xO92;Yh^?ot1mpR}E6M1AAIhscrIr3hi-RN`r$Dw=0(y@|*fq_)3SC9WEI-^|y^ zJk%tLaA)>J*J~z_9#y2u{kzjsdlIMXB%lJb91?01Yjmf!LH}CF!T6uGB<*!p&1|Za zhUH(Ma)kT=V;bAEhsR&eMA;i$Q{$aiR;g_YF>XWg7n2Mj`Ma+8R{eZmp+OOH^XY%2 z&*^3=D9q96LjuNAps5IE>I7R{Fq9;Nx9tqbTOC!!)wS(w{cBkK@2?r37-Z4Yh*J(G zp)KFjp}uMYRjOwND{ESgVzT_>|Bl$&fySy3=jdve)gHR2Ez(*t&mEmxlfz@C;tT5F zVvU=@9MO%ZRcV3J7W&I=)CxLO)uo8yild7DSI-GbGi@4Q$JB~+9w)~wdW_}9w3c%A z*I;?)ueECp43YO)+or#I668Ns)O9Kg(QjXPzGhaVV1cRTFIANOwuB4c=1ax z7_UJ@Ug{tzL(Kh)VR1XN%ySWtDVyTr7$022nyDHMDb@RXz~9A+uIwhQEct5CQk>ro zmiBbFO0yocxGRYHwpbBBy!EW==6%gm2CWot-JvzC46P#p$zee#>!kZ3z;^P(NH3yr zY-*fYZ<5fKfzz>6E6_IySYz+HA#kYGEp-j}AEi#~< z|D|gPP3AAf!4ehteZeY`VEgA@w{ijD^j>#C#N@-$om4`VaxnJ~)~pMU=fBy*ywC7V zANvjm4~m4HgMR&IXA)a8lucBzS!+6*u82K?!5YkFTF%}%jdn*A`f1LlgNd@2RReMh z?Z~fwq@sI>qU8~Nx*?tY7Fc69v-~^cqdia##}%Q!A|=8Hi%S?MmNeZ>_T6|isN+kJ zB{x>YaIy&Zw2ecJM)XsrcKm6Huj^e6qh}|@Q~hYvywL(D-PGqlTZ?UULtB)4xPKg} z%Kh0T1D#Xa)<~K7jjG)*CkeuwhXLJsFW=}!O^6@N;-8%*B}gzY^Yit2^^0k*sscQ= zTEu>Op;(txO~~c_n~ezVWzYqtVUzY&!g3=N*P*wUj<6pxbCh-b(M`^Yt6tfy{>hZH zk%SbESzk~tTP_Xt1YGjotZK|(iHX&YwariHphh|qXABbpfJDx!Ajl_(4qWlnuC=ly zTc4KC>*TaYh+ei47+wu{Yieh`L2l2})Veo~oeqqBLC1;1{GO4$tpl0==WVnDQB1!M z$@cyXMB$~`m|Cs7=d!N4BM%ThIQ*-yE?m|5k2Gh8pO7|0Fzai|RpN?nBsjawT>+5# z$r-)V0)>)6U{0&qM*{0((gqv~-QjpjkY`LwXMFj`?t70R-d@F4ObQ?C!N@}wwMnVqT?GzMFfsIe!To>yXFKApaE!j1Vp|1NsDBD)k)up za({zp9Im+^oH;6~=MJHxp45kD*oraaNO=7n=gYWKH1$c$#foJ& zbtaS82v|HUKQOyqZzNn4j?b`k#27yQCgs|vOpd3JSR%mE^f%orL-$S4|4ETJ&&mO6HU4!50y9+2+d^2v6 zc{Yp|RaJAGwV5MiFVncnKG>&7z9x*Wr$axw-l`$A zq79s8PTNf1eVKxx~?&>LQa*qJe zCR;&e`4*t>K=!pcuUkNVZwzSrJ$~G+AAn;opcHc}hERtrJh>T3l#M1CGVHDg^pgzF=A@~zBZCFIm+J}Jn+CX> zIUm`3v-@JS=Q?7Jp@!USWoq7q$_wDLG6nPu+!$(}l@=*H}1Hx@| zO+>@WdZ4`-9!{!}AV%Lt2oZWz+rnFrhjex>57!!^&LfVYyn_%zn{5}5bZDzb$e_C4 z;Bp<~(x=Zx^Xhaom-}zXj;rt8rEV59Y6O+g+dD#m#QiU;_$#V4E+%T={yR4)=l1S% zbAF3<^-=o}x4UT&9si5^0;pN-WqdM#suKa(?e%b5eCL=IPXZ~8kP#+;hDV(Kqi*Y% z`5=tG^Bm8h6RkpJRch*3QDUKdCBVwNaa_9Vrh?*x+R@!YYUeq~_ftgl zt@7&6X2vDH(7JI?@h5A(5QE?J)Qq3NLT}M!?d`P}Tw^_9rJRP)*y~FG7c60u^FO&5 zUXrfn4!9sFvLnI5 z;Md`Iz$nA%P$y28mvCrw`2gB3*huSMCzR^%=2xRNJ5VG)xz_GM_@Q6ElP|iba8EaV zoUqQ5W|{W@j1e=ac9XX7xY~46Rx_Coxr{Wpi(Yi7fx(Zj&EqlX8fE4|X!)u0L^1P< z4Z4no5gOzv_)_+07$(=erP|1;{iACY4FS;?p1V=@ru}DuUQO5Dx5K9W1wbQhtq73I zdZZ&=KW7Xt!+F;eOYLg{dXFhFh>=7%;E`60XoX7?)QzD$Qd;XHI~r+^HYJ|(Sl)+g zz4~@ds&q;h7Z&z4gHL#G40%rS)Arq*DB=$07VjUY&LbfckFF?<5m9+s7zv$;T?hOV zE;nz0%&JhxJxF;|O1a`~p6DRUsh6ozN7zWqcy?J-tG-lrTdrhPoq-ZXY0M?nl`ycr z))UMn@a<@!_0^fdMT`oquTNd%Or}t`5n_jMwx5|>=Cx)AJZ|{~+9^Ihjy~6mIJ*Xz z3S*QGKf1tbvVHdfL@lx$3u1-F>tl~Q`~~?jG9^ypVy|R=rC{Bxe&USR!jUi?irJjx=} zx(=$`0uKxI4>1I%*!Ic#%bY5YzDSWO{GhV62dLyTyR|{O9t*L6HhClu7S5_Rgx8@q zyV^N)HOIM}Ed8&$gIrco9~~eB>5|h#S&aq%RQn) zLd7B*wRX^c52u(wy5^|q^tA00H9Tr$M-y(dG6lAfM*`O@dKhYm9koWI_JHx#bMQ&+=Gobx=Xy!X%7vR{YIyzbM%)ORHf@*cKer)uM&&xpB z0?Rk2c&5ACEBNr>C<(G!| z$!(uROHqcEEC_vkGad$NlIrTUJTzi28TW19f4*7%IPHv7yq5ux95XN|k z@p6#$LAU326oPyV92GbluQq_TMmgECj~y&$gH)=!k2V0d9q0Ubei`eNY%*76I|Ndb zOA?$0&$a>g>S=;ZU3I$yt?!Qz+l(~@>O&dON2`ZP{k)0 z00zv3kT&J|q17*Szb7KsAzx>bCC^fS<& zfPYOKin0EMUgmpA7~aHL008-MbvDSb;P+}F^h!(oAGJOeN3Hsg1CEeE`bWo-z~dC~ z1B_bKmBBLH6;eaug!a?BL+d#%nl70-cgae};lM2xz^C>|F>}wLaOc#T`L<5nzZJkn zO4O_f!M42^|NdPXab$H)D)@je=j~eBHMWW@r;+4EG-PsH^vJ@`499)k+B+^gz(tdN3)3t09DpETXo&x zf)dBpqgzcDd+HV41MT;K+0FWrzzUA}-bpM!#~s022S91Urw;$ncR-c?qme!%aL-m1 zk029@glDRNLclXN5)bxu0JB>g1yJuvT@=Z;)y{nd#JCgWZ|eph?j;s)>4+fs@cr>T%4T;@LU)krs*&9nelL!Z;zweB~#jKs_P 
zzk)qY46C=_?WvnE^~0XfgK)H5D9k{i?fk=0twAc?-J&wmf%k_l1J=T}um4DBQ8_^H zixRMeuYrs@55;p7hEY-QaUq;;7?g7=l>#xXOQE_4h5lT z`!q3g`hugV0`=`ET}|DSTYs>#o1N&o!8+^veh#=}$2%sR#641+z0d+vZ zJ)#SblLZmcqelO(QrWl0ny}cbtLt7}z69w^sD)42y*olD+tT zs!H+{Eu4wH-g7l!9)Q&&{^P6X1BHJ5yJv|0bH8u|RAKTy*>i;t8Kf_w0iSI$I)ZwT z4$#`nzd*OSs%-LKE;L93h}H@`L+Ow6nuddhru0q#QEUx{TuiunbEBF z9RG!Gh*b}%xc*%YaE^xPmH|4O?l<5kS$JACM}!{%{d_27PYJ=Z1LTX~S`bJJmx%PQ zktxJ3T4Xx7+q6nXkn#A#XZinoI*@mjy@#%a7UzHKPTyr@->ziDJC*ZiD)4dP)`CQ$ zAP4kxP|(@kW4&N8J~eYP3Tr6bU?# z{$q_%AS)!E3<)a&KKPFc0UgC(K*H#YTo;z83`iIu(z~F+gZlyw!vrrd*^2>pJMRmEwvGTicL(I?yQx1416oMm*)r5bmny(> zzS*MV{Q;C8sW_ND8~#r`wr$-`Ga1%!3ds7ZFx(DY+#BL2D#gED**pF z0tt-|HR-axJ*PTSP2hh?6M2&ZYisZ;q@I|wp*bJQJ>|0zG7E9+_phx5SIm;z#A9#!)aTFQHI5-vd zECM(ZI!ZZ1Y4rQs3s+38x`db3sk#fr+6Ddn=7hh&aCKvn7rzHM$@wou{?`RiJGchi zWtp$%>V5+#b{P~%T^;u(OCUj)Bfwcb!Kt_GvhX!BJ~_aq|9*qCtCHY+dk9Pr&QRTt zQpn-`ON&V6g`Kg2MM$Kt%`U~xR&*lhCdKUJV}hBlKhOl3nt(QKcL@~PQ5U<4f>ZSj z`+#57rs74V32~`?m{egNE-~td>Mn+T*PbWchak}ng_Cq1QF)Z`BQ(7I zZYpLSonRbYbhMK!sXjBl67_%glh|05ygPVSRvEjAg%FW8jEZ;z8cy19OW5y_dL3F+mC}7niLv%_enW^z1 zZ^V{d1=-w7sBG};Me8o?XyGEt)YJ=<=hthvU0aq?|ng6c?kA>MQg`^VN?0T!71IA;6~z$6#b^GXD}eL}=U-?N|@&sMw~dKRd-83^Mm0qESEKkI!o+GLOn{+8~05yU)aC^TKDMWx3yyq0j& zdM&X#1U}|(k7Zv^C^rtQr>N9!S~A7$x37QK&v*bPT1zzxT)#O89mW{M;3!*pI8ASD zbX;uu1~%x+9|68g92l}`Ig$-hm%;hI4ro%K^lC8x&22h_#4Y-WP`38cvZOH0g$z-t z5=cXz4KUzb^r50@2_HLNhw$LTV#X& z0f}{&)MwIvOKO~h@sbk-{APS@ycuY2WdH3atLSFmJ%jd>$WQg;kSKuivx@#&|2^6h z2CWcp>n?Jmp+Kd8XvxOhW{^Z*1f!)%BY_uDNk6E!Y#*D`0@)u$a+~K1t30c&ft8P^&0EzpQ`4coi*Buk1gPlDfK_67R zKsw#`bDAkMMX@RE{7mjMc)qB`-geSHtb7G|7HQE_iZnN9Oa$aNUIeqK_d)5ozUCdB zG7!@WUk!^NHG)I!L|!YMZbu%8TBr5It>nIs2}i|`RI4mu)ht6CJO>%kTOCk(PuvknA^I1Un;Yo}V0!?p-x^dY6MqDn>OD^^Zhh~{UZG(Xj)Eh9Xdmd_IC*m@tRZ{27{tkS22RiOionQeJHh)g4!-mufy24Oh ztdXj%qSO#!KJ3VOApsH*yW`^N{=i?ua$kq%Q~6={7o69}!|kp+;`=S{BVGH*NS!wS zi~ze;N1$0-9QBHky^KFStHhuXg%+-x7trlRzx(h9lv0Q&vo=v~^e~6sla77{X?%lU zouVGfMw%;|2)E$IdUyc~1SZ5$Hi@cxKr#5>$khwrb+{Ye0J44wn?|Cs!rMq$xbt`6 zMKfe@Db$rac)pa@5dwTn%aEWd5hEc* zq;&HmUrk=jQyXI^8HaWTPAXu~hKS-3!A|j8@DZ+hJ;hsuI|#5h#+vpk_TxZW$ayMp z`KWOn2Q>)c=or#sbh~z$^?86{FpqAc1(wEUis6!Qt!RfO<^4In4_II$=(hYe1ynGQ9|>F?^z<_n!w7;7rF>pNZ_3RXY81i2)IJ+ZwI4#h?+ zXaMFS=%CX+p1|@|k*5uU-#o~-yUh|ffr(H9pQGk#WG#^9sd!Qa^0v!Yg+eJ9P)*^P z{)p&dydt{=85V23Il{Z|6B!lv;A>Y=DJ8rD$UxV;95V6!u2S8l{d*mru~Q(?-@!zI ztK)&k5fKbdr3$6te-D5sd?JJW4jSIK0M=ePt}c|(QW*bSwlZRB7KPh@@T^D;M#ssy zVvX2bCIoIX{WS&*`GjyXOe3llcy6k9oGUCV;2b|e$>-QY0;U@)aNz=rQP&n&_hmD5 zkMIIM{HBlq+KffeEckrlcke}(CE;Du#Mfrt^uOZ9$FvU$vDBYv=ey&|A4zY*kPM7k z1W~9v4=aE+NCW*vbhxQoqs;ZAXwIdb#b$^;esElLt5%c)n@4Nyu_(LBatEaf>A zFe1fi3sXf+c#1Y^5JS!{8_?>xLwr7sfFm0w?RK{5cye84YZ7vYL~iaF7=lb6$~&PZ z@G57tftMar&8hjf&~MvoYqT!%;m7(w=~$P2EWIOUWNJo}p;O2Wi1OD zhEYSXb%S6b9u@?J_b;Ai0j>D}WU7447jpGjKgSsCq7siZrXp&6mdMXOnH$Vdw$%DQ z+-wIbfiVwX4v`BY1X}*C-+QI%>cPN%Lq&__9sGVKV1l%l??JF4%Go9Th_1nmI7|?$ zY}xGDYa=-N22fGona5hPzQ2feCU4O!FhOeR%r`QE+l4`sm0arGk;-0z!FcvfY`36S zv!i3p_1=RmYdoTC-@ClZ!Wcs@T>n|;txi^{CZlaw>YxD=HG8K%~o)@ZcqAru#iX~H&w}=D6c&2o`X5WJ081QjppX$(a z$IyFNwOgQ1E7N_0tgXxzD@|}@H|dr+L)(9<%YMTE+7rNj8Ai%T)UAf1v>S1> z$&ET5(k&lU>6Fs4Y$o6j{^YvG&3>3Yfx~QFtttk0ZhA_J+y`KR*L6pJV*n_{wKZ)V zuRyqeict{+4z(uwVYy}O!m6^`9#%yUGkWm!%+TRuI!B0rhMQPu=sv^J_~e=^jvBN9V9`U}f= zpEP|7bobwD>8O#Bq=dcYASku>fP6-6B^>eeU|wt`05Rp!)d2I9daIgV#cwDl^?%1x zpyzJb6`TW6mw}`gYWNGR7M>uH<7LZS_*Afjm%$P?@SdP7`(V~C1&E$z#%cbl4>d`c zJwrmj)ZFbsc6ACnJ#_DXS|JkGQiUKI|ZiDvb?>kt#max^_F{lC1z z$$m7=Pk(u}cF&XcHmkssz&sux^IoOBVCw0Mjyj!hj|(@|M@r4_K%Dm18{pw|2oN~P((q6Vq47aX!YJybXLm^w8%Vp2vmgc}sIHWgbe+kqnjC(bq%ZlZiE7d? 
zjYXI#))d^rs;ADn1pbrE25vJFc30s2%mb3eDrdd&y2?rz>dE1$xmglesZ)_WYi|uF z_-OcY4wFpS>ETV|rhji|avkT?2|lfM@TPRitI9E?6r#hl zlPJHWA@}SQ&|+bp8$`4x;VH`2;E@-C?e2(d^G+~r0A~NH=ASor%I)=2@tS7X-VBv^ z7R>a5k8`9ifo?D@WEP`;eGTY<*(0YizX||}bR>M;xR;cb`bH%!0r3*YdtPec{TSWvPWF?6BUI%QQoF`gT;-oy zjD3D1i_7QX?Slu3fJjoS25td(P11k!XsuclDu6X>0MvXS^t zz>Y72X4=0?y`&=3{0-W^dJU|#4m@c~9nHBOwOmeeLNELzv($PC5DO{mi?~;Zc&IKx z*S^2<@vfR#fn4=H_^8>KtEn((p8rAiuJ%KxN$47qw=#e=e75ey)%t1{=h1C! zYGsgMtMoJH`t!7e#drKJ=;UeKANfm4J>B>zGahz5Qo>_9Oy6h@4%E)qG<-KafiW`2 zd|imT&*A7TH-h!KQ?2dczd9__ezHlLobZ*ryCha>&Z{%T7h2dgcVcIMsf=6o2#u#Mh-aeBY+czDjaJ) zQsxfY71B;Sl!!pPLIr$VugdnfwAt;| zB8s^Ko!j2ND?EnuUM6L1A;P*nsm(xCP=|F+Oql@y{Gg{Mc&@3Sb^MMs+smTRAtTHk z^B<+JJG6`jjzLb(!diB{b!Mx2EHaDLw=N@>2a4U5CKuKIUE;OW)OQL~+hx7s_iDo6 z62BYAnnC-|gdewX>#~#ZD<5@B09UZPRmRM-@cxP%bR4+SOIU&e%cBR!usl z0t8+mW8{)I@M%8pqt_0{L1fc6f5cyzuLFD9KlS{@u;PS=@7mxDHQpj@73?yxZx8k( zcKxy(d#2Juy}&iht2|)DF9zklZxnK1bVE4=OBE|xiqPIy0WMn2RC{pv6GZ_udChVHPe-WzGPip&HoC#PWsK( z9g^TM`8O)9WKk<+E;VB7E5HfYF^!f)c1rI1d%e$S%K%1wnx*m$4Q>%8Dfv}tSf%*75@vYSZ3UN!c9FfkFF*-ec zA8rDi1gPQp@t|jv-TEq29NZ|ud*#M|W6J5qngej{sko!h(z^E4o^=`L5sr7QW^sAz zPq=m0wj7>Jb; z`6Hy15^dNMC*({+JzSpr(bQ0V!N3M~GEM7khx0D=?!MaIJHS0*5yW5f#R9zkV)0(@ zH@Gg8VM(oe-U6-bNkNeP^(ASLPV1d}WPM@w5V2JXW>zQ4-yO7LZ5k<3tP`?MAnlkO zsn;*3XXj`7Na~lv>+j5BEMoSK#WqVEjXDmKEcoEo6`U-zvAVlI*>qD;fP$3?W=Xm2 zQ0TT@qed3nqY9(`VG)5v_=@+RMcgFVR}KWO-z&C#fUP~~RH~D%xjhrGJ~z!iBqS$> zZ(e)?RWgy1b@2nmKqa$`P=tgLffz#YYXpYjg7xoG@&Y||PG<9A56xDS<#V0~Ev8=`ElM>b^cK5=}rnE$IAJXqZE>Z61BNxrh8$Y6pxKi=C7e>^q6L(!lTY>MP<0>5s?~4m=H) z4zbyNMsb^q*uUBeiX}J-z4Y!-{q28(N*HbFMdu=m zgt4C^S=fWUy$9V`NA&4eGwx{)4Ufj|uhH^`nvxgK!$Z9#m{yCJ(*#whR_GOn8(b{P z^EZJ~#pV7m*+Hpo=l~SoA{`KMmAtfSNTZ;cQc#g8(Yt`bzFo^hf47rKiHO#z6Cnq- zVJ#3LkRTzl@tEp0xj0Td3Ji6wpWMX|5_}LJtrH(AE_j-GPc0m;uOSi)_%12#np4W>oE-PeIT8>4Z#6T{!v>z$5C^EW=f^9MgnN_|8QiY+Tmp6@&@bs0~__8&^BSo1qct z@bx}_JZnWhfd#XEYQrbxW|p|=y_WiTd4(eg9U?jw+ZmPQfSQv)4;CFNtg zgiHx6Nyh&oa%c_JFGyh&UjUOJ>-L^rKnJl9 z?dgfAPa_8D#CJJTpmzWm`%YW|kGX(eQ6M=%j86E(&=E`+L3SV|zXK8L4DA%?xN}}w z@E<}^w?)atS^Zn`eizf68v+hDCxszQ|K!(+S z)>DAyX-UCd{{A%+IkHz=mI$jiLjxQnW@CFlDy|#aZ-(Kuh;N2Hj=1Pv<_Iv9Z%^>8 z$X+~`Fd3GaM!?iZn%|IrP>~(Bc{ayNpGIfjE5Yt|w6qvmimLua@5T2n1IDv_!<^9* zku|ji`Y$GcX@^JRSK|KD_s!-O4sHK2-Y@_%BkW5n0Ok1yKk4iuUt9TraJ1p2m{6OP3hcee zLX@{%1_ehkb{srlBbOYq(ca7_alS%L3vVyw@<~pH;(2m875Qq_Wl(J0gP~Hri&8kZ z94mfRAmLF#YEUB};tzs;(1j$=9ZtwVV_D*x3rPnBl(*l!+xAd-i@ z!mB$0g(_rSGLp4Jx(H0u2X1@>$J+Cvi779uI~XqkO^uj=5|Ru$*qT4iX^7fCTd6^rgz{=HF5xhKgzpQDFYfn# zNf=9-#BFfvX4=cNE{sQd1VAj!5lban!zAtzB=I#Nt_Eff zL**=HYH_8ezavYYGEUk?PNp)2gw46dJFYTF*-4WlfipeNZFCwY~Wh1pCDUy%w4_()dw-=WWf81Sic@76IdfrO7Za08574=`Yn;E6adsf6=hh!Gm3beu>xnAE12}$Tg#YRAnphSpk-E5d4** zmv?40gmyW2Yx9V}mhM-cZ-m5+U&tt|Q|O?OXU@ug74$>TYT_9)O*T@#?}Rg3Pkr5^ zKnO&8FM--&ksT%@k6#6w)oLuS%A!jMhiz8C=QpJu5ngTG`RI=`4<~%h5VU+g_X=|$ zW8q1nghSeLGUmbC=X)r)G@LgpqcG;-MfsKHYlK?;MrU(#-~>x?sgfr8xob$WZ+!3; z`P=TqXVq*GrGA_1;QYGd=#DytjSwFz_2B}uE%7^yQI@VBJz%0&L4N;(^{k(s;Au*T z*b^a%%|P#@?G(>cUFkryo$EekSlEMBEL(ALGYb+v$|>Bu#Wn9rNMiGDEH1{Ma4d2i zGs^LZL(%sdRKU>|!icl%zY)5sAPiQ~ua`U+KW!vU$JuxJZUWiFTAyKaJ8Up&Fu(rm zm`%6m%#ZFKq({_CITNUllo;&%KEchHD9i!3CASM{uh-N0o3bQlegv1~#kTNNM2yVS zTnz^fquYJZ>thQje)d^kantn6Z#uU+s^Z+{fA^N6VM6%))w>rNna1#5Z1_KR{POhtY# zuOPw_GrF*GhlvTcmkG-vffzS(jjCDnxCzA>JU#Fmn$!`3Sy|&XbKE~r`UK(}NSKwz zaPO-=75D4u%F_|SCvR)nakk;vk%60$5~ZFvh4J6b2&PAR)RPprzLf?yJ-7=kQU$-E zGkRC0b(-LA!0!v`ld)a!MCfP;@fv-)if{f(|cVLiWakY#r-hE$=AAQ=CimimFCCEe&+59^So;K zX(Z`_qCBDER`S;7AoR=?@w}Jr(mo7_n*GFihwSasiqfaq37Q0ZD_>bvkGzw&IWIoe z7v6vsfX*;%nCJ7rF?X)>IGgA?Sm!2FnOkf(c|E-dTY6D%>ld&aV-TBrcNF 
z0-N9ar@zCYIyZsMiXC_-Fg*llrE?pjHCDZGd%gTavN}?+kMiV&vAb>ua|vkR=7REN zj#q8Y^#=Y)NW`@pUU~hrVI4TCjpi#vN7S2a)C_?)K^%)_wwi>wJc&Y_@XjWJWeRQs z7|k_ioo=j0^m&1{tX|=rX==)1o;+$h{KZq_3j&Er{Pb~X9E{1OIT^P}%X?^Aqs3bM z{MR=eqKlt7-)D>qcm07OU5dm@4s zFCDpKT4_ItZ(fq8y^LL5DI4+704kFuj+@HoCJAqQA5$&ElRXM9BUV&9CCL29^wo(A zR{C#+B8-wo^?@jWtQxnRfZp*&WX5D5y2;+J2A-1=Q48$HYYNh=@u+h^p|4-B^IcI_Z?wAer)DYyk`p(S?e(jY-PcASI-pTvGbr_i^5KzVj98^ydUVk*{J*_J-I$=Sl1xr5f2x09k+q zO}V`!aT|)Pe>7d8k6RinVgKPB*?Xw>BYV0bUsSW*Wy7{9eCc1c9O%n#OloOr2EhB8 z&jpbMdlX?MMe3ab_AVwEu0JwDpm7i#e&|Nc9={`mqf6{(Dnh<1N;#;N@zR-p6!%QK z#}BzPE<6o+m!yU2|F-5--03tc)vwlr2@$-n`lId2+oCsuMC5}{P%|_O@Jqi>00~(+ zSuY{w(sjUbUp9mPA2WB~4_;=LCe#7Hz>tcF`ma_U@Z;u}2XkFt^O(6=IGW6duL{QU z!fDu%DA7=7F3b>6lJQe$w`6`Cm$0I93!-BVbRNZ{6QCiXg=nPUq5ejY?O;7Bw)`3Q zV3kMq)p0Sy=jxD)Ned7RFF!rwJT40nK}1 zq`+)9qCCk8Ovc`hKZ0B=MP<|qW%o=><+tnTEiyf_dPbz-`Y!8=-_xPD9G1177`Mt4 zx;HAm?eWiRgv78=l+v3{RVuVs=f6NJp6L^l73DqfpGV`jBKAc)OGz>_H4qr9Ea3nX z>C1P`ulW4hSUT4%vTy!R-B9gpnU)(dGMN-S&4=&w& zjg|sp;hip{HH~31KsCr)(N8}dKrsz+mYv|Kd|YWY6yat7Gf2+lO$x6>r28uLOs@g_ z0Z6xkJ=?su20tX;iECRCu?NXdi0s6;3rz3tRlNvC%mG_O!W#Me6HbLJdK{d07Rwm+ z!k)Nxpg%}IZIyiS7$>!f`%8uS`=;IGhipO7_+a4;Ow{Qc46;OIe~L$_Ro0vq0}^#T z6!zD_-L12tJ|Qk|A4nqt(c(>FchtV+a;= zJZ?ZlYW#a7Yqy4lyc6CXQH$mNIxf@VG;4jhp@-9=-0PlmUh|4y+`V&a|3ZrFF}>|Tde`e@3vb?e=<&?ijoP|$6-MX9nnOx4KHQwui3r9UDJq#U-cEZR zg#6L^m<{`GqzzwkT4*Ofr^E!um)q=LA9J#GPx*DZ?rumjatsrg5(J@foVkk#q z=PAJH9(mt7PA&Z7;z%1csKOISY|-Hh@-k%PM0Qnf2Cke!-vyQ<{IV?hh;WlYPK2Qq z9hZ3t`>qMwGj3fyvXtywp6Pl`^6>V#v4@h`$l*IEdd2B+vB7shUOc@h00xYkU73(3 zoktr6u5gXb*%k;zwXrpT?U(>!Zrku++iFzw9eqFzHB$PGLG-cVC=ClH-2OlsM`q7- zF4MaVrJ61Msxvbu)XEv#PwZ8JrAyZWXL~hq1@!I}# zb4!r{30Rj%O&3*tKoF8z24ITa`FKV0@4=!%e*_T}mbwie5kn@X=}-;RC_4*`7-VBv zy=2zADA=N==WS%mt=P8oTK+4K5i^;9&kdUwhQ+#}!3hj3L%L&@`kD%Du$z+;f&&d7 zieLBDGCEmcOcM3+im8WOH4|Jfu2pAF{fvCPar|d@xuy-=h?p!lrW;v`zJDl5q99R8g~howsCvv+ni_UxVYJK%n+Fmg=!<{mBNu?$mC+LzEgEHW4ICcQM@RYWuBZjLs<3O=J$4_rBIKduF+`gOorn}@WQ-8HE9aaq zunLSl*K-xM?mEJSz(#R;Z=RZ=pB0SKJWJaj4*HuSfV=wnjnw?-25}O*dMaLHMF5ye zR5pyMe{6IzxHHq_oKr+uB9FX+@|w)Y72S9lvmY;M_wzl%%gEuVyIrAsk=bI$gvs^s z0LtR*HP|g!;9WwNMVsQ^vS@y7(8zj-o?dD(S@;1Es^TybNmcF7pnvNy-QdWJ!u=GX z`3tg-C*TQk8$U`yEJ+WbF9gJN;x$TH2$m~42Q*ZZ5S1mM>T@9yslNagYr0^y7%-AA z7w*@D0P+fd6-U>D#C0lIH_PAuHdI`EJzaeo5?6f7@K@FVnLI5#?vAHb5x9A>- zx4Q#$uS`=pjs@=_qRfYYhk7lLWWo6LhU8iix{l7fe_?9^Jabh*fjTUmqz-96qip;K zMJ9h8ZxnmsO*NO%KlbVCU1HaI^W;lYR?fE{wij1-x+LL4Pyz9?A_~+jG;HcS!G z%x+%=jagOl=7>=Tj6n^xS9bq!vy= zt6DA|!uWMhwDS~PkI96aEqqbf$>!Ee&_1FX!Mql6=`5OI_ zNe1sZ2tMD`AlzRa4TD5qRO^WIp?H`V<&IK-=*(TA(a`U+E%lUY_4dQh05q@Fpzlih z0jxUNET{ax05&PShX!ckA3t6HwMZ!X7u=*()Q-x{Pht_x2K-U4V8+hwYdc$$)eP+> zEW$RRTytf+rX|rgBv^iKq#LkxWnxPElcoz|5P#WvY{r1DQPacz%zH^&?tsYgrUGF zh&R>ABWP-Qu@nPSQb)aC{wevwkHR=U^1Ab`Aw*&Nm*`Zf9Fp$V^|E__mUnF4X)6Nz z`30t{na6gQ(_H2KVF-3-_Z83(dOw8HKxHi#M!@$_TngksBc|S+IDnM%&HUajqk6m1 zrQVc?aD^Ob*=g-`K9g>En_s*SvRk^)hv+mv4O5f6I}o<2(0r%dQ3{|KvGG-(&BmNJeBJpVQ*#u}Dj!Gcp}Ycva~6!RcOgP%&bt75LfhJw5h@ zpg7w-*Xm2c^&eUDu_aI2Q&~2RW?fp$cC663-^C8_W&ug^aA9O-#x1#{=G5_*@0 zL}M}G;tC}&KFa6B(G;6qTzUyRNq18N+O>0?15YCzrH*N=X-LR1Z0H9j5Eu+^kF>#4 zoZl7gIfXNi@6^mWJdIxkNcTk9g=+;OQBuojt9-5Tv$t1&0aM^T-iKI$H+U@}`_!BK zVP3`GZ%tU-Lv-N7Xm9-@sr-N_ys7$W^q(sT7}qnn=$Lbnt%<=?j?wwcip7VtM3kYbse|Ich1J8{yG9-y) zy(BDgYmtk<-oaRIkpPhZPdlnZl!%!!Y8++muV{>R67*qtQxQYkEi}JH^ubRHAA=4~ zRfVoWT<-*?Q>2W8OPW_st}}B2m&`(*#QSN4ga|EeN^H>aBEuVpJIUn6*1l2Hm{fcw zS8Qm7?a+!LNxCH)!E4GW?7)&~-;uGfrra-be$F5dvJijt0WITj8-gzg`|R)Rr8)UA$l6V|4|Bqsm)r(JU%LGy(IsBF<(5d@Ejm+qUFZyl!N6v1(o;#?)mGvm}^jnubg${Q;yKIAh(gHm&gw;Sw2>>IQX-Y1h$0B`Z6!70P);$bA$Qg5LoQ9ufW1yO1U~ 
zfPtr)30qEron0eIO`Y7ZsRkwrIy}`7l^5_;dr0rId?l=uU*Q;ggXoEJKB?ogE9z+OgtC zkl}$b=HCRgg2t)Y3d!8p@O9N1pF($LagKB!3?OtM&i1W4v*pZ6hh1$3 zQ2i<(s`CLKoVM9|8W=;am?Jl%q{;TUj8t^Wtvc%qE6dfWwQfi?b)citd6!Ofl~npC zdelSVPhdQav&tnkJ$yHP?z`{S!@X$Sa4rn!#i2PDi2l5@xuU_h@U{c^<&D$KY$pch z0c@<(2qh{@Ht6layvv#TQOYrI$|+M)(^mH0?W-t(7ByvAL*Dhc;9j4A?Z#PJ_``kc z?WVE1h_i~WZoGPe=Ijh)MV?hBPR3M*?IMcM1~4IU!*M3d(DncJ#QVAPOq2WYCtw$_ zuUc9?fVn}xR8K*>>}OL@lw-?5GUS)wi6WmOM~iUVisiy^VtiB;;Y9rWy#(6iN&ZOw ztq?^*JV24fJ=GJXPu*~fT_Z#Lf=h2QTLUoLq8%k>@9)EM#u>c_-;og@wHaQZl5L>3 zWYuHqG^#`n!@6@PwYGo(FQ!{`J*d4a4V1j}PVeHR`klqe8QEIjd^)^^!nLDuOJ6{W z%BL!c`zp0x^cdk?+`g#USR?{v?XyS=WBnK;BGj5%Uo1ZdwL1a2o;9*9@$4`RD0V<` z3x^?}{a--HYIDpKrVeT7y>GHW)Hwza88t}^X{a^c9JQX47@$d@5s5`Q?Z%L%t^%@_ z;I;KCa3C&plxk*+A`R(LIt5?0oS!nD>$eWUbtYkw8Mou!!O!`HD8Gr18dfN`NYyFv zJSlHJa3adik0JN?M%K{nYEDkK$rtuVyfrM6UU&lm=6?Da1oNHcT#;Fr3r;5pB$tsd zROnAdj^jH=E9Arj2}@NuC_Cjq3W|rAOZvJc;+@nQJh>iYYRLhYN{JV~g9k-1YMQZW z`vq>%B|N9uL@!9S$6%~IQ5bs)UN;fv5MPyuo=JCOMd8F)q(w${7sO88Q~bz!&Yxrd zhSc1`V@ncZrq0BbSdfEyd{K;RJNa4pz|vfsG~f>A3`$S&hvR<|Qf7-U_*BGxT2h#f zDfa2&VV{IESa1D?%%5V#ML_ESBG<@R@pU!_@K|BHo3waCVDio1!|$K%M1`Si>+xes zr_dr6kK75OEcT$ttg{!eA^dvNTtjgOq~)`nAp6_zu$cI@JpZ-Dt;#> z>xdRYIgf;f>3@PeRRN|1TxCK%0w2jXkS@I=G|tU@Hv1uJv(oO3Q~?iaIQ&PL<9kNt z8#83ex_a_C1$>Qdq7^rKWFLwvTpF0eV37_XQyuo&nC_*RSMxwJ|IV6Z$u6d$B>qi8 ze|Rq2ig&lTx=UUH_bMw87-8fCl~xZ2b0kuH_kVD<9~_1F4gM6v#P$O{Md3{8ZEv~v z+hB}@2Dc|}f)@r`_F7&4E$ggwM<7Il~ZL1@~ua0;6mAH$bnsIlzB-&zW zMrKDkZ&A1bZ=L5Qt|mT5#slp6M%Q_HaPVk!0R5wUr8vsb*FL-9t{`=|>;TN!LZ8Bm zcq~JxK(JgU>)}fBWIS|Dq~2sh^dyj66B!@X^lr9R6Ir@4o*-F$OFDjQ!5SuAXigK$ z!=kcOaRsAsD_VVyCB*D}mOlFCq})x?-veyp#m;o>7vH;oznCum?mz|-DW$NBLD#_^ zoGFMZ{}Vx_UeeszBxwUmK{FAwi2?<2&~=oI>sNHBk3mukAteuroc@vEvl6Ym8wY-t zRsVmhSsgF{se?wRO2ajaKr$MYsUp5*v^pr0_+Y0*RtaLx#x-rdJ&NsBNOG*Y*@6E% zxt(aqvE&K+(qmfO?C(8Po!Q?3pW2Bf6mV&wAgp~gJ4glxl} zK5rDXpie*6HkaU0bOYT_UIQI^5K3vBM97@;_IT&IpblOgVV2eS9q|Ggz$%m=384I2 z&#lkbo^yo+BKNxGyFie)drB>2hu!U1WG^u_QD(*{r&E>nr9$uRUeEh(%WK+|TDeKw z8CK`pi7T}qUU$d_I_i5>Tq`P{wgH~#0G0uDCIg@5`0~Em{`=}F+0wrQ1E=q~=jOc~ zQn;>}yF1c9(e3*$i}Pi=`-kSN;V1Mj>m8Dmc{(WKQ&nLd7G2-xUKUUW;B9%~uSr6i zX0%srR5u7~>6~I=E|b;am`RNp8XV6}5v~qbWhu7E^%PMkw?9l+>wmcdL!750wE_x^ zG%y=e&O!R)k!V6}3w44LYnZR8pB(-E-SO`D4c@-h|L%}za!1E$q zgAqvau}O z4`18EzH7_7*K5enAV$+!aKmhX^_ja$T;au)>iK?QK zw=ht}6RZ|&-a^7f5e62{gTVb{6PQA`Fo8q^l8PH@K2CnuidQHPg zg^2o#9J+H!RbWcq)9gbukTv_2@dsPF39}y;;-Mw~>KBq4K;oW?^drT?!~EndZ+L+v zontLky-hE9M|+O@>~P?|5RHuggI?Avv{}0d)64PmYajRM7eH8Q=w;7VHC4Amp)lAR z7<`t6H}i=CV2BavKm6!H|xB(7O*zGw3i!m_L|93?3f$CH8U_z=qC zPXDVm6j3Ayjkjf{+4Pmyqfg`Vxyjm5J<88`1crBV>ZYU&Q^0@T@RmDCSyQh zaSfcjxG!G2Ij`p6cS(+NC9jmEru!0zC!CcIcu720L;&hyFri1maib`LkZKH+sIKGE zxuR_|KMIV4^^zi_dFKMaqy(9~jX;TGbF(2*$xz=@v8~&ZzFm@$|H6N10W?H=}#7By$^JPi{5gCQm>K(%qGjM`mdrY;8b;4(Ggb zsXp@V+9Hc*g%WpicKGVoIq?3yyXQ9-^x5#2bY@>pNScvPUsg_v_%lg^)jy0<<@y(F zO0U`MaY3Q>?cazr0J0841{2;%aN`1i5XTVu%2&qo!Vw)gWcHRl$HTJM-FJak^+894 zUMcAG=nHhD?u$$m1T=zB0Fp~9QrN>F`WMhS@C9HRAptp<=721}aYHnupXB(g!VJ*4 zzAiuk$( zRuuvenc7m^fz(1XhcD1=;8q4Ck)Wg({1{-$#=p;6YL@W-~d3dCF^bj8q zYYUa|hUJ(Jrr}A^d$uyWE^00s6gB$J=Vc;UT1-2oJ zJ0yyj1GDYeb1*&O_-!WX`k-1l$449cZIS7{d@`)~15jSgK`XacFq@zdj(qCP8;>AP z$>tb7`p9AIcfgfj#$;-Q7ZAtBgwmHI3GW0-Z8#}V^9J8b?QBu!)fcTNyu|d99|v{uuKjR^b}&Z+_!*(r z33a=+`r1F{iI%p0;QO|`uzp7^_x_MX19V&)dN3DpRuo(R9y%fIRixZjaw|(KVf|R} zT#owik(ker_N38G^5C;FCTFNk4OgSg%}x4-P6aqH)$%92FT!zzEppnogvs@LS^PQs zQy~jwyk+GU-$yC}DnB7f*M?fI_yh{}=(0#EoLQN}RhCTuj!6Gm4>O$O(7JEKd0O06 z&4m^yJG=a`rbU*-7)!%BZJA9ig+HsWV1&>z&h1Nzzz*WbcM}n_LToEJ+W`i%b+Mn3 z53R^=S1QQBRFN7Idns4478v)f&0N0bizn*VSMJ5rnRfmKAJUxH_%TY^*fm#rlS52> 
zRqAewbzRehu~W?8;Yc(ShT8gY5|`+}ZDcGgLgdc8tn5EI{+zlUb<#AVuR?LTv^)6k+>he zMA&zMm7zz*WI5zedG1?OBX>>PUy1<(hM?nZIitSMAKP&mcr2T-?!ni0B&(*=xXpUw zxytnNYZJrq*X;X!xv)XdMk=`i%XpOy|G^>Msm?I&0TAU^kt^2x0LXginI}@xdzSk? zl)}8Ts$B|2QWZ*kS}en1F?^Iia61m|H<}>~6PyzdfL%3YgopTt@b1xj&+62taNH1Y zml2tT4b8JY44pMWxO8v-$~!$)Id+@>ffPL*GW&oAT(*vUz_^OuT4#t_9h|bz`wK&FEZ4MNwd6Ko~Eo|rJSxO4|Bz*zdbCWIi?FH$B zrBmBtXS_5vUEQ|8hTuv{o785r!T?o%4k*;*0+_2}FvVVbMy?qxLcc;S@aNa7Gf?JQ z2=@)oIjBqIe>&vWM3H>J<#^(eJef|j@}7W|>eixQ!5@(5!=oUwpnv~ox1(!PVEJ4J z9tk1NSK<_6*P30D7M#at0726x!*cggK^);He~*aYid?0B&*#20R* zp)qRAMg@p38#N6%L>S2JuO3qm)zBzxA{H8jpg;ZQ;la%yz=}1g4YM}&iMO#nl(~-S zw6nwESSjl(gV`J_=fk3fepp*Pc@2}A5y%J%)r_SJ0(ODxL{36FoFh@M)cgf&iL#-i zM*q!rH!XhD&3ANoEs%j5?U&S)EPZ+J`oXd?(SP1`l4mSJBIz>BVAHVEr?uL0``}w} z;KqTx?>KY6yMXf}@q){a>AUWhU4Jh$klcQ8kM~H!@|jFp&K`w*V-gZQOlHT(WdZf{ zII!0D8G?EOLLM4*ydu_G00SIl8Om#Z`>ZIAsZLyp^SGG!6zu*QCQ&gRu#9|5B%;%T zmZ5#JcQwPN(Eb{(=&3U`89Y`LT{K~iU8v}VkGpcJ!OyiZ&v`WnZtLveQ4PyLS?w-~ z#}S^?TDJH5`y{`+ZpreiVUPh;A0T)ha1Rv$PM;wT3hj}CbT_y|(CxTmo_zN*4%$D9 zHZ-Eo6>hjHL=rtZE@*!?IB zXvQsz$k_jS)r^>5WOV$&gO@&X>>(Vg1}GPY>OQ(3B?_O>G|Y4JZdHrOE@+dqCa4>RixK4)WA+s z$+AF%ca@q%KiN=(+U8ur9V5Z5D=45S{`1`DHL&%B!9(^>%$qBxx#8XKDBOwf$4#+x z$V6vldMvI=^>qhc+z;@0Q*QZ}W1`=F}I{E$m&4)diVrxWcW@PA35kbFcVkWT}Lw4Q^88zSG1#f1x17lN~|7wjAg)l<3g98|eNVF%i@dO1|2ESe8 zW&a>8`*uHfB8L$;Ti= z5^Y9nZjJLl;w(uMN%yI0f+DXm|B$M+t-@qjrTWy7o3u&h__i?_Vwv1bYO($0Wr^!2 z_Rfxc(2=Xr`tetx(p@1Zi)-lh|-f=>9V7Yjuy)LIqmeYR}9%rqJ=brT2-n*>yiV zJ}uekJZii|7|7EYEMU~uJQ_ieeS36ONlP?{({rESCiS_^MrWbm2K&lQUo$_`1s=|m zwFwcmtwT#S-6s`%k?3Ni5{u0#7Q((01$ol6rK1EjW-L@xdeCv*Bc7->-1-{K?xb^t zb%%&$sLbey@?sJK=FPsNdikxvtKddf8~kvriCvvdfOI@=Hf&u$yU@M&&i47PL$}Vi z?24k>!w{re);8~a1!IS&g=&}f`B7A-hKm(8$9DvHGkwCCfqJmm5g8zMO`|fPO6&NI zKZqe<)t41ZvlWnp#B0>r!}Pez)J{(e1uJJVMzrPH!Z?)wsE`5EV5aYjin!4`u5UYu znke7)kqaH1ETS*I0a=7^^y`fT6U=v7=;pOgbm0cFvX;8B((b$Xez-az`@7d}@HV`h z6%IOh$56B(?uki7CB~zHCMqNnjaai?YhuV!mm|rtoEBr;GBGIIP|ulP+`NN&>Ekd{ zK)gEXaDTCu`bPET47zec<{lK~QnjsI7;j%~->cMRa#cS4k%98=79V8H!dqwcA;;|Y z$#fcx(a{Gj#c?a1RBl2>QNh1IwL~Zza}T9Kpr=@~ik%^w>gV)Ya(k9?-rdd7xNqe4 zYHM1R_DHx_Gk#Mc6NzR+7AiEWSP=V89I{XgjxE%fXvi|SqQwjHCm<}4a^%#@YbsBIf?(^-_)@%iz#6$JKakYx}0kmbW)6nI| zFXqcdUM&<}wJ=ioZB#RSvrq1W=Ar`2^VISSA9VkSUX^xozl12}RWOzL^ zf@dS>?bZ!p)ZFnMPA#?PVKYq?ufG8SpUPTR_6S^V%GE>2m4Z> zU<1W(*)>zkzh3FPwB8Kcjx5k{sWKtyXWwL%$o4M+^P|33ev=nmW^v6l9!59cj~`r7 zqnT~*X5a(%Kuul%0CTh_t4xs=!|w6+RlohaR?JU(w1R%(j=U<@*us2A@!P1&bNOpG zg@ToCvg`1Lyo_Y#+5O>6A(c(0-E?Bwz6wW<&l;&@>y>4Uz?o8G_sQdBh%A{|zR&KNUmli6*SBC93WPfZQ(bw$s z2X}{P2C?rhm1I*nT^fT%P{rsH{-+325Ai8>@2zDDt-qws2T(9}y%lk)EcO0cKU=FE z!*f7rr>WWUmHKxolWY$c-68P_?)x}f8j(AxuNBytvgJsczJY%gyWA$d{odwq{>!3X zey1-n!j>+cmDe!Yha2C2bQi|%Q-W->CECbeAzv8N=aPZ%d+HGj=R9WZ8op-8E4oO( z>zR7v%mKA)`o;6Fsi!?zv#EXzRQout)nW`~skxnh$u^&QZ*`EYFMt?h

diff --git a/docs/content/en/docs/clustermgmt/observability/expose-metrics.md b/docs/content/en/docs/clustermgmt/observability/expose-metrics.md
new file mode 100644
index 000000000000..a1951b2258cc
--- /dev/null
+++ b/docs/content/en/docs/clustermgmt/observability/expose-metrics.md
@@ -0,0 +1,175 @@
+---
+title: "Expose metrics for EKS Anywhere components"
+linkTitle: "Expose metrics"
+weight: 100
+date: 2024-04-06
+description: >
+  Expose metrics for EKS Anywhere components
+---
+
+Some Kubernetes system components, such as kube-controller-manager, kube-scheduler, and kube-proxy, expose metrics only on localhost by default. To expose these metrics so that other monitoring systems like Prometheus can scrape them, you can deploy a proxy as a DaemonSet on the host network of the nodes. The proxy pods also need to be configured with control plane tolerations so that they can be scheduled on the control plane nodes.
+
+### Configure Proxy
+
+To configure a proxy for exposing metrics on an EKS Anywhere cluster, perform the following steps:
+
+1. Create a ConfigMap to store the proxy configuration.
+
+   Below is an example ConfigMap if you use HAProxy as the proxy server.
+   ```bash
+   cat << EOF | kubectl apply -f -
+   apiVersion: v1
+   kind: ConfigMap
+   metadata:
+     name: metrics-proxy
+   data:
+     haproxy.cfg: |
+       defaults
+         mode http
+         timeout connect 5000ms
+         timeout client 5000ms
+         timeout server 5000ms
+         default-server maxconn 10
+
+       frontend kube-proxy
+         bind \${NODE_IP}:10249
+         http-request deny if !{ path /metrics }
+         default_backend kube-proxy
+       backend kube-proxy
+         server kube-proxy 127.0.0.1:10249 check
+
+       frontend kube-controller-manager
+         bind \${NODE_IP}:10257
+         http-request deny if !{ path /metrics }
+         default_backend kube-controller-manager
+       backend kube-controller-manager
+         server kube-controller-manager 127.0.0.1:10257 ssl verify none check
+
+       frontend kube-scheduler
+         bind \${NODE_IP}:10259
+         http-request deny if !{ path /metrics }
+         default_backend kube-scheduler
+       backend kube-scheduler
+         server kube-scheduler 127.0.0.1:10259 ssl verify none check
+   EOF
+   ```
+
+2. Create a DaemonSet for the proxy and mount the ConfigMap volume onto the proxy pods.
+
+   Below is an example configuration for the HAProxy DaemonSet.
+ ```bash + cat << EOF | kubectl apply -f - + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: metrics-proxy + spec: + selector: + matchLabels: + app: metrics-proxy + template: + metadata: + labels: + app: metrics-proxy + spec: + tolerations: + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + hostNetwork: true + containers: + - name: haproxy + image: public.ecr.aws/eks-anywhere/kubernetes-sigs/kind/haproxy:v0.20.0-eks-a-54 + env: + - name: NODE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + ports: + - name: kube-proxy + containerPort: 10249 + - name: kube-ctrl-mgr + containerPort: 10257 + - name: kube-scheduler + containerPort: 10259 + volumeMounts: + - mountPath: "/usr/local/etc/haproxy" + name: haproxy-config + volumes: + - configMap: + name: metrics-proxy + name: haproxy-config + EOF + ``` + +### Configure Client Permissions + +1. Create a new cluster role for the client to access the metrics endpoint of the components. + ```bash + cat << EOF | kubectl apply -f - + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: metrics-reader + rules: + - nonResourceURLs: + - "/metrics" + verbs: + - get + EOF + ``` + +2. Create a new cluster role binding to bind the above cluster role to the client pod's service account. + + ```bash + cat << EOF | kubectl apply -f - + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: metrics-reader-binding + subjects: + - kind: ServiceAccount + name: default + namespace: default + roleRef: + kind: ClusterRole + name: metrics-reader + apiGroup: rbac.authorization.k8s.io + EOF + ``` + +3. Verify that the metrics are exposed to the client pods by running the following commands: + ```bash + cat << EOF | kubectl apply -f - + apiVersion: v1 + kind: Pod + metadata: + name: test-pod + spec: + tolerations: + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + containers: + - command: + - /bin/sleep + - infinity + image: curlimages/curl:latest + name: test-container + env: + - name: NODE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + EOF + ``` + + ```bash + kubectl exec -it test-pod -- sh + export TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) + curl -H "Authorization: Bearer ${TOKEN}" "http://${NODE_IP}:10257/metrics" + curl -H "Authorization: Bearer ${TOKEN}" "http://${NODE_IP}:10259/metrics" + curl -H "Authorization: Bearer ${TOKEN}" "http://${NODE_IP}:10249/metrics" + ``` \ No newline at end of file diff --git a/docs/content/en/docs/clustermgmt/observability/overview.md b/docs/content/en/docs/clustermgmt/observability/overview.md index 5f36ed9c60b5..f4674a860162 100644 --- a/docs/content/en/docs/clustermgmt/observability/overview.md +++ b/docs/content/en/docs/clustermgmt/observability/overview.md @@ -17,4 +17,5 @@ AWS offers comprehensive monitoring, logging, alarming, and dashboard capabiliti 1. [Verify EKS Anywhere cluster status]({{< relref "./cluster-verify" >}}) 1. [Use the EKS Connector to view EKS Anywhere clusters and resources in the EKS console]({{< relref "./cluster-connect" >}}) 1. [Use Fluent Bit and Container Insights to send metrics and logs to CloudWatch]({{< relref "./fluentbit-logging" >}}) -1. [Use ADOT to send metrics to AMP and AMG](https://aws.amazon.com/blogs/mt/using-curated-packages-and-aws-managed-open-source-services-to-observe-your-on-premise-kubernetes-environment/) \ No newline at end of file +1. 
[Use ADOT to send metrics to AMP and AMG](https://aws.amazon.com/blogs/mt/using-curated-packages-and-aws-managed-open-source-services-to-observe-your-on-premise-kubernetes-environment/) +1. [Expose metrics for EKS Anywhere components]({{< relref "./expose-metrics" >}}) \ No newline at end of file From 2a2892806a8a87c552e7a0ea093071e4426a099b Mon Sep 17 00:00:00 2001 From: Saurabh Parekh Date: Fri, 17 May 2024 17:33:28 -0700 Subject: [PATCH 145/193] Update docs packages instructions for airgapped env (#8164) --- docs/content/en/docs/getting-started/airgapped/airgap-steps.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/content/en/docs/getting-started/airgapped/airgap-steps.md b/docs/content/en/docs/getting-started/airgapped/airgap-steps.md index 60773faa7fac..d19b66df019f 100644 --- a/docs/content/en/docs/getting-started/airgapped/airgap-steps.md +++ b/docs/content/en/docs/getting-started/airgapped/airgap-steps.md @@ -38,6 +38,8 @@ toc_hide: true The `copy packages` command uses the credentials in your docker config file. So you must `docker login` to the source registries and the destination registry before running the command. + When using self-signed certificates for your registry, you should run with the `--dst-insecure` command line argument to indicate skipping TLS verification while copying curated packages. + ```bash eksctl anywhere copy packages \ ${REGISTRY_MIRROR_URL}/curated-packages \ From f63187487981824789129c09ec78f575aa26679e Mon Sep 17 00:00:00 2001 From: Chris Doherty Date: Sun, 19 May 2024 08:04:58 -0500 Subject: [PATCH 146/193] Update OWNERS (#8172) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Chow 👋🏻 --- OWNERS | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/OWNERS b/OWNERS index c025af06240f..e180957661db 100644 --- a/OWNERS +++ b/OWNERS @@ -2,7 +2,6 @@ approvers: - abhay-krishna - abhinavmpandey08 - ahreehong -- chrisdoherty4 - cxbrowne1207 - d8660091 - drewvanstone @@ -18,4 +17,4 @@ approvers: - taneyland - tatlat - vignesh-goutham -- vivek-koppuru \ No newline at end of file +- vivek-koppuru From e5215755ab19ec4b7cc530b1b3cdd48ad6b211a8 Mon Sep 17 00:00:00 2001 From: Vivek Koppuru Date: Tue, 21 May 2024 09:49:16 -0700 Subject: [PATCH 147/193] Add Changelog for v0.19.6 (#8189) --- docs/content/en/docs/whatsnew/changelog.md | 23 ++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/docs/content/en/docs/whatsnew/changelog.md b/docs/content/en/docs/whatsnew/changelog.md index 0ebd4dc914fa..b2ce89767cbd 100644 --- a/docs/content/en/docs/whatsnew/changelog.md +++ b/docs/content/en/docs/whatsnew/changelog.md @@ -31,6 +31,29 @@ description: > * When upgrading to a new minor version, a new OS image must be created using the new image-builder CLI pertaining to that release. 
{{% /alert %}} +## [v0.19.6](https://github.com/aws/eks-anywhere/releases/tag/v0.19.6) +### Supported OS version details +| | vSphere | Bare Metal | Nutanix | CloudStack | Snow | +|:-------------------:|:-------:|:----------:|:-------:|:----------:|:----:| +| Ubuntu 20.04 | ✔ | ✔ | ✔ | — | ✔ | +| Ubuntu 22.04 | ✔ | ✔ | ✔ | — | — | +| Bottlerocket 1.19.2 | ✔ | ✔ | — | — | — | +| RHEL 8.x | ✔ | ✔ | ✔ | ✔ | — | +| RHEL 9.x | — | — | ✔ | ✔ | — | +* [EKS Anywhere issue regarding deprecation of Bottlerocket bare metal variants](https://github.com/aws/eks-anywhere/issues/7754) + +### Changed +- Backporting dependency bumps to fix vulnerabilities [#8118](https://github.com/aws/eks-anywhere/pull/8118) +- Upgraded EKS-D: + - `v1-25-eks-37` to [`v1-25-eks-39`](https://distro.eks.amazonaws.com/releases/1-25/39/) + - `v1-26-eks-33` to [`v1-26-eks-35`](https://distro.eks.amazonaws.com/releases/1-26/35/) + - `v1-27-eks-27` to [`v1-27-eks-29`](https://distro.eks.amazonaws.com/releases/1-27/29/) + - `v1-28-eks-20` to [`v1-28-eks-22`](https://distro.eks.amazonaws.com/releases/1-28/22/) + - `v1-29-eks-9` to [`v1-29-eks-11`](https://distro.eks.amazonaws.com/releases/1-29/11/) + +### Fixed +- Fixed cluster directory being created with root ownership [#8120](https://github.com/aws/eks-anywhere/pull/8120) + ## [v0.19.5](https://github.com/aws/eks-anywhere/releases/tag/v0.19.5) ### Supported OS version details | | vSphere | Bare Metal | Nutanix | CloudStack | Snow | From 4b49215cc8e8688f88c747d0840900b16f4c0830 Mon Sep 17 00:00:00 2001 From: Rahul Ganesh <31204974+rahulbabu95@users.noreply.github.com> Date: Tue, 21 May 2024 13:18:17 -0700 Subject: [PATCH 148/193] Add 1.29 E2E for Tinkerbell (#8169) Signed-off-by: Rahul Ganesh Co-authored-by: Rahul Ganesh --- .../build/buildspecs/quick-test-eks-a-cli.yml | 3 + .../buildspecs/tinkerbell-test-eks-a-cli.yml | 3 + test/e2e/QUICK_TESTS.yaml | 6 +- test/e2e/SKIPPED_TESTS.yaml | 23 +- test/e2e/TINKERBELL_HARDWARE_COUNT.yaml | 87 ++-- test/e2e/tinkerbell_test.go | 488 +++++++++--------- test/framework/tinkerbell.go | 42 +- 7 files changed, 354 insertions(+), 298 deletions(-) diff --git a/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml index d17f786e86e6..c69cd496f74b 100644 --- a/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml +++ b/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml @@ -138,18 +138,21 @@ env: T_TINKERBELL_IMAGE_UBUNTU_1_27: "tinkerbell_ci:image_ubuntu_1_27" T_TINKERBELL_IMAGE_UBUNTU_1_28: "tinkerbell_ci:image_ubuntu_1_28" T_TINKERBELL_IMAGE_UBUNTU_1_29: "tinkerbell_ci:image_ubuntu_1_29" + T_TINKERBELL_IMAGE_UBUNTU_1_30: "tinkerbell_ci:image_ubuntu_1_30" T_TINKERBELL_IMAGE_UBUNTU_2204_1_24: "tinkerbell_ci:image_ubuntu_2204_1_24" T_TINKERBELL_IMAGE_UBUNTU_2204_1_25: "tinkerbell_ci:image_ubuntu_2204_1_25" T_TINKERBELL_IMAGE_UBUNTU_2204_1_26: "tinkerbell_ci:image_ubuntu_2204_1_26" T_TINKERBELL_IMAGE_UBUNTU_2204_1_27: "tinkerbell_ci:image_ubuntu_2204_1_27" T_TINKERBELL_IMAGE_UBUNTU_2204_1_28: "tinkerbell_ci:image_ubuntu_2204_1_28" T_TINKERBELL_IMAGE_UBUNTU_2204_1_29: "tinkerbell_ci:image_ubuntu_2204_1_29" + T_TINKERBELL_IMAGE_UBUNTU_2204_1_30: "tinkerbell_ci:image_ubuntu_2204_1_30" T_TINKERBELL_IMAGE_REDHAT_1_24: "tinkerbell_ci:image_redhat_1_24" T_TINKERBELL_IMAGE_REDHAT_1_25: "tinkerbell_ci:image_redhat_1_25" T_TINKERBELL_IMAGE_REDHAT_1_26: "tinkerbell_ci:image_redhat_1_26" T_TINKERBELL_IMAGE_REDHAT_1_27: "tinkerbell_ci:image_redhat_1_27" 
T_TINKERBELL_IMAGE_REDHAT_1_28: "tinkerbell_ci:image_redhat_1_28" T_TINKERBELL_IMAGE_REDHAT_1_29: "tinkerbell_ci:image_redhat_1_29" + T_TINKERBELL_IMAGE_REDHAT_1_30: "tinkerbell_ci:image_redhat_1_30" T_TINKERBELL_SSH_AUTHORIZED_KEY: "vsphere_ci_beta_connection:ssh_authorized_key" T_TINKERBELL_CP_NETWORK_CIDR: "tinkerbell_ci:cp_network_cidr" T_TINKERBELL_S3_INVENTORY_CSV_KEY: "tinkerbell_ci:s3_inventory_csv" diff --git a/cmd/integration_test/build/buildspecs/tinkerbell-test-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/tinkerbell-test-eks-a-cli.yml index 128163ae111d..67f9c5ffdddb 100644 --- a/cmd/integration_test/build/buildspecs/tinkerbell-test-eks-a-cli.yml +++ b/cmd/integration_test/build/buildspecs/tinkerbell-test-eks-a-cli.yml @@ -37,18 +37,21 @@ env: T_TINKERBELL_IMAGE_UBUNTU_1_27: "tinkerbell_ci:image_ubuntu_1_27" T_TINKERBELL_IMAGE_UBUNTU_1_28: "tinkerbell_ci:image_ubuntu_1_28" T_TINKERBELL_IMAGE_UBUNTU_1_29: "tinkerbell_ci:image_ubuntu_1_29" + T_TINKERBELL_IMAGE_UBUNTU_1_30: "tinkerbell_ci:image_ubuntu_1_30" T_TINKERBELL_IMAGE_UBUNTU_2204_1_24: "tinkerbell_ci:image_ubuntu_2204_1_24" T_TINKERBELL_IMAGE_UBUNTU_2204_1_25: "tinkerbell_ci:image_ubuntu_2204_1_25" T_TINKERBELL_IMAGE_UBUNTU_2204_1_26: "tinkerbell_ci:image_ubuntu_2204_1_26" T_TINKERBELL_IMAGE_UBUNTU_2204_1_27: "tinkerbell_ci:image_ubuntu_2204_1_27" T_TINKERBELL_IMAGE_UBUNTU_2204_1_28: "tinkerbell_ci:image_ubuntu_2204_1_28" T_TINKERBELL_IMAGE_UBUNTU_2204_1_29: "tinkerbell_ci:image_ubuntu_2204_1_29" + T_TINKERBELL_IMAGE_UBUNTU_2204_1_30: "tinkerbell_ci:image_ubuntu_2204_1_30" T_TINKERBELL_IMAGE_REDHAT_1_24: "tinkerbell_ci:image_redhat_1_24" T_TINKERBELL_IMAGE_REDHAT_1_25: "tinkerbell_ci:image_redhat_1_25" T_TINKERBELL_IMAGE_REDHAT_1_26: "tinkerbell_ci:image_redhat_1_26" T_TINKERBELL_IMAGE_REDHAT_1_27: "tinkerbell_ci:image_redhat_1_27" T_TINKERBELL_IMAGE_REDHAT_1_28: "tinkerbell_ci:image_redhat_1_28" T_TINKERBELL_IMAGE_REDHAT_1_29: "tinkerbell_ci:image_redhat_1_29" + T_TINKERBELL_IMAGE_REDHAT_1_30: "tinkerbell_ci:image_redhat_1_30" T_TINKERBELL_SSH_AUTHORIZED_KEY: "vsphere_ci_beta_connection:ssh_authorized_key" T_TINKERBELL_CP_NETWORK_CIDR: "tinkerbell_ci:cp_network_cidr" T_TINKERBELL_S3_INVENTORY_CSV_KEY: "tinkerbell_ci:s3_inventory_csv" diff --git a/test/e2e/QUICK_TESTS.yaml b/test/e2e/QUICK_TESTS.yaml index 4fd1f730e565..e59cb8184cbc 100644 --- a/test/e2e/QUICK_TESTS.yaml +++ b/test/e2e/QUICK_TESTS.yaml @@ -25,6 +25,6 @@ quick_tests: # - TestSnowKubernetes128SimpleFlow # - TestSnowKubernetes128StackedEtcdSimpleFlow # Tinkerbell -- ^TestTinkerbellKubernetes128UbuntuTo129Upgrade$ -- TestTinkerbellKubernetes129Ubuntu2004To2204Upgrade -- TestTinkerbellKubernetes128To129Ubuntu2204Upgrade \ No newline at end of file +- ^TestTinkerbellKubernetes129UbuntuTo130Upgrade$ +- TestTinkerbellKubernetes130Ubuntu2004To2204Upgrade +- TestTinkerbellKubernetes129To130Ubuntu2204Upgrade \ No newline at end of file diff --git a/test/e2e/SKIPPED_TESTS.yaml b/test/e2e/SKIPPED_TESTS.yaml index f8f141b41312..22de9114567d 100644 --- a/test/e2e/SKIPPED_TESTS.yaml +++ b/test/e2e/SKIPPED_TESTS.yaml @@ -77,19 +77,18 @@ skipped_tests: - TestTinkerbellKubernetes126UbuntuExternalEtcdSimpleFlow # Skipping skip power action tests - Not going to work because e2e test powers on CP and worker node at the same time and worker node times out early waiting for ipxe # Skipping a few redundant tests -- TestTinkerbellKubernetes125RedHatSimpleFlow - TestTinkerbellKubernetes126RedHatSimpleFlow - TestTinkerbellKubernetes127RedHatSimpleFlow - 
TestTinkerbellKubernetes128RedHatSimpleFlow - TestTinkerbellKubernetes129RedHatSimpleFlow -- TestTinkerbellKubernetes125UbuntuSimpleFlow +- TestTinkerbellKubernetes130RedHatSimpleFlow - TestTinkerbellKubernetes126UbuntuSimpleFlow -- TestTinkerbellKubernetes125Ubuntu2204SimpleFlow +- TestTinkerbellKubernetes127UbuntuSimpleFlow - TestTinkerbellKubernetes126Ubuntu2204SimpleFlow -- TestTinkerbellKubernetes125To126Ubuntu2204Upgrade +- TestTinkerbellKubernetes127Ubuntu2204SimpleFlow +- TestTinkerbellKubernetes126To127Ubuntu2204Upgrade - TestTinkerbellKubernetes126Ubuntu2004To2204Upgrade -- TestTinkerbellKubernetes126UbuntuThreeWorkersSimpleFlow -- TestTinkerbellKubernetes125UbuntuWorkerNodeScaleUpWithAPI +- TestTinkerbellKubernetes127Ubuntu2004To2204Upgrade - TestTinkerbellKubernetes125UbuntuAddWorkerNodeGroupWithAPI - TestTinkerbellSingleNode125ManagementScaleupWorkloadWithAPI #Skip single K8s version upgrade tests as the same is covered by multiple K8s version upgrade from 1.25 to 1.29 to save on hardware resources and running time @@ -111,12 +110,12 @@ skipped_tests: - TestTinkerbellKubernetes127UbuntuSingleNodeCuratedPackagesHarborFlow - TestTinkerbellKubernetes127UbuntuCuratedPackagesAdotSimpleFlow - TestTinkerbellKubernetes127UbuntuCuratedPackagesPrometheusSimpleFlow -- TestTinkerbellKubernetes129UbuntuSingleNodeCuratedPackagesFlow -- TestTinkerbellKubernetes129UbuntuSingleNodeCuratedPackagesEmissaryFlow -- TestTinkerbellKubernetes129UbuntuSingleNodeCuratedPackagesHarborFlow -- TestTinkerbellKubernetes129UbuntuCuratedPackagesAdotSimpleFlow -- TestTinkerbellKubernetes129UbuntuCuratedPackagesPrometheusSimpleFlow -- TestTinkerbellKubernetes129UbuntuCuratedPackagesClusterAutoscalerSimpleFlow +- TestTinkerbellKubernetes130UbuntuSingleNodeCuratedPackagesFlow +- TestTinkerbellKubernetes130UbuntuSingleNodeCuratedPackagesEmissaryFlow +- TestTinkerbellKubernetes130UbuntuSingleNodeCuratedPackagesHarborFlow +- TestTinkerbellKubernetes130UbuntuCuratedPackagesAdotSimpleFlow +- TestTinkerbellKubernetes130UbuntuCuratedPackagesPrometheusSimpleFlow +- TestTinkerbellKubernetes130UbuntuCuratedPackagesClusterAutoscalerSimpleFlow # Tinkerbell conformance - TestTinkerbellKubernetes125BottleRocketThreeReplicasTwoWorkersConformanceFlow diff --git a/test/e2e/TINKERBELL_HARDWARE_COUNT.yaml b/test/e2e/TINKERBELL_HARDWARE_COUNT.yaml index ec5a7b189bdf..49d1f909819b 100644 --- a/test/e2e/TINKERBELL_HARDWARE_COUNT.yaml +++ b/test/e2e/TINKERBELL_HARDWARE_COUNT.yaml @@ -1,7 +1,7 @@ -TestTinkerbellKubernetes129AWSIamAuth: 2 +TestTinkerbellKubernetes130AWSIamAuth: 2 TestTinkerbellKubernetes128BottleRocketAWSIamAuth: 2 -TestTinkerbellKubernetes129UbuntuWorkerNodeUpgrade: 3 -TestTinkerbellKubernetes129UbuntuWorkerNodeScaleUpWithAPI: 3 +TestTinkerbellKubernetes130UbuntuWorkerNodeUpgrade: 3 +TestTinkerbellKubernetes130UbuntuWorkerNodeScaleUpWithAPI: 3 TestTinkerbellKubernetes125UbuntuAddWorkerNodeGroupWithAPI: 2 TestTinkerbellKubernetes127UbuntuSingleNodeCuratedPackagesFlow: 1 TestTinkerbellKubernetes127BottleRocketSingleNodeCuratedPackagesFlow: 1 @@ -34,97 +34,98 @@ TestTinkerbellKubernetes125UbuntuCuratedPackagesAdotSimpleFlow: 1 TestTinkerbellKubernetes125BottleRocketCuratedPackagesAdotSimpleFlow: 1 TestTinkerbellKubernetes125UbuntuCuratedPackagesPrometheusSimpleFlow: 1 TestTinkerbellKubernetes125BottleRocketCuratedPackagesPrometheusSimpleFlow: 1 -TestTinkerbellKubernetes129UbuntuSingleNodeCuratedPackagesFlow: 1 +TestTinkerbellKubernetes130UbuntuSingleNodeCuratedPackagesFlow: 1 
TestTinkerbellKubernetes128BottleRocketSingleNodeCuratedPackagesFlow: 1 -TestTinkerbellKubernetes129UbuntuSingleNodeCuratedPackagesEmissaryFlow: 1 +TestTinkerbellKubernetes130UbuntuSingleNodeCuratedPackagesEmissaryFlow: 1 TestTinkerbellKubernetes128BottleRocketSingleNodeCuratedPackagesEmissaryFlow: 1 -TestTinkerbellKubernetes129UbuntuSingleNodeCuratedPackagesHarborFlow: 1 +TestTinkerbellKubernetes130UbuntuSingleNodeCuratedPackagesHarborFlow: 1 TestTinkerbellKubernetes128BottleRocketSingleNodeCuratedPackagesHarborFlow: 1 -TestTinkerbellKubernetes129UbuntuCuratedPackagesAdotSimpleFlow: 1 +TestTinkerbellKubernetes130UbuntuCuratedPackagesAdotSimpleFlow: 1 TestTinkerbellKubernetes128BottleRocketCuratedPackagesAdotSimpleFlow: 1 -TestTinkerbellKubernetes129UbuntuCuratedPackagesPrometheusSimpleFlow: 1 +TestTinkerbellKubernetes130UbuntuCuratedPackagesPrometheusSimpleFlow: 1 TestTinkerbellKubernetes128BottleRocketCuratedPackagesPrometheusSimpleFlow: 1 -TestTinkerbellKubernetes129UbuntuCuratedPackagesClusterAutoscalerSimpleFlow: 3 +TestTinkerbellKubernetes130UbuntuCuratedPackagesClusterAutoscalerSimpleFlow: 3 TestTinkerbellKubernetes128BottleRocketSingleNodeSimpleFlow: 1 -TestTinkerbellKubernetes129UbuntuSingleNodeSimpleFlow: 1 -TestTinkerbellKubernetes129UbuntuWorkloadCluster: 4 -TestTinkerbellKubernetes129UbuntuWorkloadClusterWithAPI: 4 -TestTinkerbellKubernetes129UbuntuWorkloadClusterGitFluxWithAPI: 4 +TestTinkerbellKubernetes130UbuntuSingleNodeSimpleFlow: 1 +TestTinkerbellKubernetes130UbuntuWorkloadCluster: 4 +TestTinkerbellKubernetes130UbuntuWorkloadClusterWithAPI: 4 +TestTinkerbellKubernetes130UbuntuWorkloadClusterGitFluxWithAPI: 4 TestTinkerbellKubernetes128BottlerocketWorkloadClusterSimpleFlow: 4 TestTinkerbellKubernetes128BottlerocketWorkloadClusterWithAPI: 4 -TestTinkerbellKubernetes129UbuntuSingleNodeWorkloadCluster: 2 -TestTinkerbellKubernetes129UbuntuSingleNodeWorkloadClusterWithAPI: 2 +TestTinkerbellKubernetes130UbuntuSingleNodeWorkloadCluster: 2 +TestTinkerbellKubernetes130UbuntuSingleNodeWorkloadClusterWithAPI: 2 TestTinkerbellKubernetes128BottlerocketSingleNodeWorkloadCluster: 2 TestTinkerbellKubernetes128BottlerocketSingleNodeWorkloadClusterWithAPI: 2 TestTinkerbellKubernetes128BottlerocketWorkloadClusterSkipPowerActions: 4 -TestTinkerbellUpgrade129MulticlusterWorkloadClusterWorkerScaleup: 5 +TestTinkerbellUpgrade130MulticlusterWorkloadClusterWorkerScaleup: 5 TestTinkerbellSingleNode125ManagementScaleupWorkloadWithAPI: 4 -TestTinkerbellKubernetes125UbuntuTo126Upgrade: 4 TestTinkerbellKubernetes126UbuntuTo127Upgrade: 4 TestTinkerbellKubernetes127UbuntuTo128Upgrade: 4 TestTinkerbellKubernetes128UbuntuTo129Upgrade: 4 -TestTinkerbellKubernetes125To126Ubuntu2204Upgrade: 4 +TestTinkerbellKubernetes129UbuntuTo130Upgrade: 4 TestTinkerbellKubernetes126To127Ubuntu2204Upgrade: 4 TestTinkerbellKubernetes127To128Ubuntu2204Upgrade: 4 TestTinkerbellKubernetes128To129Ubuntu2204Upgrade: 4 +TestTinkerbellKubernetes129To130Ubuntu2204Upgrade: 4 TestTinkerbellKubernetes126Ubuntu2004To2204Upgrade: 4 TestTinkerbellKubernetes127Ubuntu2004To2204Upgrade: 4 TestTinkerbellKubernetes128Ubuntu2004To2204Upgrade: 4 TestTinkerbellKubernetes129Ubuntu2004To2204Upgrade: 4 -TestTinkerbellUpgrade129MulticlusterWorkloadClusterWorkerScaleupGitFluxWithAPI: 4 -TestTinkerbellUpgrade129MulticlusterWorkloadClusterCPScaleup: 6 -TestTinkerbellUpgradeMulticlusterWorkloadClusterK8sUpgrade128To129: 6 +TestTinkerbellKubernetes130Ubuntu2004To2204Upgrade: 4 
+TestTinkerbellUpgrade130MulticlusterWorkloadClusterWorkerScaleupGitFluxWithAPI: 4 +TestTinkerbellUpgrade130MulticlusterWorkloadClusterCPScaleup: 6 +TestTinkerbellUpgradeMulticlusterWorkloadClusterK8sUpgrade129To130: 6 TestTinkerbellUpgradeMulticlusterWorkloadClusterK8sUpgrade125To126WithAPI: 4 -TestTinkerbellKubernetes129OIDC: 2 -TestTinkerbellKubernetes129UbuntuRegistryMirror: 2 -TestTinkerbellKubernetes129UbuntuInsecureSkipVerifyRegistryMirror: 2 +TestTinkerbellKubernetes130OIDC: 2 +TestTinkerbellKubernetes130UbuntuRegistryMirror: 2 +TestTinkerbellKubernetes130UbuntuInsecureSkipVerifyRegistryMirror: 2 TestTinkerbellKubernetes128BottlerocketRegistryMirror: 2 -TestTinkerbellKubernetes129UbuntuAuthenticatedRegistryMirror: 2 +TestTinkerbellKubernetes130UbuntuAuthenticatedRegistryMirror: 2 TestTinkerbellKubernetes128BottlerocketAuthenticatedRegistryMirror: 2 -TestTinkerbellKubernetes125UbuntuSimpleFlow: 2 TestTinkerbellKubernetes126UbuntuSimpleFlow: 2 TestTinkerbellKubernetes127UbuntuSimpleFlow: 2 TestTinkerbellKubernetes128UbuntuSimpleFlow: 2 TestTinkerbellKubernetes129UbuntuSimpleFlow: 2 -TestTinkerbellKubernetes125Ubuntu2204SimpleFlow: 2 +TestTinkerbellKubernetes130UbuntuSimpleFlow: 2 TestTinkerbellKubernetes126Ubuntu2204SimpleFlow: 2 TestTinkerbellKubernetes127Ubuntu2204SimpleFlow: 2 TestTinkerbellKubernetes128Ubuntu2204SimpleFlow: 2 TestTinkerbellKubernetes129Ubuntu2204SimpleFlow: 2 -TestTinkerbellKubernetes125RedHatSimpleFlow: 2 +TestTinkerbellKubernetes130Ubuntu2204SimpleFlow: 2 TestTinkerbellKubernetes126RedHatSimpleFlow: 2 TestTinkerbellKubernetes127RedHatSimpleFlow: 2 TestTinkerbellKubernetes128RedHatSimpleFlow: 2 TestTinkerbellKubernetes129RedHatSimpleFlow: 2 +TestTinkerbellKubernetes130RedHatSimpleFlow: 2 TestTinkerbellKubernetes125BottleRocketSimpleFlow: 2 TestTinkerbellKubernetes126BottleRocketSimpleFlow: 2 TestTinkerbellKubernetes127BottleRocketSimpleFlow: 2 TestTinkerbellKubernetes128BottleRocketSimpleFlow: 2 -TestTinkerbellKubernetes129UbuntuThreeControlPlaneReplicasSimpleFlow: 4 +TestTinkerbellKubernetes130UbuntuThreeControlPlaneReplicasSimpleFlow: 4 TestTinkerbellKubernetes128BottleRocketThreeControlPlaneReplicasSimpleFlow: 4 -TestTinkerbellKubernetes129UbuntuThreeWorkersSimpleFlow: 4 +TestTinkerbellKubernetes130UbuntuThreeWorkersSimpleFlow: 4 TestTinkerbellKubernetes128BottleRocketThreeWorkersSimpleFlow: 4 -TestTinkerbellKubernetes129UbuntuControlPlaneScaleUp: 4 -TestTinkerbellKubernetes129UbuntuWorkerNodeScaleUp: 3 -TestTinkerbellKubernetes129UbuntuWorkerNodeScaleDown: 3 -TestTinkerbellKubernetes129UbuntuControlPlaneScaleDown: 4 -TestTinkerbellKubernetes129UbuntuWorkerNodeGroupsTaintsAndLabels: 3 +TestTinkerbellKubernetes130UbuntuControlPlaneScaleUp: 4 +TestTinkerbellKubernetes130UbuntuWorkerNodeScaleUp: 3 +TestTinkerbellKubernetes130UbuntuWorkerNodeScaleDown: 3 +TestTinkerbellKubernetes130UbuntuControlPlaneScaleDown: 4 +TestTinkerbellKubernetes130UbuntuWorkerNodeGroupsTaintsAndLabels: 3 TestTinkerbellKubernetes128BottlerocketWorkerNodeGroupsTaintsAndLabels: 3 TestTinkerbellAirgappedKubernetes128BottleRocketRegistryMirror: 2 TestTinkerbellAirgappedKubernetes128BottlerocketProxyConfigFlow: 2 -TestTinkerbellAirgappedKubernetes129UbuntuProxyConfigFlow: 2 -TestTinkerbellKubernetes129UbuntuOOB: 2 -TestTinkerbellK8sUpgrade128to129WithUbuntuOOB: 4 -TestTinkerbellKubernetes128UbuntuTo129UpgradeCPOnly: 3 -TestTinkerbellKubernetes127UbuntuTo128UpgradeWorkerOnly: 3 -TestTinkerbellSingleNode128To129UbuntuManagementCPUpgradeAPI: 4 
-TestTinkerbellKubernetes125UbuntuTo126InPlaceUpgrade_1CP_1Worker: 2 +TestTinkerbellAirgappedKubernetes130UbuntuProxyConfigFlow: 2 +TestTinkerbellKubernetes130UbuntuOOB: 2 +TestTinkerbellK8sUpgrade129to130WithUbuntuOOB: 4 +TestTinkerbellKubernetes129UbuntuTo130UpgradeCPOnly: 3 +TestTinkerbellKubernetes129UbuntuTo130UpgradeWorkerOnly: 3 +TestTinkerbellSingleNode129To130UbuntuManagementCPUpgradeAPI: 4 TestTinkerbellKubernetes126UbuntuTo127InPlaceUpgrade_1CP_2Worker: 3 TestTinkerbellKubernetes127UbuntuTo128InPlaceUpgrade_3CP_1Worker: 4 TestTinkerbellKubernetes128UbuntuTo129InPlaceUpgrade_1CP_1Worker: 2 -TestTinkerbellKubernetes125UbuntuTo126SingleNodeInPlaceUpgrade: 1 +TestTinkerbellKubernetes129UbuntuTo130InPlaceUpgrade_1CP_1Worker: 2 TestTinkerbellKubernetes126UbuntuTo127SingleNodeInPlaceUpgrade: 1 TestTinkerbellKubernetes127UbuntuTo128SingleNodeInPlaceUpgrade: 1 TestTinkerbellKubernetes128UbuntuTo129SingleNodeInPlaceUpgrade: 1 +TestTinkerbellKubernetes129UbuntuTo130SingleNodeInPlaceUpgrade: 1 TestTinkerbellKubernetes128UpgradeManagementComponents: 2 TestTinkerbellKubernetes125UbuntuTo129MultipleUpgrade: 4 \ No newline at end of file diff --git a/test/e2e/tinkerbell_test.go b/test/e2e/tinkerbell_test.go index 99cc9281942f..bf25eafc4a0c 100644 --- a/test/e2e/tinkerbell_test.go +++ b/test/e2e/tinkerbell_test.go @@ -19,12 +19,12 @@ import ( // AWS IAM Auth -func TestTinkerbellKubernetes129AWSIamAuth(t *testing.T) { +func TestTinkerbellKubernetes130AWSIamAuth(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), framework.WithAWSIam(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithControlPlaneHardware(1), framework.WithWorkerHardware(1), ) @@ -32,25 +32,6 @@ func TestTinkerbellKubernetes129AWSIamAuth(t *testing.T) { } // Upgrade -func TestTinkerbellKubernetes125UbuntuTo126Upgrade(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu125Tinkerbell()) - test := framework.NewClusterE2ETest( - t, - provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithClusterFiller(api.WithControlPlaneCount(1)), - framework.WithClusterFiller(api.WithWorkerNodeCount(1)), - framework.WithControlPlaneHardware(2), - framework.WithWorkerHardware(2), - ) - runSimpleUpgradeFlowForBareMetal( - test, - v1alpha1.Kube126, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), - provider.WithProviderUpgrade(framework.Ubuntu126Image()), - ) -} - func TestTinkerbellKubernetes126UbuntuTo127Upgrade(t *testing.T) { provider := framework.NewTinkerbell(t, framework.WithUbuntu126Tinkerbell()) test := framework.NewClusterE2ETest( @@ -108,71 +89,70 @@ func TestTinkerbellKubernetes128UbuntuTo129Upgrade(t *testing.T) { ) } -func TestTinkerbellKubernetes128UbuntuTo129UpgradeCPOnly(t *testing.T) { - provider := framework.NewTinkerbell(t) - kube128 := v1alpha1.Kube128 +func TestTinkerbellKubernetes129UbuntuTo130Upgrade(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(kube128)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), 
framework.WithClusterFiller(api.WithWorkerNodeCount(1)), - framework.WithClusterFiller(api.WithWorkerKubernetesVersion(nodeGroupLabel1, &kube128)), framework.WithControlPlaneHardware(2), - framework.WithWorkerHardware(1), - ).WithClusterConfig( - provider.WithCPKubeVersionAndOS(kube128, framework.Ubuntu2004), - provider.WithWorkerKubeVersionAndOS(kube128, framework.Ubuntu2004), + framework.WithWorkerHardware(2), ) - runSimpleUpgradeFlowWorkerNodeVersionForBareMetal( + runSimpleUpgradeFlowForBareMetal( test, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube129)), - provider.WithProviderUpgrade(framework.Ubuntu129ImageForCP()), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(framework.Ubuntu130Image()), ) } -func TestTinkerbellKubernetes127UbuntuTo128UpgradeWorkerOnly(t *testing.T) { +func TestTinkerbellKubernetes129UbuntuTo130UpgradeCPOnly(t *testing.T) { provider := framework.NewTinkerbell(t) - kube127 := v1alpha1.Kube127 - kube128 := v1alpha1.Kube128 + kube129 := v1alpha1.Kube129 test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(), - framework.WithClusterFiller(api.WithKubernetesVersion(kube128)), + framework.WithClusterFiller(api.WithKubernetesVersion(kube129)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), - framework.WithClusterFiller(api.WithWorkerKubernetesVersion(nodeGroupLabel1, &kube127)), - framework.WithControlPlaneHardware(1), - framework.WithWorkerHardware(2), + framework.WithClusterFiller(api.WithWorkerKubernetesVersion(nodeGroupLabel1, &kube129)), + framework.WithControlPlaneHardware(2), + framework.WithWorkerHardware(1), ).WithClusterConfig( - provider.WithCPKubeVersionAndOS(v1alpha1.Kube128, framework.Ubuntu2004), - provider.WithWorkerKubeVersionAndOS(v1alpha1.Kube127, framework.Ubuntu2004), + provider.WithCPKubeVersionAndOS(kube129, framework.Ubuntu2004), + provider.WithWorkerKubeVersionAndOS(kube129, framework.Ubuntu2004), ) runSimpleUpgradeFlowWorkerNodeVersionForBareMetal( test, - framework.WithClusterUpgrade(api.WithWorkerKubernetesVersion(nodeGroupLabel1, &kube128)), - provider.WithProviderUpgrade(framework.Ubuntu128ImageForWorker()), + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(framework.Ubuntu130ImageForCP()), ) } -func TestTinkerbellKubernetes125To126Ubuntu2204Upgrade(t *testing.T) { +func TestTinkerbellKubernetes129UbuntuTo130UpgradeWorkerOnly(t *testing.T) { provider := framework.NewTinkerbell(t) + kube129 := v1alpha1.Kube129 + kube130 := v1alpha1.Kube130 test := framework.NewClusterE2ETest( t, provider, + framework.WithClusterFiller(), + framework.WithClusterFiller(api.WithKubernetesVersion(kube129)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), - framework.WithControlPlaneHardware(2), + framework.WithClusterFiller(api.WithWorkerKubernetesVersion(nodeGroupLabel1, &kube129)), + framework.WithControlPlaneHardware(1), framework.WithWorkerHardware(2), ).WithClusterConfig( - provider.WithKubeVersionAndOS(v1alpha1.Kube125, framework.Ubuntu2204, nil), + provider.WithCPKubeVersionAndOS(v1alpha1.Kube130, framework.Ubuntu2004), + provider.WithWorkerKubeVersionAndOS(v1alpha1.Kube129, framework.Ubuntu2004), ) - runSimpleUpgradeFlowForBaremetalWithoutClusterConfigGeneration( + runSimpleUpgradeFlowWorkerNodeVersionForBareMetal( test, - 
v1alpha1.Kube126, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), - provider.WithProviderUpgrade(framework.Ubuntu2204Kubernetes126Image()), + framework.WithClusterUpgrade(api.WithWorkerKubernetesVersion(nodeGroupLabel1, &kube130)), + provider.WithProviderUpgrade(framework.Ubuntu130ImageForWorker()), ) } @@ -236,6 +216,26 @@ func TestTinkerbellKubernetes128To129Ubuntu2204Upgrade(t *testing.T) { ) } +func TestTinkerbellKubernetes129To130Ubuntu2204Upgrade(t *testing.T) { + provider := framework.NewTinkerbell(t) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithControlPlaneHardware(2), + framework.WithWorkerHardware(2), + ).WithClusterConfig( + provider.WithKubeVersionAndOS(v1alpha1.Kube129, framework.Ubuntu2204, nil), + ) + runSimpleUpgradeFlowForBaremetalWithoutClusterConfigGeneration( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(framework.Ubuntu2204Kubernetes130Image()), + ) +} + func TestTinkerbellKubernetes126Ubuntu2004To2204Upgrade(t *testing.T) { provider := framework.NewTinkerbell(t) test := framework.NewClusterE2ETest( @@ -316,12 +316,32 @@ func TestTinkerbellKubernetes129Ubuntu2004To2204Upgrade(t *testing.T) { ) } -func TestTinkerbellKubernetes129UbuntuWorkerNodeUpgrade(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) +func TestTinkerbellKubernetes130Ubuntu2004To2204Upgrade(t *testing.T) { + provider := framework.NewTinkerbell(t) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithControlPlaneHardware(2), + framework.WithWorkerHardware(2), + ).WithClusterConfig( + provider.WithKubeVersionAndOS(v1alpha1.Kube130, framework.Ubuntu2004, nil), + ) + runSimpleUpgradeFlowForBaremetalWithoutClusterConfigGeneration( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(framework.Ubuntu2204Kubernetes130Image()), + ) +} + +func TestTinkerbellKubernetes130UbuntuWorkerNodeUpgrade(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), framework.WithControlPlaneHardware(1), @@ -329,17 +349,17 @@ func TestTinkerbellKubernetes129UbuntuWorkerNodeUpgrade(t *testing.T) { ) runSimpleUpgradeFlowForBareMetal( test, - v1alpha1.Kube129, + v1alpha1.Kube130, framework.WithClusterUpgrade(api.WithWorkerNodeCount(2)), ) } -func TestTinkerbellKubernetes129UbuntuWorkerNodeScaleUpWithAPI(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) +func TestTinkerbellKubernetes130UbuntuWorkerNodeScaleUpWithAPI(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), 
framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), framework.WithControlPlaneHardware(1), @@ -380,27 +400,6 @@ func TestTinkerbellKubernetes125UbuntuAddWorkerNodeGroupWithAPI(t *testing.T) { ) } -func TestTinkerbellKubernetes125UbuntuTo126InPlaceUpgrade_1CP_1Worker(t *testing.T) { - provider := framework.NewTinkerbell(t) - test := framework.NewClusterE2ETest( - t, - provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithClusterFiller(api.WithControlPlaneCount(1)), - framework.WithClusterFiller(api.WithWorkerNodeCount(1)), - framework.WithClusterFiller(api.WithInPlaceUpgradeStrategy()), - framework.WithControlPlaneHardware(1), - framework.WithWorkerHardware(1), - ).WithClusterConfig( - provider.WithKubeVersionAndOS(v1alpha1.Kube125, framework.Ubuntu2004, nil), - ) - runInPlaceUpgradeFlowForBareMetal( - test, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126), api.WithInPlaceUpgradeStrategy()), - provider.WithProviderUpgrade(framework.Ubuntu126Image()), - ) -} - func TestTinkerbellKubernetes126UbuntuTo127InPlaceUpgrade_1CP_2Worker(t *testing.T) { provider := framework.NewTinkerbell(t) test := framework.NewClusterE2ETest( @@ -464,32 +463,24 @@ func TestTinkerbellKubernetes128UbuntuTo129InPlaceUpgrade_1CP_1Worker(t *testing ) } -func TestTinkerbellKubernetes125UbuntuTo126SingleNodeInPlaceUpgrade(t *testing.T) { +func TestTinkerbellKubernetes129UbuntuTo130InPlaceUpgrade_1CP_1Worker(t *testing.T) { provider := framework.NewTinkerbell(t) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), - framework.WithClusterFiller(api.WithEtcdCountIfExternal(0)), - framework.WithClusterFiller(api.RemoveAllWorkerNodeGroups()), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), framework.WithClusterFiller(api.WithInPlaceUpgradeStrategy()), framework.WithControlPlaneHardware(1), + framework.WithWorkerHardware(1), ).WithClusterConfig( - provider.WithKubeVersionAndOS(v1alpha1.Kube125, framework.Ubuntu2004, nil), + provider.WithKubeVersionAndOS(v1alpha1.Kube129, framework.Ubuntu2004, nil), ) runInPlaceUpgradeFlowForBareMetal( test, - framework.WithUpgradeClusterConfig( - api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube126), - api.WithInPlaceUpgradeStrategy(), - ), - api.TinkerbellToConfigFiller( - api.RemoveTinkerbellWorkerMachineConfig(), - ), - ), - provider.WithProviderUpgrade(framework.Ubuntu126Image()), + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130), api.WithInPlaceUpgradeStrategy()), + provider.WithProviderUpgrade(framework.Ubuntu130Image()), ) } @@ -580,6 +571,35 @@ func TestTinkerbellKubernetes128UbuntuTo129SingleNodeInPlaceUpgrade(t *testing.T ) } +func TestTinkerbellKubernetes129UbuntuTo130SingleNodeInPlaceUpgrade(t *testing.T) { + provider := framework.NewTinkerbell(t) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterSingleNode(v1alpha1.Kube129), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithEtcdCountIfExternal(0)), + framework.WithClusterFiller(api.RemoveAllWorkerNodeGroups()), + framework.WithClusterFiller(api.WithInPlaceUpgradeStrategy()), + framework.WithControlPlaneHardware(1), + ).WithClusterConfig( + 
provider.WithKubeVersionAndOS(v1alpha1.Kube129, framework.Ubuntu2004, nil), + ) + runInPlaceUpgradeFlowForBareMetal( + test, + framework.WithUpgradeClusterConfig( + api.ClusterToConfigFiller( + api.WithKubernetesVersion(v1alpha1.Kube130), + api.WithInPlaceUpgradeStrategy(), + ), + api.TinkerbellToConfigFiller( + api.RemoveTinkerbellWorkerMachineConfig(), + ), + ), + provider.WithProviderUpgrade(framework.Ubuntu130Image()), + ) +} + // Curated packages func TestTinkerbellKubernetes127UbuntuSingleNodeCuratedPackagesFlow(t *testing.T) { test := framework.NewClusterE2ETest(t, @@ -783,90 +803,90 @@ func TestTinkerbellKubernetes125UbuntuCuratedPackagesPrometheusSimpleFlow(t *tes runCuratedPackagesPrometheusInstallTinkerbellSimpleFlow(test) } -func TestTinkerbellKubernetes129UbuntuSingleNodeCuratedPackagesFlow(t *testing.T) { +func TestTinkerbellKubernetes130UbuntuSingleNodeCuratedPackagesFlow(t *testing.T) { test := framework.NewClusterE2ETest(t, - framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), - framework.WithClusterSingleNode(v1alpha1.Kube129), + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), + framework.WithClusterSingleNode(v1alpha1.Kube130), framework.WithControlPlaneHardware(1), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube129), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) runCuratedPackageInstallTinkerbellSingleNodeFlow(test) } -func TestTinkerbellKubernetes129UbuntuSingleNodeCuratedPackagesEmissaryFlow(t *testing.T) { +func TestTinkerbellKubernetes130UbuntuSingleNodeCuratedPackagesEmissaryFlow(t *testing.T) { test := framework.NewClusterE2ETest(t, - framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), - framework.WithClusterSingleNode(v1alpha1.Kube129), + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), + framework.WithClusterSingleNode(v1alpha1.Kube130), framework.WithControlPlaneHardware(1), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube129), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) runCuratedPackageEmissaryInstallTinkerbellSingleNodeFlow(test) } -func TestTinkerbellKubernetes129UbuntuSingleNodeCuratedPackagesHarborFlow(t *testing.T) { +func TestTinkerbellKubernetes130UbuntuSingleNodeCuratedPackagesHarborFlow(t *testing.T) { test := framework.NewClusterE2ETest(t, - framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), - framework.WithClusterSingleNode(v1alpha1.Kube129), + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), + framework.WithClusterSingleNode(v1alpha1.Kube130), framework.WithControlPlaneHardware(1), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube129), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) runCuratedPackageHarborInstallTinkerbellSimpleFlow(test) } -func TestTinkerbellKubernetes129UbuntuCuratedPackagesAdotSimpleFlow(t *testing.T) { +func TestTinkerbellKubernetes130UbuntuCuratedPackagesAdotSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewTinkerbell(t, 
framework.WithUbuntu129Tinkerbell()), - framework.WithClusterSingleNode(v1alpha1.Kube129), + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), + framework.WithClusterSingleNode(v1alpha1.Kube130), framework.WithControlPlaneHardware(1), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube129), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) runCuratedPackagesAdotInstallTinkerbellSimpleFlow(test) } -func TestTinkerbellKubernetes129UbuntuCuratedPackagesPrometheusSimpleFlow(t *testing.T) { +func TestTinkerbellKubernetes130UbuntuCuratedPackagesPrometheusSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, - framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), - framework.WithClusterSingleNode(v1alpha1.Kube129), + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), + framework.WithClusterSingleNode(v1alpha1.Kube130), framework.WithControlPlaneHardware(1), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube129), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) runCuratedPackagesPrometheusInstallTinkerbellSimpleFlow(test) } -func TestTinkerbellKubernetes129UbuntuCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) { +func TestTinkerbellKubernetes130UbuntuCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) minNodes := 1 maxNodes := 2 test := framework.NewClusterE2ETest(t, - framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)), + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)), framework.WithControlPlaneHardware(1), framework.WithWorkerHardware(2), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube129), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) runAutoscalerWithMetricsServerTinkerbellSimpleFlow(test) } -func TestTinkerbellKubernetes129UbuntuSingleNodeSimpleFlow(t *testing.T) { +func TestTinkerbellKubernetes130UbuntuSingleNodeSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneCount(1), api.WithEtcdCountIfExternal(0), api.RemoveAllWorkerNodeGroups(), @@ -878,28 +898,28 @@ func TestTinkerbellKubernetes129UbuntuSingleNodeSimpleFlow(t *testing.T) { } // Multicluster -func TestTinkerbellKubernetes129UbuntuWorkloadCluster(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) +func TestTinkerbellKubernetes130UbuntuWorkloadCluster(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()) test 
:= framework.NewMulticlusterE2ETest( t, framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithControlPlaneHardware(2), framework.WithWorkerHardware(2), ), framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), ), ) runTinkerbellWorkloadClusterFlow(test) } -func TestTinkerbellKubernetes129UbuntuWorkloadClusterWithAPI(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) +func TestTinkerbellKubernetes130UbuntuWorkloadClusterWithAPI(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()) managementCluster := framework.NewClusterE2ETest( t, provider, @@ -907,7 +927,7 @@ func TestTinkerbellKubernetes129UbuntuWorkloadClusterWithAPI(t *testing.T) { framework.WithWorkerHardware(2), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), ), ) test := framework.NewMulticlusterE2ETest( @@ -921,7 +941,7 @@ func TestTinkerbellKubernetes129UbuntuWorkloadClusterWithAPI(t *testing.T) { framework.WithClusterName(test.NewWorkloadClusterName()), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithManagementCluster(managementCluster.ClusterName), ), ), @@ -929,8 +949,8 @@ func TestTinkerbellKubernetes129UbuntuWorkloadClusterWithAPI(t *testing.T) { runWorkloadClusterWithAPIFlowForBareMetal(test) } -func TestTinkerbellKubernetes129UbuntuWorkloadClusterGitFluxWithAPI(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) +func TestTinkerbellKubernetes130UbuntuWorkloadClusterGitFluxWithAPI(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()) managementCluster := framework.NewClusterE2ETest( t, provider, @@ -941,7 +961,7 @@ func TestTinkerbellKubernetes129UbuntuWorkloadClusterGitFluxWithAPI(t *testing.T ).WithClusterConfig( framework.WithFluxGithubConfig(), api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), ), ) test := framework.NewMulticlusterE2ETest( @@ -955,7 +975,7 @@ func TestTinkerbellKubernetes129UbuntuWorkloadClusterGitFluxWithAPI(t *testing.T framework.WithClusterName(test.NewWorkloadClusterName()), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithManagementCluster(managementCluster.ClusterName), ), ), @@ -1014,15 +1034,15 @@ func TestTinkerbellKubernetes128BottlerocketWorkloadClusterWithAPI(t *testing.T) runWorkloadClusterWithAPIFlowForBareMetal(test) } -func TestTinkerbellKubernetes129UbuntuSingleNodeWorkloadCluster(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) +func TestTinkerbellKubernetes130UbuntuSingleNodeWorkloadCluster(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()) test := framework.NewMulticlusterE2ETest( t, framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithEtcdCountIfExternal(0), 
api.RemoveAllWorkerNodeGroups(), ), @@ -1033,7 +1053,7 @@ func TestTinkerbellKubernetes129UbuntuSingleNodeWorkloadCluster(t *testing.T) { t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithEtcdCountIfExternal(0), api.RemoveAllWorkerNodeGroups(), ), @@ -1042,8 +1062,8 @@ func TestTinkerbellKubernetes129UbuntuSingleNodeWorkloadCluster(t *testing.T) { runTinkerbellWorkloadClusterFlow(test) } -func TestTinkerbellKubernetes129UbuntuSingleNodeWorkloadClusterWithAPI(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) +func TestTinkerbellKubernetes130UbuntuSingleNodeWorkloadClusterWithAPI(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()) managementCluster := framework.NewClusterE2ETest( t, provider, @@ -1051,7 +1071,7 @@ func TestTinkerbellKubernetes129UbuntuSingleNodeWorkloadClusterWithAPI(t *testin framework.WithWorkerHardware(0), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithEtcdCountIfExternal(0), api.RemoveAllWorkerNodeGroups(), ), @@ -1067,7 +1087,7 @@ func TestTinkerbellKubernetes129UbuntuSingleNodeWorkloadClusterWithAPI(t *testin framework.WithClusterName(test.NewWorkloadClusterName()), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithManagementCluster(managementCluster.ClusterName), api.WithEtcdCountIfExternal(0), api.RemoveAllWorkerNodeGroups(), @@ -1077,8 +1097,8 @@ func TestTinkerbellKubernetes129UbuntuSingleNodeWorkloadClusterWithAPI(t *testin runWorkloadClusterWithAPIFlowForBareMetal(test) } -func TestTinkerbellUpgrade129MulticlusterWorkloadClusterWorkerScaleupGitFluxWithAPI(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) +func TestTinkerbellUpgrade130MulticlusterWorkloadClusterWorkerScaleupGitFluxWithAPI(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()) managementCluster := framework.NewClusterE2ETest( t, provider, @@ -1089,7 +1109,7 @@ func TestTinkerbellUpgrade129MulticlusterWorkloadClusterWorkerScaleupGitFluxWith ).WithClusterConfig( framework.WithFluxGithubConfig(), api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.RemoveAllWorkerNodeGroups(), ), ) @@ -1104,7 +1124,7 @@ func TestTinkerbellUpgrade129MulticlusterWorkloadClusterWorkerScaleupGitFluxWith framework.WithClusterName(test.NewWorkloadClusterName()), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithManagementCluster(managementCluster.ClusterName), ), ), @@ -1116,65 +1136,65 @@ func TestTinkerbellUpgrade129MulticlusterWorkloadClusterWorkerScaleupGitFluxWith ) } -func TestTinkerbellUpgrade129MulticlusterWorkloadClusterCPScaleup(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) +func TestTinkerbellUpgrade130MulticlusterWorkloadClusterCPScaleup(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()) test := framework.NewMulticlusterE2ETest( t, framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + 
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithControlPlaneHardware(4), framework.WithWorkerHardware(2), ), framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), ), ) runSimpleWorkloadUpgradeFlowForBareMetal( test, - v1alpha1.Kube129, + v1alpha1.Kube130, framework.WithClusterUpgrade( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneCount(3), ), ) } -func TestTinkerbellUpgradeMulticlusterWorkloadClusterK8sUpgrade128To129(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu128Tinkerbell()) +func TestTinkerbellUpgradeMulticlusterWorkloadClusterK8sUpgrade129To130(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) test := framework.NewMulticlusterE2ETest( t, framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), framework.WithControlPlaneHardware(3), framework.WithWorkerHardware(3), ), framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), ), ) runSimpleWorkloadUpgradeFlowForBareMetal( test, - v1alpha1.Kube129, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube129)), - provider.WithProviderUpgrade(framework.Ubuntu129Image()), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(framework.Ubuntu130Image()), ) } // OIDC -func TestTinkerbellKubernetes129OIDC(t *testing.T) { +func TestTinkerbellKubernetes130OIDC(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), framework.WithOIDC(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithControlPlaneHardware(1), framework.WithWorkerHardware(1), ) @@ -1182,11 +1202,11 @@ func TestTinkerbellKubernetes129OIDC(t *testing.T) { } // Registry mirror -func TestTinkerbellKubernetes129UbuntuRegistryMirror(t *testing.T) { +func TestTinkerbellKubernetes130UbuntuRegistryMirror(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithControlPlaneHardware(1), framework.WithWorkerHardware(1), framework.WithRegistryMirrorEndpointAndCert(constants.TinkerbellProviderName), @@ -1194,11 +1214,11 @@ func TestTinkerbellKubernetes129UbuntuRegistryMirror(t *testing.T) { runTinkerbellRegistryMirrorFlow(test) } -func TestTinkerbellKubernetes129UbuntuInsecureSkipVerifyRegistryMirror(t *testing.T) { +func TestTinkerbellKubernetes130UbuntuInsecureSkipVerifyRegistryMirror(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), 
+ framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithControlPlaneHardware(1), framework.WithWorkerHardware(1), framework.WithRegistryMirrorInsecureSkipVerify(constants.TinkerbellProviderName), @@ -1206,11 +1226,11 @@ func TestTinkerbellKubernetes129UbuntuInsecureSkipVerifyRegistryMirror(t *testin runTinkerbellRegistryMirrorFlow(test) } -func TestTinkerbellKubernetes129UbuntuAuthenticatedRegistryMirror(t *testing.T) { +func TestTinkerbellKubernetes130UbuntuAuthenticatedRegistryMirror(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithControlPlaneHardware(1), framework.WithWorkerHardware(1), framework.WithAuthenticatedRegistryMirror(constants.TinkerbellProviderName), @@ -1219,17 +1239,6 @@ func TestTinkerbellKubernetes129UbuntuAuthenticatedRegistryMirror(t *testing.T) } // Simpleflow -func TestTinkerbellKubernetes125UbuntuSimpleFlow(t *testing.T) { - test := framework.NewClusterE2ETest( - t, - framework.NewTinkerbell(t, framework.WithUbuntu125Tinkerbell()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithControlPlaneHardware(1), - framework.WithWorkerHardware(1), - ) - runTinkerbellSimpleFlow(test) -} - func TestTinkerbellKubernetes126UbuntuSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -1274,17 +1283,15 @@ func TestTinkerbellKubernetes129UbuntuSimpleFlow(t *testing.T) { runTinkerbellSimpleFlow(test) } -func TestTinkerbellKubernetes125Ubuntu2204SimpleFlow(t *testing.T) { - provider := framework.NewTinkerbell(t) +func TestTinkerbellKubernetes130UbuntuSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, - provider, + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithControlPlaneHardware(1), framework.WithWorkerHardware(1), - ).WithClusterConfig( - provider.WithKubeVersionAndOS(v1alpha1.Kube125, framework.Ubuntu2204, nil), ) - runTinkerbellSimpleFlowWithoutClusterConfigGeneration(test) + runTinkerbellSimpleFlow(test) } func TestTinkerbellKubernetes126Ubuntu2204SimpleFlow(t *testing.T) { @@ -1339,15 +1346,17 @@ func TestTinkerbellKubernetes129Ubuntu2204SimpleFlow(t *testing.T) { runTinkerbellSimpleFlowWithoutClusterConfigGeneration(test) } -func TestTinkerbellKubernetes125RedHatSimpleFlow(t *testing.T) { +func TestTinkerbellKubernetes130Ubuntu2204SimpleFlow(t *testing.T) { + provider := framework.NewTinkerbell(t) test := framework.NewClusterE2ETest( t, - framework.NewTinkerbell(t, framework.WithRedHat125Tinkerbell()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + provider, framework.WithControlPlaneHardware(1), framework.WithWorkerHardware(1), + ).WithClusterConfig( + provider.WithKubeVersionAndOS(v1alpha1.Kube130, framework.Ubuntu2204, nil), ) - runTinkerbellSimpleFlow(test) + runTinkerbellSimpleFlowWithoutClusterConfigGeneration(test) } func TestTinkerbellKubernetes126RedHatSimpleFlow(t *testing.T) { @@ -1394,6 +1403,17 @@ func TestTinkerbellKubernetes129RedHatSimpleFlow(t *testing.T) { runTinkerbellSimpleFlow(test) } +func TestTinkerbellKubernetes130RedHatSimpleFlow(t 
*testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewTinkerbell(t, framework.WithRedHat130Tinkerbell()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithControlPlaneHardware(1), + framework.WithWorkerHardware(1), + ) + runTinkerbellSimpleFlow(test) +} + func TestTinkerbellKubernetes128BottleRocketSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -1405,11 +1425,11 @@ func TestTinkerbellKubernetes128BottleRocketSimpleFlow(t *testing.T) { runTinkerbellSimpleFlow(test) } -func TestTinkerbellKubernetes129UbuntuThreeControlPlaneReplicasSimpleFlow(t *testing.T) { +func TestTinkerbellKubernetes130UbuntuThreeControlPlaneReplicasSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), framework.WithClusterFiller(api.WithControlPlaneCount(3)), framework.WithControlPlaneHardware(3), @@ -1418,11 +1438,11 @@ func TestTinkerbellKubernetes129UbuntuThreeControlPlaneReplicasSimpleFlow(t *tes runTinkerbellSimpleFlow(test) } -func TestTinkerbellKubernetes129UbuntuThreeWorkersSimpleFlow(t *testing.T) { +func TestTinkerbellKubernetes130UbuntuThreeWorkersSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithClusterFiller(api.WithWorkerNodeCount(3)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithControlPlaneHardware(1), @@ -1431,12 +1451,12 @@ func TestTinkerbellKubernetes129UbuntuThreeWorkersSimpleFlow(t *testing.T) { runTinkerbellSimpleFlow(test) } -func TestTinkerbellKubernetes129UbuntuControlPlaneScaleUp(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) +func TestTinkerbellKubernetes130UbuntuControlPlaneScaleUp(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), framework.WithControlPlaneHardware(3), @@ -1444,17 +1464,17 @@ func TestTinkerbellKubernetes129UbuntuControlPlaneScaleUp(t *testing.T) { ) runSimpleUpgradeFlowForBareMetal( test, - v1alpha1.Kube129, + v1alpha1.Kube130, framework.WithClusterUpgrade(api.WithControlPlaneCount(3)), ) } -func TestTinkerbellKubernetes129UbuntuWorkerNodeScaleUp(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) +func TestTinkerbellKubernetes130UbuntuWorkerNodeScaleUp(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), 
framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), framework.WithControlPlaneHardware(1), @@ -1462,17 +1482,17 @@ func TestTinkerbellKubernetes129UbuntuWorkerNodeScaleUp(t *testing.T) { ) runSimpleUpgradeFlowForBareMetal( test, - v1alpha1.Kube129, + v1alpha1.Kube130, framework.WithClusterUpgrade(api.WithWorkerNodeCount(2)), ) } -func TestTinkerbellKubernetes129UbuntuWorkerNodeScaleDown(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) +func TestTinkerbellKubernetes130UbuntuWorkerNodeScaleDown(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(2)), framework.WithControlPlaneHardware(1), @@ -1480,17 +1500,17 @@ func TestTinkerbellKubernetes129UbuntuWorkerNodeScaleDown(t *testing.T) { ) runSimpleUpgradeFlowForBareMetal( test, - v1alpha1.Kube129, + v1alpha1.Kube130, framework.WithClusterUpgrade(api.WithWorkerNodeCount(1)), ) } -func TestTinkerbellKubernetes129UbuntuControlPlaneScaleDown(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) +func TestTinkerbellKubernetes130UbuntuControlPlaneScaleDown(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithClusterFiller(api.WithControlPlaneCount(3)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), framework.WithControlPlaneHardware(3), @@ -1498,18 +1518,18 @@ func TestTinkerbellKubernetes129UbuntuControlPlaneScaleDown(t *testing.T) { ) runSimpleUpgradeFlowForBareMetal( test, - v1alpha1.Kube129, + v1alpha1.Kube130, framework.WithClusterUpgrade(api.WithControlPlaneCount(1)), ) } // Worker nodegroup taints and labels -func TestTinkerbellKubernetes129UbuntuWorkerNodeGroupsTaintsAndLabels(t *testing.T) { +func TestTinkerbellKubernetes130UbuntuWorkerNodeGroupsTaintsAndLabels(t *testing.T) { test := framework.NewClusterE2ETest( t, framework.NewTinkerbell( t, - framework.WithUbuntu129Tinkerbell(), + framework.WithUbuntu130Tinkerbell(), framework.WithCustomTinkerbellMachineConfig(nodeGroupLabel1), framework.WithCustomTinkerbellMachineConfig(nodeGroupLabel2), ), @@ -1537,23 +1557,23 @@ func TestTinkerbellKubernetes129UbuntuWorkerNodeGroupsTaintsAndLabels(t *testing // Proxy tests -func TestTinkerbellAirgappedKubernetes129UbuntuProxyConfigFlow(t *testing.T) { +func TestTinkerbellAirgappedKubernetes130UbuntuProxyConfigFlow(t *testing.T) { localIp, err := networkutils.GetLocalIP() if err != nil { t.Fatalf("Cannot get admin machine local IP: %v", err) } t.Logf("Admin machine's IP is: %s", localIp) - kubeVersion := strings.Replace(string(v1alpha1.Kube129), ".", "-", 1) + kubeVersion := strings.Replace(string(v1alpha1.Kube130), ".", "-", 1) test := framework.NewClusterE2ETest( t, framework.NewTinkerbell(t, - framework.WithUbuntu129Tinkerbell(), + framework.WithUbuntu130Tinkerbell(), framework.WithHookImagesURLPath("http://"+localIp.String()+":8080"), ), framework.WithClusterFiller( - 
api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), ), framework.WithControlPlaneHardware(1), framework.WithWorkerHardware(1), @@ -1564,11 +1584,11 @@ func TestTinkerbellAirgappedKubernetes129UbuntuProxyConfigFlow(t *testing.T) { } // OOB test -func TestTinkerbellKubernetes129UbuntuOOB(t *testing.T) { +func TestTinkerbellKubernetes130UbuntuOOB(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithOOBConfiguration(), framework.WithControlPlaneHardware(1), framework.WithWorkerHardware(1), @@ -1576,26 +1596,26 @@ func TestTinkerbellKubernetes129UbuntuOOB(t *testing.T) { runTinkerbellSimpleFlow(test) } -func TestTinkerbellK8sUpgrade128to129WithUbuntuOOB(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu128Tinkerbell()) +func TestTinkerbellK8sUpgrade129to130WithUbuntuOOB(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), framework.WithOOBConfiguration(), framework.WithControlPlaneHardware(2), framework.WithWorkerHardware(2), ) runSimpleUpgradeFlowForBareMetal( test, - v1alpha1.Kube129, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube129)), - provider.WithProviderUpgrade(framework.Ubuntu129Image()), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(framework.Ubuntu130Image()), ) } -func TestTinkerbellSingleNode128To129UbuntuManagementCPUpgradeAPI(t *testing.T) { - provider := framework.NewTinkerbell(t, framework.WithUbuntu128Tinkerbell()) +func TestTinkerbellSingleNode129To130UbuntuManagementCPUpgradeAPI(t *testing.T) { + provider := framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()) managementCluster := framework.NewClusterE2ETest( t, provider, @@ -1603,7 +1623,7 @@ func TestTinkerbellSingleNode128To129UbuntuManagementCPUpgradeAPI(t *testing.T) framework.WithWorkerHardware(2), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithControlPlaneCount(1), api.WithEtcdCountIfExternal(0), api.RemoveAllWorkerNodeGroups(), @@ -1616,10 +1636,10 @@ func TestTinkerbellSingleNode128To129UbuntuManagementCPUpgradeAPI(t *testing.T) runWorkloadClusterUpgradeFlowWithAPIForBareMetal( test, api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneCount(3), ), - provider.WithKubeVersionAndOS(v1alpha1.Kube129, framework.Ubuntu2004, nil), + provider.WithKubeVersionAndOS(v1alpha1.Kube130, framework.Ubuntu2004, nil), ) } diff --git a/test/framework/tinkerbell.go b/test/framework/tinkerbell.go index 1d56f12ca79f..8c521240c979 100644 --- a/test/framework/tinkerbell.go +++ b/test/framework/tinkerbell.go @@ -16,24 +16,24 @@ const ( tinkerbellProviderName = "tinkerbell" tinkerbellBootstrapIPEnvVar = "T_TINKERBELL_BOOTSTRAP_IP" tinkerbellControlPlaneNetworkCidrEnvVar = "T_TINKERBELL_CP_NETWORK_CIDR" - tinkerbellImageUbuntu124EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_1_24" tinkerbellImageUbuntu125EnvVar =
"T_TINKERBELL_IMAGE_UBUNTU_1_25" tinkerbellImageUbuntu126EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_1_26" tinkerbellImageUbuntu127EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_1_27" tinkerbellImageUbuntu128EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_1_28" tinkerbellImageUbuntu129EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_1_29" - tinkerbellImageUbuntu2204Kubernetes124EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_24" + tinkerbellImageUbuntu130EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_1_30" tinkerbellImageUbuntu2204Kubernetes125EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_25" tinkerbellImageUbuntu2204Kubernetes126EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_26" tinkerbellImageUbuntu2204Kubernetes127EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_27" tinkerbellImageUbuntu2204Kubernetes128EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_28" tinkerbellImageUbuntu2204Kubernetes129EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_29" - tinkerbellImageRedHat124EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_24" + tinkerbellImageUbuntu2204Kubernetes130EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_30" tinkerbellImageRedHat125EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_25" tinkerbellImageRedHat126EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_26" tinkerbellImageRedHat127EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_27" tinkerbellImageRedHat128EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_28" tinkerbellImageRedHat129EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_29" + tinkerbellImageRedHat130EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_30" tinkerbellInventoryCsvFilePathEnvVar = "T_TINKERBELL_INVENTORY_CSV" tinkerbellSSHAuthorizedKey = "T_TINKERBELL_SSH_AUTHORIZED_KEY" TinkerbellCIEnvironment = "T_TINKERBELL_CI_ENVIRONMENT" @@ -43,24 +43,24 @@ const ( var requiredTinkerbellEnvVars = []string{ tinkerbellControlPlaneNetworkCidrEnvVar, - tinkerbellImageUbuntu124EnvVar, tinkerbellImageUbuntu125EnvVar, tinkerbellImageUbuntu126EnvVar, tinkerbellImageUbuntu127EnvVar, tinkerbellImageUbuntu128EnvVar, tinkerbellImageUbuntu129EnvVar, - tinkerbellImageUbuntu2204Kubernetes124EnvVar, + tinkerbellImageUbuntu130EnvVar, tinkerbellImageUbuntu2204Kubernetes125EnvVar, tinkerbellImageUbuntu2204Kubernetes126EnvVar, tinkerbellImageUbuntu2204Kubernetes127EnvVar, tinkerbellImageUbuntu2204Kubernetes128EnvVar, tinkerbellImageUbuntu2204Kubernetes129EnvVar, - tinkerbellImageRedHat124EnvVar, + tinkerbellImageUbuntu2204Kubernetes130EnvVar, tinkerbellImageRedHat125EnvVar, tinkerbellImageRedHat126EnvVar, tinkerbellImageRedHat127EnvVar, tinkerbellImageRedHat128EnvVar, tinkerbellImageRedHat129EnvVar, + tinkerbellImageRedHat130EnvVar, tinkerbellInventoryCsvFilePathEnvVar, tinkerbellSSHAuthorizedKey, } @@ -237,6 +237,11 @@ func WithUbuntu129Tinkerbell() TinkerbellOpt { return withKubeVersionAndOS(anywherev1.Kube129, Ubuntu2004, "", nil) } +// WithUbuntu130Tinkerbell tink test with ubuntu 1.30. +func WithUbuntu130Tinkerbell() TinkerbellOpt { + return withKubeVersionAndOS(anywherev1.Kube130, Ubuntu2004, "", nil) +} + // WithRedHat125Tinkerbell tink test with redhat 1.25. func WithRedHat125Tinkerbell() TinkerbellOpt { return withKubeVersionAndOS(anywherev1.Kube125, RedHat8, "", nil) @@ -262,6 +267,11 @@ func WithRedHat129Tinkerbell() TinkerbellOpt { return withKubeVersionAndOS(anywherev1.Kube129, RedHat8, "", nil) } +// WithRedHat130Tinkerbell tink test with redhat 1.30. 
+func WithRedHat130Tinkerbell() TinkerbellOpt { + return withKubeVersionAndOS(anywherev1.Kube130, RedHat8, "", nil) +} + func WithBottleRocketTinkerbell() TinkerbellOpt { return func(t *Tinkerbell) { t.fillers = append(t.fillers, @@ -344,6 +354,11 @@ func Ubuntu129Image() api.TinkerbellFiller { return imageForKubeVersionAndOS(anywherev1.Kube129, Ubuntu2004, "") } +// Ubuntu130Image represents an Ubuntu raw image corresponding to Kubernetes 1.30. +func Ubuntu130Image() api.TinkerbellFiller { + return imageForKubeVersionAndOS(anywherev1.Kube130, Ubuntu2004, "") +} + // Ubuntu126ImageForCP represents an Ubuntu raw image corresponding to Kubernetes 1.26 and is set for CP machine config. func Ubuntu126ImageForCP() api.TinkerbellFiller { return imageForKubeVersionAndOS(anywherev1.Kube126, Ubuntu2004, controlPlaneIdentifier) @@ -364,6 +379,11 @@ func Ubuntu129ImageForCP() api.TinkerbellFiller { return imageForKubeVersionAndOS(anywherev1.Kube129, Ubuntu2004, controlPlaneIdentifier) } +// Ubuntu130ImageForCP represents an Ubuntu raw image corresponding to Kubernetes 1.30 and is set for CP machine config. +func Ubuntu130ImageForCP() api.TinkerbellFiller { + return imageForKubeVersionAndOS(anywherev1.Kube130, Ubuntu2004, controlPlaneIdentifier) +} + // Ubuntu126ImageForWorker represents an Ubuntu raw image corresponding to Kubernetes 1.26 and is set for worker machine config. func Ubuntu126ImageForWorker() api.TinkerbellFiller { return imageForKubeVersionAndOS(anywherev1.Kube126, Ubuntu2004, workerIdentifier) @@ -384,6 +404,11 @@ func Ubuntu129ImageForWorker() api.TinkerbellFiller { return imageForKubeVersionAndOS(anywherev1.Kube129, Ubuntu2004, workerIdentifier) } +// Ubuntu130ImageForWorker represents an Ubuntu raw image corresponding to Kubernetes 1.30 and is set for worker machine config. +func Ubuntu130ImageForWorker() api.TinkerbellFiller { + return imageForKubeVersionAndOS(anywherev1.Kube130, Ubuntu2004, workerIdentifier) +} + // Ubuntu2204Kubernetes126Image represents an Ubuntu 22.04 raw image corresponding to Kubernetes 1.26. func Ubuntu2204Kubernetes126Image() api.TinkerbellFiller { return imageForKubeVersionAndOS(anywherev1.Kube126, Ubuntu2204, "") @@ -403,3 +428,8 @@ func Ubuntu2204Kubernetes128Image() api.TinkerbellFiller { func Ubuntu2204Kubernetes129Image() api.TinkerbellFiller { return imageForKubeVersionAndOS(anywherev1.Kube129, Ubuntu2204, "") } + +// Ubuntu2204Kubernetes130Image represents an Ubuntu 22.04 raw image corresponding to Kubernetes 1.30. +func Ubuntu2204Kubernetes130Image() api.TinkerbellFiller { + return imageForKubeVersionAndOS(anywherev1.Kube130, Ubuntu2204, "") +}
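The 1.30 options and fillers added above compose exactly like their 1.29 counterparts. As a minimal sketch of how the new per-machine-config fillers might be combined (a hypothetical test, not part of this series; it assumes WithProviderUpgrade accepts multiple TinkerbellFillers, as its variadic use elsewhere in these tests suggests):

func TestTinkerbellKubernetes130UbuntuSeparateImagesUpgradeSketch(t *testing.T) {
	// Hypothetical: create a 1.30 cluster, then pin the control-plane and
	// worker machine-config images independently via the fillers added above.
	provider := framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell())
	test := framework.NewClusterE2ETest(
		t,
		provider,
		framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)),
		framework.WithControlPlaneHardware(1),
		framework.WithWorkerHardware(1),
	)
	runSimpleUpgradeFlowForBareMetal(
		test,
		v1alpha1.Kube130,
		provider.WithProviderUpgrade(
			framework.Ubuntu130ImageForCP(),     // control-plane image filler added above
			framework.Ubuntu130ImageForWorker(), // worker image filler added above
		),
	)
}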
From f6a2a833273cd0e4ae03cf3f66d1f8b911c0088c Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Tue, 21 May 2024 16:26:16 -0700 Subject: [PATCH 149/193] Fix Bottlerocket bare metal deprecation callout in changelog (#8202) Co-authored-by: Vivek Koppuru --- docs/content/en/docs/whatsnew/changelog.md | 30 +++++++++++++++------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/docs/content/en/docs/whatsnew/changelog.md b/docs/content/en/docs/whatsnew/changelog.md index b2ce89767cbd..ee9542bd1824 100644 --- a/docs/content/en/docs/whatsnew/changelog.md +++ b/docs/content/en/docs/whatsnew/changelog.md @@ -37,10 +37,11 @@ description: > |:-------------------:|:-------:|:----------:|:-------:|:----------:|:----:| | Ubuntu 20.04 | ✔ | ✔ | ✔ | — | ✔ | | Ubuntu 22.04 | ✔ | ✔ | ✔ | — | — | -| Bottlerocket 1.19.2 | ✔ | ✔ | — | — | — | +| Bottlerocket 1.19.2 | ✔ | \* | — | — | — | | RHEL 8.x | ✔ | ✔ | ✔ | ✔ | — | | RHEL 9.x | — | — | ✔ | ✔ | — | -* [EKS Anywhere issue regarding deprecation of Bottlerocket bare metal variants](https://github.com/aws/eks-anywhere/issues/7754) + +\* [EKS Anywhere issue regarding deprecation of Bottlerocket bare metal variants](https://github.com/aws/eks-anywhere/issues/7754) ### Changed - Backporting dependency bumps to fix vulnerabilities [#8118](https://github.com/aws/eks-anywhere/pull/8118) @@ -60,10 +61,11 @@ description: > |:-------------------:|:-------:|:----------:|:-------:|:----------:|:----:| | Ubuntu 20.04 | ✔ | ✔ | ✔ | — | ✔ | | Ubuntu 22.04 | ✔ | ✔ | ✔ | — | — | -| Bottlerocket 1.19.2 | ✔ | ✔ | — | — | — | +| Bottlerocket 1.19.2 | ✔ | \* | — | — | — | | RHEL 8.x | ✔ | ✔ | ✔ | ✔ | — | | RHEL 9.x | — | — | ✔ | ✔ | — | -* [EKS Anywhere issue regarding deprecation of Bottlerocket bare metal variants](https://github.com/aws/eks-anywhere/issues/7754) + +\* [EKS Anywhere issue regarding deprecation of Bottlerocket bare metal variants](https://github.com/aws/eks-anywhere/issues/7754) ### Changed - Upgraded EKS-Anywhere Packages from `v0.4.2` to [`v0.4.3`](https://github.com/aws/eks-anywhere-packages/releases/tag/v0.4.3) @@ -77,10 +79,12 @@ description: > |:-------------------:|:-------:|:----------:|:-------:|:----------:|:----:| | Ubuntu 20.04 | ✔ | ✔ | ✔ | — | ✔ | | Ubuntu 22.04 | ✔ | ✔ | ✔ | — | — | -| Bottlerocket 1.19.2 | ✔ | ✔ | — | — | — | +| Bottlerocket 1.19.2 | ✔ | \* | — | — | — | | RHEL 8.x | ✔ | ✔ | ✔ | ✔ | — | | RHEL 9.x | — | — | ✔ | ✔ | — | +\* [EKS Anywhere issue regarding deprecation of Bottlerocket bare metal variants](https://github.com/aws/eks-anywhere/issues/7754) + ### Changed - Support Docs site for penultimate EKS-A version [#8010](https://github.com/aws/eks-anywhere/pull/8010) - Update Ubuntu 22.04 ISO URLs to latest stable release [#3114](https://github.com/aws/eks-anywhere-build-tooling/pull/3114) @@ -103,10 +107,12 @@ description: > |:-------------------:|:-------:|:----------:|:-------:|:----------:|:----:| | Ubuntu 20.04 | ✔ | ✔ | ✔ | — | ✔ | | Ubuntu 22.04 | ✔ | ✔ | ✔ | — | — | -| Bottlerocket 1.19.2 | ✔ | ✔ | — | — | — | +| Bottlerocket 1.19.2 | ✔ | \* | — | — | — | | RHEL 8.x | ✔ | ✔ | ✔ | ✔ | — | | RHEL 9.x | — | — | ✔ | ✔ | — | +\* [EKS Anywhere issue regarding deprecation of Bottlerocket bare metal variants](https://github.com/aws/eks-anywhere/issues/7754) + ### Changed - Updated helm to v3.14.3
[#3050](https://github.com/aws/eks-anywhere-build-tooling/pull/3050) @@ -121,10 +127,12 @@ description: > |:-------------------:|:-------:|:----------:|:-------:|:----------:|:----:| | Ubuntu 20.04 | ✔ | ✔ | ✔ | — | ✔ | | Ubuntu 22.04 | ✔ | ✔ | ✔ | — | — | -| Bottlerocket 1.19.2 | ✔ | ✔ | — | — | — | +| Bottlerocket 1.19.2 | ✔ | \* | — | — | — | | RHEL 8.x | ✔ | ✔ | ✔ | ✔ | — | | RHEL 9.x | — | — | ✔ | ✔ | — | +\* [EKS Anywhere issue regarding deprecation of Bottlerocket bare metal variants](https://github.com/aws/eks-anywhere/issues/7754) + ### Changed - Update CAPC to 0.4.10-rc1 [#3105](https://github.com/aws/eks-anywhere-build-tooling/pull/3015) - Upgraded EKS-D: @@ -144,10 +152,12 @@ description: > |:-------------------:|:-------:|:----------:|:-------:|:----------:|:----:| | Ubuntu 20.04 | ✔ | ✔ | ✔ | — | ✔ | | Ubuntu 22.04 | ✔ | ✔ | ✔ | — | — | -| Bottlerocket 1.19.2 | ✔ | ✔ | — | — | — | +| Bottlerocket 1.19.2 | ✔ | \* | — | — | — | | RHEL 8.x | ✔ | ✔ | ✔ | ✔ | — | | RHEL 9.x | — | — | ✔ | ✔ | — | +\* [EKS Anywhere issue regarding deprecation of Bottlerocket bare metal variants](https://github.com/aws/eks-anywhere/issues/7754) + ### Changed - Upgraded EKS-D: - `v1-25-eks-32` to [`v1-25-eks-34`](https://distro.eks.amazonaws.com/releases/1-25/34/) @@ -172,10 +182,12 @@ description: > |:-------------------:|:-------:|:----------:|:-------:|:----------:|:----:| | Ubuntu 20.04 | ✔ | ✔ | ✔ | — | ✔ | | Ubuntu 22.04 | ✔ | ✔ | ✔ | — | — | -| Bottlerocket 1.19.0 | ✔ | ✔ | — | — | — | +| Bottlerocket 1.19.0 | ✔ | \* | — | — | — | | RHEL 8.x | ✔ | ✔ | ✔ | ✔ | — | | RHEL 9.x | — | — | ✔ | ✔ | — | +\* [EKS Anywhere issue regarding deprecation of Bottlerocket bare metal variants](https://github.com/aws/eks-anywhere/issues/7754) + ### Added - Support for Kubernetes v1.29 - Support for in-place EKS Anywhere and Kubernetes version upgrades on Bare Metal clusters From 2576e84c391f86e834aaa30d81a7e214b344ddac Mon Sep 17 00:00:00 2001 From: Xu Deng Date: Wed, 22 May 2024 10:52:57 -0400 Subject: [PATCH 150/193] Add succeeded variable in the e2e result log entry (#8199) --- internal/test/e2e/run.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/internal/test/e2e/run.go b/internal/test/e2e/run.go index 595fae1db8bb..e5c787811b1a 100644 --- a/internal/test/e2e/run.go +++ b/internal/test/e2e/run.go @@ -146,6 +146,8 @@ func RunTestsInParallel(conf ParallelRunConf) error { completedInstances := 0 for r := range results { var result string + // This variable can be used in a CloudWatch Logs Insights query to compute the e2e test success rate. + succeeded := 0 // TODO: keeping the old logs temporarily for compatibility with the test tool // Once the tool is updated to support the unified message, remove them if r.err != nil { @@ -158,6 +160,7 @@ func RunTestsInParallel(conf ParallelRunConf) error { failedInstances++ } else { result = testResultPass + succeeded = 1 conf.Logger.Info("Instance tests completed successfully", "jobId", r.conf.JobID, "instanceId", r.conf.InstanceID, "commandId", r.testCommandResult.CommandId, "tests", r.conf.Regex, "status", testResultPass) } completedInstances++ @@ -168,6 +171,7 @@ func RunTestsInParallel(conf ParallelRunConf) error { "instanceId", r.conf.InstanceID, "completedInstances", completedInstances, "totalInstances", totalInstances, + "succeeded", succeeded, ) putInstanceTestResultMetrics(r) }
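The succeeded field is emitted as a numeric 0 or 1 precisely so it can be summed. As an illustration of the kind of CloudWatch Logs Insights query the new comment refers to (the query itself is an assumption, not part of the change, and presumes the structured log fields are auto-discovered from the JSON log entries):

// e2eSuccessRateQuery is a hypothetical Logs Insights query that turns the
// 0/1 `succeeded` field logged above into a daily success-rate percentage.
const e2eSuccessRateQuery = `
fields @timestamp, succeeded
| filter ispresent(succeeded)
| stats 100 * sum(succeeded) / count(*) as successRatePct by bin(1d)
`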
From 31e1af93bcffa20c389ec412a8e412ae850c4e15 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 May 2024 08:13:03 -0700 Subject: [PATCH 151/193] Bump codecov/codecov-action from 4.3.1 to 4.4.1 (#8182) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/go-coverage.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/go-coverage.yml b/.github/workflows/go-coverage.yml index d271501d86dd..4dbae47db862 100644 --- a/.github/workflows/go-coverage.yml +++ b/.github/workflows/go-coverage.yml @@ -22,7 +22,7 @@ jobs: - name: Run go test with coverage run: COVER_PROFILE=coverage.txt make coverage-unit-test - name: Codecov upload - uses: codecov/codecov-action@v4.3.1 + uses: codecov/codecov-action@v4.4.1 with: files: ./coverage.txt token: ${{ secrets.CODECOV_TOKEN }} From d6ea16c5e75ae89baaeffe6476dff7bb2b91a442 Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Wed, 22 May 2024 08:24:58 -0700 Subject: [PATCH 152/193] [PR BOT] Generate release testdata files (#8165) --- .../testdata/main-bundle-release.yaml | 432 +++++++++--------- 1 file changed, 216 insertions(+), 216 deletions(-) diff --git a/release/cli/pkg/operations/testdata/main-bundle-release.yaml b/release/cli/pkg/operations/testdata/main-bundle-release.yaml index 0d166fa3745a..4c7ae8796f1f 100644 --- a/release/cli/pkg/operations/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/operations/testdata/main-bundle-release.yaml @@ -10,7 +10,7 @@ spec: versionsBundles: - bootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.1/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.2/bootstrap-components.yaml controller: arch: - amd64 @@ -19,7 +19,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-bootstrap-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -30,8 +30,8 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.1/metadata.yaml - version: v1.7.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 bottlerocketHostContainers: admin: arch: @@ -67,7 +67,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-acmesolver os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.5-eks-a-v0.0.0-dev-build.1 cainjector: arch: - amd64 @@ -76,7 +76,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-cainjector os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.4-eks-a-v0.0.0-dev-build.1 + uri:
public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.5-eks-a-v0.0.0-dev-build.1 controller: arch: - amd64 @@ -85,7 +85,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-controller os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.5-eks-a-v0.0.0-dev-build.1 ctl: arch: - amd64 @@ -94,10 +94,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-ctl os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.5-eks-a-v0.0.0-dev-build.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.4/cert-manager.yaml - version: v1.14.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.5/cert-manager.yaml + version: v1.14.5+abcdef1 webhook: arch: - amd64 @@ -106,7 +106,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-webhook os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.5-eks-a-v0.0.0-dev-build.1 cilium: cilium: arch: @@ -167,7 +167,7 @@ spec: version: v0.4.10-rc1+abcdef1 clusterAPI: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.1/core-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.2/core-components.yaml controller: arch: - amd64 @@ -176,7 +176,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -187,11 +187,11 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.1/metadata.yaml - version: v1.7.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 controlPlane: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.1/control-plane-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.2/control-plane-components.yaml controller: arch: - amd64 @@ -200,7 +200,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-control-plane-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 + uri: 
public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -211,13 +211,13 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.1/metadata.yaml - version: v1.7.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 docker: clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/cluster-template-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/cluster-template-development.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/infrastructure-components-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/infrastructure-components-development.yaml kubeProxy: arch: - amd64 @@ -235,10 +235,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-docker os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.7.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.7.2-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/metadata.yaml - version: v1.7.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 eksD: ami: bottlerocket: {} @@ -306,7 +306,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cli-tools os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.5-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.6-eks-a-v0.0.0-dev-build.1 clusterController: arch: - amd64 @@ -315,9 +315,9 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cluster-controller os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.5-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.6-eks-a-v0.0.0-dev-build.1 components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.5/eksa-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.6/eksa-components.yaml diagnosticCollector: arch: - amd64 @@ -326,7 +326,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-diagnostic-collector os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.5-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.6-eks-a-v0.0.0-dev-build.1 version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: @@ 
-385,7 +385,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: helm-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/helm-controller:v0.37.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/helm-controller:v1.0.1-eks-a-v0.0.0-dev-build.1 kustomizeController: arch: - amd64 @@ -403,7 +403,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: notification-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/notification-controller:v1.2.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/notification-controller:v1.3.0-eks-a-v0.0.0-dev-build.1 sourceController: arch: - amd64 @@ -413,7 +413,7 @@ spec: name: source-controller os: linux uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.2.5-eks-a-v0.0.0-dev-build.1 - version: v2.2.3+abcdef1 + version: v2.3.0+abcdef1 haproxy: image: arch: @@ -751,11 +751,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-vsphere os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.9.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.10.0-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/infrastructure-components.yaml kubeProxy: arch: - amd64 @@ -784,11 +784,11 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.25.3-eks-d-1-25-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/metadata.yaml - version: v1.9.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/metadata.yaml + version: v1.10.0+abcdef1 - bootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.1/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.2/bootstrap-components.yaml controller: arch: - amd64 @@ -797,7 +797,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-bootstrap-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -808,8 +808,8 @@ spec: os: linux uri: 
public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.1/metadata.yaml - version: v1.7.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 bottlerocketHostContainers: admin: arch: @@ -845,7 +845,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-acmesolver os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.5-eks-a-v0.0.0-dev-build.1 cainjector: arch: - amd64 @@ -854,7 +854,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-cainjector os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.5-eks-a-v0.0.0-dev-build.1 controller: arch: - amd64 @@ -863,7 +863,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-controller os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.5-eks-a-v0.0.0-dev-build.1 ctl: arch: - amd64 @@ -872,10 +872,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-ctl os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.5-eks-a-v0.0.0-dev-build.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.4/cert-manager.yaml - version: v1.14.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.5/cert-manager.yaml + version: v1.14.5+abcdef1 webhook: arch: - amd64 @@ -884,7 +884,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-webhook os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.5-eks-a-v0.0.0-dev-build.1 cilium: cilium: arch: @@ -945,7 +945,7 @@ spec: version: v0.4.10-rc1+abcdef1 clusterAPI: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.1/core-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.2/core-components.yaml controller: arch: - amd64 @@ -954,7 +954,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -965,11 
+965,11 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.1/metadata.yaml - version: v1.7.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 controlPlane: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.1/control-plane-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.2/control-plane-components.yaml controller: arch: - amd64 @@ -978,7 +978,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-control-plane-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -989,13 +989,13 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.1/metadata.yaml - version: v1.7.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 docker: clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/cluster-template-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/cluster-template-development.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/infrastructure-components-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/infrastructure-components-development.yaml kubeProxy: arch: - amd64 @@ -1013,10 +1013,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-docker os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.7.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.7.2-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/metadata.yaml - version: v1.7.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 eksD: ami: bottlerocket: {} @@ -1084,7 +1084,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cli-tools os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.5-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.6-eks-a-v0.0.0-dev-build.1 clusterController: arch: - amd64 @@ -1093,9 +1093,9 @@ spec: imageDigest: 
sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cluster-controller os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.5-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.6-eks-a-v0.0.0-dev-build.1 components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.5/eksa-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.6/eksa-components.yaml diagnosticCollector: arch: - amd64 @@ -1104,7 +1104,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-diagnostic-collector os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.5-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.6-eks-a-v0.0.0-dev-build.1 version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: @@ -1163,7 +1163,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: helm-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/helm-controller:v0.37.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/helm-controller:v1.0.1-eks-a-v0.0.0-dev-build.1 kustomizeController: arch: - amd64 @@ -1181,7 +1181,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: notification-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/notification-controller:v1.2.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/notification-controller:v1.3.0-eks-a-v0.0.0-dev-build.1 sourceController: arch: - amd64 @@ -1191,7 +1191,7 @@ spec: name: source-controller os: linux uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.2.5-eks-a-v0.0.0-dev-build.1 - version: v2.2.3+abcdef1 + version: v2.3.0+abcdef1 haproxy: image: arch: @@ -1529,11 +1529,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-vsphere os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.9.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.10.0-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/infrastructure-components.yaml kubeProxy: arch: - amd64 @@ -1562,11 +1562,11 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.26.2-eks-d-1-26-eks-a-v0.0.0-dev-build.1 metadata: - uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/metadata.yaml - version: v1.9.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/metadata.yaml + version: v1.10.0+abcdef1 - bootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.1/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.2/bootstrap-components.yaml controller: arch: - amd64 @@ -1575,7 +1575,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-bootstrap-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -1586,8 +1586,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.1/metadata.yaml - version: v1.7.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 bottlerocketHostContainers: admin: arch: @@ -1623,7 +1623,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-acmesolver os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.5-eks-a-v0.0.0-dev-build.1 cainjector: arch: - amd64 @@ -1632,7 +1632,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-cainjector os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.5-eks-a-v0.0.0-dev-build.1 controller: arch: - amd64 @@ -1641,7 +1641,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-controller os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.5-eks-a-v0.0.0-dev-build.1 ctl: arch: - amd64 @@ -1650,10 +1650,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-ctl os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.5-eks-a-v0.0.0-dev-build.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.4/cert-manager.yaml - version: v1.14.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.5/cert-manager.yaml + version: v1.14.5+abcdef1 webhook: arch: - amd64 @@ -1662,7 +1662,7 @@ spec: imageDigest: 
sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-webhook os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.5-eks-a-v0.0.0-dev-build.1 cilium: cilium: arch: @@ -1723,7 +1723,7 @@ spec: version: v0.4.10-rc1+abcdef1 clusterAPI: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.1/core-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.2/core-components.yaml controller: arch: - amd64 @@ -1732,7 +1732,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -1743,11 +1743,11 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.1/metadata.yaml - version: v1.7.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 controlPlane: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.1/control-plane-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.2/control-plane-components.yaml controller: arch: - amd64 @@ -1756,7 +1756,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-control-plane-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -1767,13 +1767,13 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.1/metadata.yaml - version: v1.7.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 docker: clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/cluster-template-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/cluster-template-development.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/infrastructure-components-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/infrastructure-components-development.yaml kubeProxy: arch: - amd64 @@ -1791,10 +1791,10 @@ spec: imageDigest: 
sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-docker os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.7.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.7.2-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/metadata.yaml - version: v1.7.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 eksD: ami: bottlerocket: {} @@ -1862,7 +1862,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cli-tools os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.5-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.6-eks-a-v0.0.0-dev-build.1 clusterController: arch: - amd64 @@ -1871,9 +1871,9 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cluster-controller os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.5-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.6-eks-a-v0.0.0-dev-build.1 components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.5/eksa-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.6/eksa-components.yaml diagnosticCollector: arch: - amd64 @@ -1882,7 +1882,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-diagnostic-collector os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.5-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.6-eks-a-v0.0.0-dev-build.1 version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: @@ -1941,7 +1941,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: helm-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/helm-controller:v0.37.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/helm-controller:v1.0.1-eks-a-v0.0.0-dev-build.1 kustomizeController: arch: - amd64 @@ -1959,7 +1959,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: notification-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/notification-controller:v1.2.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/notification-controller:v1.3.0-eks-a-v0.0.0-dev-build.1 sourceController: arch: - amd64 @@ -1969,7 +1969,7 @@ spec: name: source-controller os: linux uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.2.5-eks-a-v0.0.0-dev-build.1 - version: v2.2.3+abcdef1 + version: v2.3.0+abcdef1 haproxy: image: arch: @@ -2307,11 +2307,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-vsphere os: linux - uri: 
public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.9.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.10.0-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/infrastructure-components.yaml kubeProxy: arch: - amd64 @@ -2340,11 +2340,11 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.27.0-eks-d-1-27-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/metadata.yaml - version: v1.9.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/metadata.yaml + version: v1.10.0+abcdef1 - bootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.1/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.2/bootstrap-components.yaml controller: arch: - amd64 @@ -2353,7 +2353,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-bootstrap-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -2364,8 +2364,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.1/metadata.yaml - version: v1.7.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 bottlerocketHostContainers: admin: arch: @@ -2401,7 +2401,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-acmesolver os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.5-eks-a-v0.0.0-dev-build.1 cainjector: arch: - amd64 @@ -2410,7 +2410,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-cainjector os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.5-eks-a-v0.0.0-dev-build.1 controller: 
arch: - amd64 @@ -2419,7 +2419,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-controller os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.5-eks-a-v0.0.0-dev-build.1 ctl: arch: - amd64 @@ -2428,10 +2428,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-ctl os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.5-eks-a-v0.0.0-dev-build.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.4/cert-manager.yaml - version: v1.14.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.5/cert-manager.yaml + version: v1.14.5+abcdef1 webhook: arch: - amd64 @@ -2440,7 +2440,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-webhook os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.5-eks-a-v0.0.0-dev-build.1 cilium: cilium: arch: @@ -2501,7 +2501,7 @@ spec: version: v0.4.10-rc1+abcdef1 clusterAPI: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.1/core-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.2/core-components.yaml controller: arch: - amd64 @@ -2510,7 +2510,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -2521,11 +2521,11 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.1/metadata.yaml - version: v1.7.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 controlPlane: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.1/control-plane-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.2/control-plane-components.yaml controller: arch: - amd64 @@ -2534,7 +2534,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-control-plane-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -2545,13 +2545,13 @@ 
spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.1/metadata.yaml - version: v1.7.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 docker: clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/cluster-template-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/cluster-template-development.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/infrastructure-components-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/infrastructure-components-development.yaml kubeProxy: arch: - amd64 @@ -2569,10 +2569,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-docker os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.7.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.7.2-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/metadata.yaml - version: v1.7.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 eksD: ami: bottlerocket: {} @@ -2640,7 +2640,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cli-tools os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.5-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.6-eks-a-v0.0.0-dev-build.1 clusterController: arch: - amd64 @@ -2649,9 +2649,9 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cluster-controller os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.5-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.6-eks-a-v0.0.0-dev-build.1 components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.5/eksa-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.6/eksa-components.yaml diagnosticCollector: arch: - amd64 @@ -2660,7 +2660,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-diagnostic-collector os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.5-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.6-eks-a-v0.0.0-dev-build.1 version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: @@ -2719,7 +2719,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: helm-controller os: linux - uri: 
public.ecr.aws/release-container-registry/fluxcd/helm-controller:v0.37.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/helm-controller:v1.0.1-eks-a-v0.0.0-dev-build.1 kustomizeController: arch: - amd64 @@ -2737,7 +2737,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: notification-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/notification-controller:v1.2.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/notification-controller:v1.3.0-eks-a-v0.0.0-dev-build.1 sourceController: arch: - amd64 @@ -2747,7 +2747,7 @@ spec: name: source-controller os: linux uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.2.5-eks-a-v0.0.0-dev-build.1 - version: v2.2.3+abcdef1 + version: v2.3.0+abcdef1 haproxy: image: arch: @@ -3085,11 +3085,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-vsphere os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.9.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.10.0-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/infrastructure-components.yaml kubeProxy: arch: - amd64 @@ -3118,11 +3118,11 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.28.0-eks-d-1-28-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/metadata.yaml - version: v1.9.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/metadata.yaml + version: v1.10.0+abcdef1 - bootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.1/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.2/bootstrap-components.yaml controller: arch: - amd64 @@ -3131,7 +3131,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-bootstrap-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -3142,8 +3142,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: - uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.1/metadata.yaml - version: v1.7.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 bottlerocketHostContainers: admin: arch: @@ -3179,7 +3179,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-acmesolver os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.5-eks-a-v0.0.0-dev-build.1 cainjector: arch: - amd64 @@ -3188,7 +3188,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-cainjector os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.5-eks-a-v0.0.0-dev-build.1 controller: arch: - amd64 @@ -3197,7 +3197,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-controller os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.5-eks-a-v0.0.0-dev-build.1 ctl: arch: - amd64 @@ -3206,10 +3206,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-ctl os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.5-eks-a-v0.0.0-dev-build.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.4/cert-manager.yaml - version: v1.14.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.5/cert-manager.yaml + version: v1.14.5+abcdef1 webhook: arch: - amd64 @@ -3218,7 +3218,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-webhook os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.5-eks-a-v0.0.0-dev-build.1 cilium: cilium: arch: @@ -3279,7 +3279,7 @@ spec: version: v0.4.10-rc1+abcdef1 clusterAPI: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.1/core-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.2/core-components.yaml controller: arch: - amd64 @@ -3288,7 +3288,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -3299,11 +3299,11 @@ spec: os: linux uri: 
public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.1/metadata.yaml - version: v1.7.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 controlPlane: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.1/control-plane-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.2/control-plane-components.yaml controller: arch: - amd64 @@ -3312,7 +3312,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-control-plane-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -3323,13 +3323,13 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.1/metadata.yaml - version: v1.7.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 docker: clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/cluster-template-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/cluster-template-development.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/infrastructure-components-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/infrastructure-components-development.yaml kubeProxy: arch: - amd64 @@ -3347,10 +3347,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-docker os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.7.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.7.2-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/metadata.yaml - version: v1.7.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 eksD: ami: bottlerocket: {} @@ -3418,7 +3418,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cli-tools os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.5-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.6-eks-a-v0.0.0-dev-build.1 clusterController: arch: - amd64 @@ -3427,9 +3427,9 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: 
eks-anywhere-cluster-controller os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.5-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.6-eks-a-v0.0.0-dev-build.1 components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.5/eksa-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.6/eksa-components.yaml diagnosticCollector: arch: - amd64 @@ -3438,7 +3438,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-diagnostic-collector os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.5-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.6-eks-a-v0.0.0-dev-build.1 version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: @@ -3497,7 +3497,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: helm-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/helm-controller:v0.37.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/helm-controller:v1.0.1-eks-a-v0.0.0-dev-build.1 kustomizeController: arch: - amd64 @@ -3515,7 +3515,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: notification-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/notification-controller:v1.2.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/notification-controller:v1.3.0-eks-a-v0.0.0-dev-build.1 sourceController: arch: - amd64 @@ -3525,7 +3525,7 @@ spec: name: source-controller os: linux uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.2.5-eks-a-v0.0.0-dev-build.1 - version: v2.2.3+abcdef1 + version: v2.3.0+abcdef1 haproxy: image: arch: @@ -3863,11 +3863,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-vsphere os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.9.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.10.0-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/infrastructure-components.yaml kubeProxy: arch: - amd64 @@ -3896,11 +3896,11 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.29.0-eks-d-1-29-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/metadata.yaml - version: v1.9.3+abcdef1 + 
uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/metadata.yaml + version: v1.10.0+abcdef1 - bootstrap: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.1/bootstrap-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.2/bootstrap-components.yaml controller: arch: - amd64 @@ -3909,7 +3909,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-bootstrap-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -3920,8 +3920,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.1/metadata.yaml - version: v1.7.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 bottlerocketHostContainers: admin: arch: @@ -3957,7 +3957,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-acmesolver os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-acmesolver:v1.14.5-eks-a-v0.0.0-dev-build.1 cainjector: arch: - amd64 @@ -3966,7 +3966,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-cainjector os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-cainjector:v1.14.5-eks-a-v0.0.0-dev-build.1 controller: arch: - amd64 @@ -3975,7 +3975,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-controller os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-controller:v1.14.5-eks-a-v0.0.0-dev-build.1 ctl: arch: - amd64 @@ -3984,10 +3984,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-ctl os: linux - uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-ctl:v1.14.5-eks-a-v0.0.0-dev-build.1 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.4/cert-manager.yaml - version: v1.14.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cert-manager/manifests/v1.14.5/cert-manager.yaml + version: v1.14.5+abcdef1 webhook: arch: - amd64 @@ -3996,7 +3996,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cert-manager-webhook os: linux - uri: 
public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/cert-manager/cert-manager-webhook:v1.14.5-eks-a-v0.0.0-dev-build.1 cilium: cilium: arch: @@ -4057,7 +4057,7 @@ spec: version: v0.4.10-rc1+abcdef1 clusterAPI: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.1/core-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.2/core-components.yaml controller: arch: - amd64 @@ -4066,7 +4066,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -4077,11 +4077,11 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.1/metadata.yaml - version: v1.7.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 controlPlane: components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.1/control-plane-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.2/control-plane-components.yaml controller: arch: - amd64 @@ -4090,7 +4090,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kubeadm-control-plane-controller os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.7.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.7.2-eks-a-v0.0.0-dev-build.1 kubeProxy: arch: - amd64 @@ -4101,13 +4101,13 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.1/metadata.yaml - version: v1.7.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 docker: clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/cluster-template-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/cluster-template-development.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/infrastructure-components-development.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/infrastructure-components-development.yaml kubeProxy: arch: - amd64 @@ -4125,10 +4125,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-docker os: linux - uri: 
public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.7.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.7.2-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.1/metadata.yaml - version: v1.7.1+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/infrastructure-docker/v1.7.2/metadata.yaml + version: v1.7.2+abcdef1 eksD: ami: bottlerocket: {} @@ -4196,7 +4196,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cli-tools os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.5-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.19.6-eks-a-v0.0.0-dev-build.1 clusterController: arch: - amd64 @@ -4205,9 +4205,9 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-cluster-controller os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.5-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.19.6-eks-a-v0.0.0-dev-build.1 components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.5/eksa-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-anywhere/manifests/cluster-controller/v0.19.6/eksa-components.yaml diagnosticCollector: arch: - amd64 @@ -4216,7 +4216,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: eks-anywhere-diagnostic-collector os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.5-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.19.6-eks-a-v0.0.0-dev-build.1 version: v0.0.0-dev+build.0+abcdef1 etcdadmBootstrap: components: @@ -4275,7 +4275,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: helm-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/helm-controller:v0.37.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/helm-controller:v1.0.1-eks-a-v0.0.0-dev-build.1 kustomizeController: arch: - amd64 @@ -4293,7 +4293,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: notification-controller os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/notification-controller:v1.2.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/fluxcd/notification-controller:v1.3.0-eks-a-v0.0.0-dev-build.1 sourceController: arch: - amd64 @@ -4303,7 +4303,7 @@ spec: name: source-controller os: linux uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.2.5-eks-a-v0.0.0-dev-build.1 - version: v2.2.3+abcdef1 + version: v2.3.0+abcdef1 haproxy: image: arch: @@ -4641,11 +4641,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-vsphere os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.9.3-eks-a-v0.0.0-dev-build.1 + uri: 
public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.10.0-eks-a-v0.0.0-dev-build.1
       clusterTemplate:
-        uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/cluster-template.yaml
+        uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/cluster-template.yaml
       components:
-        uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/infrastructure-components.yaml
+        uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/infrastructure-components.yaml
       kubeProxy:
         arch:
         - amd64
@@ -4674,6 +4674,6 @@ spec:
         os: linux
         uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.30.0-eks-d-1-30-eks-a-v0.0.0-dev-build.1
       metadata:
-        uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.9.3/metadata.yaml
-        version: v1.9.3+abcdef1
+        uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.10.0/metadata.yaml
+        version: v1.10.0+abcdef1
 status: {}

From cd34399a6c3dbd371d9403542e0066e1b498d876 Mon Sep 17 00:00:00 2001
From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com>
Date: Wed, 22 May 2024 12:26:58 -0700
Subject: [PATCH 153/193] [PR BOT] Generate release testdata files (#8206)

---
 .../testdata/main-bundle-release.yaml | 24 +++++++++----------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/release/cli/pkg/operations/testdata/main-bundle-release.yaml b/release/cli/pkg/operations/testdata/main-bundle-release.yaml
index 4c7ae8796f1f..a4543c62bba8 100644
--- a/release/cli/pkg/operations/testdata/main-bundle-release.yaml
+++ b/release/cli/pkg/operations/testdata/main-bundle-release.yaml
@@ -394,7 +394,7 @@ spec:
         imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
         name: kustomize-controller
         os: linux
-        uri: public.ecr.aws/release-container-registry/fluxcd/kustomize-controller:v1.2.2-eks-a-v0.0.0-dev-build.1
+        uri: public.ecr.aws/release-container-registry/fluxcd/kustomize-controller:v1.3.0-eks-a-v0.0.0-dev-build.1
       notificationController:
         arch:
         - amd64
@@ -412,7 +412,7 @@ spec:
         imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
         name: source-controller
         os: linux
-        uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.2.5-eks-a-v0.0.0-dev-build.1
+        uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.3.0-eks-a-v0.0.0-dev-build.1
       version: v2.3.0+abcdef1
     haproxy:
       image:
@@ -1172,7 +1172,7 @@ spec:
         imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
         name: kustomize-controller
         os: linux
-        uri: public.ecr.aws/release-container-registry/fluxcd/kustomize-controller:v1.2.2-eks-a-v0.0.0-dev-build.1
+        uri: public.ecr.aws/release-container-registry/fluxcd/kustomize-controller:v1.3.0-eks-a-v0.0.0-dev-build.1
      notificationController:
         arch:
         - amd64
@@ -1190,7 +1190,7 @@ spec:
         imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
         name: source-controller
         os: linux
-        uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.2.5-eks-a-v0.0.0-dev-build.1
+        uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.3.0-eks-a-v0.0.0-dev-build.1
       version: v2.3.0+abcdef1
     haproxy:
       image:
@@ -1950,7 +1950,7 @@ spec:
         imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
         name: kustomize-controller
         os: linux
-        uri: public.ecr.aws/release-container-registry/fluxcd/kustomize-controller:v1.2.2-eks-a-v0.0.0-dev-build.1
+        uri: public.ecr.aws/release-container-registry/fluxcd/kustomize-controller:v1.3.0-eks-a-v0.0.0-dev-build.1
       notificationController:
         arch:
         - amd64
@@ -1968,7 +1968,7 @@ spec:
         imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
         name: source-controller
         os: linux
-        uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.2.5-eks-a-v0.0.0-dev-build.1
+        uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.3.0-eks-a-v0.0.0-dev-build.1
       version: v2.3.0+abcdef1
     haproxy:
       image:
@@ -2728,7 +2728,7 @@ spec:
         imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
         name: kustomize-controller
         os: linux
-        uri: public.ecr.aws/release-container-registry/fluxcd/kustomize-controller:v1.2.2-eks-a-v0.0.0-dev-build.1
+        uri: public.ecr.aws/release-container-registry/fluxcd/kustomize-controller:v1.3.0-eks-a-v0.0.0-dev-build.1
       notificationController:
         arch:
         - amd64
@@ -2746,7 +2746,7 @@ spec:
         imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
         name: source-controller
         os: linux
-        uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.2.5-eks-a-v0.0.0-dev-build.1
+        uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.3.0-eks-a-v0.0.0-dev-build.1
       version: v2.3.0+abcdef1
     haproxy:
       image:
@@ -3506,7 +3506,7 @@ spec:
         imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
         name: kustomize-controller
         os: linux
-        uri: public.ecr.aws/release-container-registry/fluxcd/kustomize-controller:v1.2.2-eks-a-v0.0.0-dev-build.1
+        uri: public.ecr.aws/release-container-registry/fluxcd/kustomize-controller:v1.3.0-eks-a-v0.0.0-dev-build.1
       notificationController:
         arch:
         - amd64
@@ -3524,7 +3524,7 @@ spec:
         imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
         name: source-controller
         os: linux
-        uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.2.5-eks-a-v0.0.0-dev-build.1
+        uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.3.0-eks-a-v0.0.0-dev-build.1
       version: v2.3.0+abcdef1
     haproxy:
       image:
@@ -4284,7 +4284,7 @@ spec:
         imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
         name: kustomize-controller
         os: linux
-        uri: public.ecr.aws/release-container-registry/fluxcd/kustomize-controller:v1.2.2-eks-a-v0.0.0-dev-build.1
+        uri: public.ecr.aws/release-container-registry/fluxcd/kustomize-controller:v1.3.0-eks-a-v0.0.0-dev-build.1
       notificationController:
         arch:
         - amd64
@@ -4302,7 +4302,7 @@ spec:
         imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
         name: source-controller
         os: linux
-        uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.2.5-eks-a-v0.0.0-dev-build.1
+        uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v1.3.0-eks-a-v0.0.0-dev-build.1
       version: v2.3.0+abcdef1
     haproxy:
       image:

From b62fe778126ec2c6098d2e2c8a5ce1ca1ba5ba5d Mon Sep 17 00:00:00 2001
From: Rahul Ganesh <31204974+rahulbabu95@users.noreply.github.com>
Date: Wed, 22 May 2024 13:42:11 -0700
Subject: [PATCH
 154/193] Add 1-30 E2E tests for Docker (#8204)

Signed-off-by: Rahul Ganesh
Co-authored-by: Rahul Ganesh
---
 test/e2e/SKIPPED_TESTS.yaml |   9 ++
 test/e2e/constants.go       |   2 +-
 test/e2e/docker_test.go     | 260 ++++++++++++++++++------------------
 3 files changed, 140 insertions(+), 131 deletions(-)

diff --git a/test/e2e/SKIPPED_TESTS.yaml b/test/e2e/SKIPPED_TESTS.yaml
index 22de9114567d..676d19098138 100644
--- a/test/e2e/SKIPPED_TESTS.yaml
+++ b/test/e2e/SKIPPED_TESTS.yaml
@@ -1,4 +1,13 @@
 skipped_tests:
+# Docker
+# Skipping 1.30 curated packages tests until we add support
+- TestDockerKubernetes130CuratedPackagesSimpleFlow
+- TestDockerKubernetes130CuratedPackagesAdotSimpleFlow
+- TestDockerKubernetes130CuratedPackagesEmissarySimpleFlow
+- TestDockerKubernetes130CuratedPackagesHarborSimpleFlow
+- TestDockerKubernetes130CuratedPackagesPrometheusSimpleFlow
+- TestDockerKubernetes130CuratedPackagesMetalLB
+
 # CloudStack
 #Airgapped tests
 - TestCloudStackKubernetes126RedhatAirgappedRegistryMirror
diff --git a/test/e2e/constants.go b/test/e2e/constants.go
index 600138a56c4d..47e0c9892e89 100644
--- a/test/e2e/constants.go
+++ b/test/e2e/constants.go
@@ -48,5 +48,5 @@ const (
 
 var (
 	EksaPackageControllerHelmValues = []string{"sourceRegistry=public.ecr.aws/l0g8r8j6"}
-	KubeVersions = []v1alpha1.KubernetesVersion{v1alpha1.Kube125, v1alpha1.Kube126, v1alpha1.Kube127, v1alpha1.Kube128, v1alpha1.Kube129}
+	KubeVersions = []v1alpha1.KubernetesVersion{v1alpha1.Kube125, v1alpha1.Kube126, v1alpha1.Kube127, v1alpha1.Kube128, v1alpha1.Kube129, v1alpha1.Kube130}
 )
diff --git a/test/e2e/docker_test.go b/test/e2e/docker_test.go
index 8d7dc2311084..d70911ac2d68 100644
--- a/test/e2e/docker_test.go
+++ b/test/e2e/docker_test.go
@@ -94,18 +94,6 @@ func TestDockerInstallGithubFluxDuringUpgrade(t *testing.T) {
 	)
 }
 
-func TestDockerKubernetes125CuratedPackagesSimpleFlow(t *testing.T) {
-	framework.CheckCuratedPackagesCredentials(t)
-	test := framework.NewClusterE2ETest(t,
-		framework.NewDocker(t),
-		framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
-		framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
-			EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
-			EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
-	)
-	runCuratedPackageInstallSimpleFlow(test)
-}
-
 func TestDockerKubernetes126CuratedPackagesSimpleFlow(t *testing.T) {
 	framework.CheckCuratedPackagesCredentials(t)
 	test := framework.NewClusterE2ETest(t,
@@ -154,16 +142,16 @@ func TestDockerKubernetes129CuratedPackagesSimpleFlow(t *testing.T) {
 	runCuratedPackageInstallSimpleFlow(test)
 }
 
-func TestDockerKubernetes125CuratedPackagesEmissarySimpleFlow(t *testing.T) {
+func TestDockerKubernetes130CuratedPackagesSimpleFlow(t *testing.T) {
 	framework.CheckCuratedPackagesCredentials(t)
 	test := framework.NewClusterE2ETest(t,
 		framework.NewDocker(t),
-		framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)),
-		framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125),
+		framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)),
+		framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130),
 			EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI,
 			EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil),
 	)
-	runCuratedPackageEmissaryInstallSimpleFlow(test)
+	runCuratedPackageInstallSimpleFlow(test)
 }
 
 func TestDockerKubernetes126CuratedPackagesEmissarySimpleFlow(t *testing.T) {
@@ -214,16 +202,16 @@ func TestDockerKubernetes129CuratedPackagesEmissarySimpleFlow(t *testing.T) {
TestDockerKubernetes129CuratedPackagesEmissarySimpleFlow(t *testing.T) { runCuratedPackageEmissaryInstallSimpleFlow(test) } -func TestDockerKubernetes125CuratedPackagesHarborSimpleFlow(t *testing.T) { +func TestDockerKubernetes130CuratedPackagesEmissarySimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, framework.NewDocker(t), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test) + runCuratedPackageEmissaryInstallSimpleFlow(test) } func TestDockerKubernetes126CuratedPackagesHarborSimpleFlow(t *testing.T) { @@ -274,15 +262,16 @@ func TestDockerKubernetes129CuratedPackagesHarborSimpleFlow(t *testing.T) { runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test) } -func TestDockerKubernetes125CuratedPackagesAdotSimpleFlow(t *testing.T) { +func TestDockerKubernetes130CuratedPackagesHarborSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) - test := framework.NewClusterE2ETest(t, framework.NewDocker(t), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + test := framework.NewClusterE2ETest(t, + framework.NewDocker(t), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runCuratedPackagesAdotInstallSimpleFlow(test) // other args as necessary + runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test) } func TestDockerKubernetes126CuratedPackagesAdotSimpleFlow(t *testing.T) { @@ -329,15 +318,15 @@ func TestDockerKubernetes129CuratedPackagesAdotSimpleFlow(t *testing.T) { runCuratedPackagesAdotInstallSimpleFlow(test) // other args as necessary } -func TestDockerKubernetes125CuratedPackagesPrometheusSimpleFlow(t *testing.T) { +func TestDockerKubernetes130CuratedPackagesAdotSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, framework.NewDocker(t), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runCuratedPackagesPrometheusInstallSimpleFlow(test) + runCuratedPackagesAdotInstallSimpleFlow(test) // other args as necessary } func TestDockerKubernetes126CuratedPackagesPrometheusSimpleFlow(t *testing.T) { @@ -384,16 +373,15 @@ func TestDockerKubernetes129CuratedPackagesPrometheusSimpleFlow(t *testing.T) { runCuratedPackagesPrometheusInstallSimpleFlow(test) } -func TestDockerKubernetes125CuratedPackagesDisabled(t *testing.T) { +func TestDockerKubernetes130CuratedPackagesPrometheusSimpleFlow(t *testing.T) 
{ framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, framework.NewDocker(t), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, - EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, - &v1alpha1.PackageConfiguration{Disable: true}), + EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runDisabledCuratedPackageInstallSimpleFlow(test) // other args as necessary + runCuratedPackagesPrometheusInstallSimpleFlow(test) } func TestDockerKubernetes126CuratedPackagesDisabled(t *testing.T) { @@ -420,8 +408,16 @@ func TestDockerKubernetes128CuratedPackagesDisabled(t *testing.T) { runDisabledCuratedPackageInstallSimpleFlow(test) // other args as necessary } -func TestDockerKubernetes125CuratedPackagesMetalLB(t *testing.T) { - RunMetalLBDockerTestsForKubeVersion(t, v1alpha1.Kube125) +func TestDockerKubernetes129CuratedPackagesDisabled(t *testing.T) { + framework.CheckCuratedPackagesCredentials(t) + test := framework.NewClusterE2ETest(t, framework.NewDocker(t), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube129), + EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, + EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, + &v1alpha1.PackageConfiguration{Disable: true}), + ) + runDisabledCuratedPackageInstallSimpleFlow(test) // other args as necessary } func TestDockerKubernetes126CuratedPackagesMetalLB(t *testing.T) { @@ -440,16 +436,11 @@ func TestDockerKubernetes129CuratedPackagesMetalLB(t *testing.T) { RunMetalLBDockerTestsForKubeVersion(t, v1alpha1.Kube129) } -// AWS IAM Auth -func TestDockerKubernetes125AWSIamAuth(t *testing.T) { - test := framework.NewClusterE2ETest(t, - framework.NewDocker(t), - framework.WithAWSIam(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - ) - runAWSIamAuthFlow(test) +func TestDockerKubernetes130CuratedPackagesMetalLB(t *testing.T) { + RunMetalLBDockerTestsForKubeVersion(t, v1alpha1.Kube130) } +// AWS IAM Auth func TestDockerKubernetes126AWSIamAuth(t *testing.T) { test := framework.NewClusterE2ETest(t, framework.NewDocker(t), @@ -486,6 +477,15 @@ func TestDockerKubernetes129AWSIamAuth(t *testing.T) { runAWSIamAuthFlow(test) } +func TestDockerKubernetes130AWSIamAuth(t *testing.T) { + test := framework.NewClusterE2ETest(t, + framework.NewDocker(t), + framework.WithAWSIam(), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + ) + runAWSIamAuthFlow(test) +} + // Flux func TestDockerKubernetes125UpgradeWorkloadClusterWithGithubFlux(t *testing.T) { provider := framework.NewDocker(t) @@ -526,15 +526,6 @@ func TestDockerKubernetes125UpgradeWorkloadClusterWithGithubFlux(t *testing.T) { } // OIDC -func TestDockerKubernetes125OIDC(t *testing.T) { - test := framework.NewClusterE2ETest(t, - framework.NewDocker(t), - framework.WithOIDC(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - ) - runOIDCFlow(test) -} - func TestDockerKubernetes126OIDC(t *testing.T) { test := framework.NewClusterE2ETest(t, framework.NewDocker(t), @@ -571,66 +562,66 @@ func TestDockerKubernetes129OIDC(t *testing.T) { runOIDCFlow(test) } 
+func TestDockerKubernetes130OIDC(t *testing.T) { + test := framework.NewClusterE2ETest(t, + framework.NewDocker(t), + framework.WithOIDC(), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + ) + runOIDCFlow(test) +} + // RegistryMirror -func TestDockerKubernetes127RegistryMirrorAndCert(t *testing.T) { +func TestDockerKubernetes130RegistryMirrorAndCert(t *testing.T) { test := framework.NewClusterE2ETest( t, framework.NewDocker(t), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithRegistryMirrorEndpointAndCert(constants.DockerProviderName), ) runRegistryMirrorConfigFlow(test) } -func TestDockerKubernetes127AirgappedRegistryMirrorAndCert(t *testing.T) { +func TestDockerKubernetes130AirgappedRegistryMirrorAndCert(t *testing.T) { test := framework.NewClusterE2ETest( t, framework.NewDocker(t), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithRegistryMirrorEndpointAndCert(constants.DockerProviderName), ) runDockerAirgapConfigFlow(test) } -func TestDockerKubernetes127AirgappedUpgradeFromLatestRegistryMirrorAndCert(t *testing.T) { +func TestDockerKubernetes130AirgappedUpgradeFromLatestRegistryMirrorAndCert(t *testing.T) { release := latestMinorRelease(t) test := framework.NewClusterE2ETest( t, framework.NewDocker(t), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithRegistryMirrorEndpointAndCert(constants.DockerProviderName), ) runDockerAirgapUpgradeFromReleaseFlow( test, release, - v1alpha1.Kube127, + v1alpha1.Kube130, ) } -func TestDockerKubernetes127RegistryMirrorInsecureSkipVerify(t *testing.T) { +func TestDockerKubernetes130RegistryMirrorInsecureSkipVerify(t *testing.T) { test := framework.NewClusterE2ETest( t, framework.NewDocker(t), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithRegistryMirrorInsecureSkipVerify(constants.DockerProviderName), ) runRegistryMirrorConfigFlow(test) } // Simple flow -func TestDockerKubernetes125SimpleFlow(t *testing.T) { - test := framework.NewClusterE2ETest( - t, - framework.NewDocker(t), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - ) - runSimpleFlow(test) -} - func TestDockerKubernetes126SimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -667,6 +658,15 @@ func TestDockerKubernetes129SimpleFlow(t *testing.T) { runSimpleFlow(test) } +func TestDockerKubernetes130SimpleFlow(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewDocker(t), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + ) + runSimpleFlow(test) +} + // Stacked etcd func TestDockerKubernetesStackedEtcd(t *testing.T) { test := framework.NewClusterE2ETest(t, @@ -676,14 +676,14 @@ func TestDockerKubernetesStackedEtcd(t *testing.T) { } // Taints -func TestDockerKubernetes128Taints(t *testing.T) { +func TestDockerKubernetes130Taints(t *testing.T) { provider := 
framework.NewDocker(t) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -695,7 +695,7 @@ func TestDockerKubernetes128Taints(t *testing.T) { runTaintsUpgradeFlow( test, - v1alpha1.Kube128, + v1alpha1.Kube130, framework.WithClusterUpgrade( api.WithWorkerNodeGroup(worker0, api.WithTaint(framework.NoExecuteTaint())), api.WithWorkerNodeGroup(worker1, api.WithTaint(framework.NoExecuteTaint())), @@ -705,14 +705,14 @@ func TestDockerKubernetes128Taints(t *testing.T) { ) } -func TestDockerKubernetes127WorkloadClusterTaints(t *testing.T) { +func TestDockerKubernetes130WorkloadClusterTaints(t *testing.T) { provider := framework.NewDocker(t) managementCluster := framework.NewClusterE2ETest( t, provider, ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithExternalEtcdTopology(1), @@ -726,7 +726,7 @@ func TestDockerKubernetes127WorkloadClusterTaints(t *testing.T) { t, provider, framework.WithClusterName(test.NewWorkloadClusterName()), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithManagementCluster(managementCluster.ClusterName), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -741,43 +741,43 @@ func TestDockerKubernetes127WorkloadClusterTaints(t *testing.T) { } // Upgrade -func TestDockerKubernetes127To128StackedEtcdUpgrade(t *testing.T) { +func TestDockerKubernetes129To130StackedEtcdUpgrade(t *testing.T) { provider := framework.NewDocker(t) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller(api.WithStackedEtcdTopology()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), ) runSimpleUpgradeFlow( test, - v1alpha1.Kube128, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube128)), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), ) } -func TestDockerKubernetes127To128ExternalEtcdUpgrade(t *testing.T) { +func TestDockerKubernetes129To130ExternalEtcdUpgrade(t *testing.T) { provider := framework.NewDocker(t) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), ) runSimpleUpgradeFlow( test, - v1alpha1.Kube128, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube128)), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), ) } -func TestDockerKubernetes125to126UpgradeFromLatestMinorRelease(t *testing.T) { +func TestDockerKubernetes126to127UpgradeFromLatestMinorRelease(t *testing.T) { release := latestMinorRelease(t) provider := framework.NewDocker(t) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)), 
framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), @@ -785,18 +785,18 @@ func TestDockerKubernetes125to126UpgradeFromLatestMinorRelease(t *testing.T) { runUpgradeFromReleaseFlow( test, release, - v1alpha1.Kube126, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), + v1alpha1.Kube127, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube127)), ) } -func TestDockerKubernetes126to127UpgradeFromLatestMinorRelease(t *testing.T) { +func TestDockerKubernetes127to128UpgradeFromLatestMinorRelease(t *testing.T) { release := latestMinorRelease(t) provider := framework.NewDocker(t) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube126)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), @@ -804,18 +804,18 @@ func TestDockerKubernetes126to127UpgradeFromLatestMinorRelease(t *testing.T) { runUpgradeFromReleaseFlow( test, release, - v1alpha1.Kube127, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube127)), + v1alpha1.Kube128, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube128)), ) } -func TestDockerKubernetes127to128UpgradeFromLatestMinorRelease(t *testing.T) { +func TestDockerKubernetes128to129UpgradeFromLatestMinorRelease(t *testing.T) { release := latestMinorRelease(t) provider := framework.NewDocker(t) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube127)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), @@ -823,18 +823,18 @@ func TestDockerKubernetes127to128UpgradeFromLatestMinorRelease(t *testing.T) { runUpgradeFromReleaseFlow( test, release, - v1alpha1.Kube128, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube128)), + v1alpha1.Kube129, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube129)), ) } -func TestDockerKubernetes128to129UpgradeFromLatestMinorRelease(t *testing.T) { +func TestDockerKubernetes129to130UpgradeFromLatestMinorRelease(t *testing.T) { release := latestMinorRelease(t) provider := framework.NewDocker(t) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube128)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), @@ -842,8 +842,8 @@ func TestDockerKubernetes128to129UpgradeFromLatestMinorRelease(t *testing.T) { runUpgradeFromReleaseFlow( test, release, - v1alpha1.Kube129, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube129)), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), ) } @@ -1022,7 +1022,7 @@ func TestDockerUpgradeWorkloadClusterScaleAddRemoveWorkerNodeGroupsAPI(t *testin ) } -func TestDockerKubernetes127to128UpgradeFromLatestMinorReleaseAPI(t *testing.T) { 
+func TestDockerKubernetes129to130UpgradeFromLatestMinorReleaseAPI(t *testing.T) { release := latestMinorRelease(t) provider := framework.NewDocker(t) managementCluster := framework.NewClusterE2ETest( @@ -1030,7 +1030,7 @@ func TestDockerKubernetes127to128UpgradeFromLatestMinorReleaseAPI(t *testing.T) ) managementCluster.GenerateClusterConfigForVersion(release.Version, framework.ExecuteWithEksaRelease(release)) managementCluster.UpdateClusterConfig(api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube129), )) test := framework.NewMulticlusterE2ETest(t, managementCluster) @@ -1039,7 +1039,7 @@ func TestDockerKubernetes127to128UpgradeFromLatestMinorReleaseAPI(t *testing.T) ) wc.GenerateClusterConfigForVersion(release.Version, framework.ExecuteWithEksaRelease(release)) wc.UpdateClusterConfig(api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithManagementCluster(managementCluster.ClusterName), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), @@ -1050,18 +1050,18 @@ func TestDockerKubernetes127to128UpgradeFromLatestMinorReleaseAPI(t *testing.T) runMulticlusterUpgradeFromReleaseFlowAPI( test, release, - v1alpha1.Kube128, + v1alpha1.Kube130, "", ) } -func TestDockerUpgradeKubernetes127to128WorkloadClusterScaleupGitHubFluxAPI(t *testing.T) { +func TestDockerUpgradeKubernetes129to130WorkloadClusterScaleupGitHubFluxAPI(t *testing.T) { provider := framework.NewDocker(t) managementCluster := framework.NewClusterE2ETest( t, provider, framework.WithFluxGithubEnvVarCheck(), framework.WithFluxGithubCleanup(), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithExternalEtcdTopology(1), @@ -1074,7 +1074,7 @@ func TestDockerUpgradeKubernetes127to128WorkloadClusterScaleupGitHubFluxAPI(t *t t, provider, framework.WithClusterName(test.NewWorkloadClusterName()), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithManagementCluster(managementCluster.ClusterName), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -1086,20 +1086,20 @@ func TestDockerUpgradeKubernetes127to128WorkloadClusterScaleupGitHubFluxAPI(t *t runWorkloadClusterUpgradeFlowAPIWithFlux( test, api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneCount(3), api.WithWorkerNodeGroup("worker-0", api.WithCount(2)), ), ) } -func TestDockerKubernetes128UpgradeWorkloadClusterLabelsAndTaintsGitHubFluxAPI(t *testing.T) { +func TestDockerKubernetes130UpgradeWorkloadClusterLabelsAndTaintsGitHubFluxAPI(t *testing.T) { provider := framework.NewDocker(t) managementCluster := framework.NewClusterE2ETest( t, provider, framework.WithFluxGithubEnvVarCheck(), framework.WithFluxGithubCleanup(), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithExternalEtcdTopology(1), @@ -1112,7 +1112,7 @@ func TestDockerKubernetes128UpgradeWorkloadClusterLabelsAndTaintsGitHubFluxAPI(t t, provider, framework.WithClusterName(test.NewWorkloadClusterName()), ).WithClusterConfig( api.ClusterToConfigFiller( - 
api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithManagementCluster(managementCluster.ClusterName), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -1136,13 +1136,13 @@ func TestDockerKubernetes128UpgradeWorkloadClusterLabelsAndTaintsGitHubFluxAPI(t ) } -func TestDockerKubernetes128UpgradeWorkloadClusterScaleAddRemoveWorkerNodeGroupsGitHubFluxAPI(t *testing.T) { +func TestDockerKubernetes130UpgradeWorkloadClusterScaleAddRemoveWorkerNodeGroupsGitHubFluxAPI(t *testing.T) { provider := framework.NewDocker(t) managementCluster := framework.NewClusterE2ETest( t, provider, framework.WithFluxGithubEnvVarCheck(), framework.WithFluxGithubCleanup(), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), api.WithExternalEtcdTopology(1), @@ -1158,7 +1158,7 @@ func TestDockerKubernetes128UpgradeWorkloadClusterScaleAddRemoveWorkerNodeGroups t, provider, framework.WithClusterName(test.NewWorkloadClusterName()), ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithManagementCluster(managementCluster.ClusterName), api.WithControlPlaneCount(1), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -1371,12 +1371,12 @@ func TestDockerKubernetesUpgradeManagementComponents(t *testing.T) { } // etcd scale tests -func TestDockerKubernetes128EtcdScaleUp(t *testing.T) { +func TestDockerKubernetes130EtcdScaleUp(t *testing.T) { test := framework.NewClusterE2ETest( t, framework.NewDocker(t), framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), @@ -1385,19 +1385,19 @@ func TestDockerKubernetes128EtcdScaleUp(t *testing.T) { runSimpleUpgradeFlow( test, - v1alpha1.Kube128, + v1alpha1.Kube130, framework.WithClusterUpgrade( api.WithExternalEtcdTopology(3), ), ) } -func TestDockerKubernetes128EtcdScaleDown(t *testing.T) { +func TestDockerKubernetes130EtcdScaleDown(t *testing.T) { test := framework.NewClusterE2ETest( t, framework.NewDocker(t), framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithExternalEtcdTopology(3), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), @@ -1406,20 +1406,20 @@ func TestDockerKubernetes128EtcdScaleDown(t *testing.T) { runSimpleUpgradeFlow( test, - v1alpha1.Kube128, + v1alpha1.Kube130, framework.WithClusterUpgrade( api.WithExternalEtcdTopology(1), ), ) } -func TestDockerKubernetes127to128EtcdScaleUp(t *testing.T) { +func TestDockerKubernetes129to130EtcdScaleUp(t *testing.T) { provider := framework.NewDocker(t) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithExternalEtcdTopology(1), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), @@ -1428,21 +1428,21 @@ func TestDockerKubernetes127to128EtcdScaleUp(t *testing.T) { runSimpleUpgradeFlow( test, - v1alpha1.Kube128, + v1alpha1.Kube130, framework.WithClusterUpgrade( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithExternalEtcdTopology(3), ), ) } -func 
TestDockerKubernetes127to128EtcdScaleDown(t *testing.T) { +func TestDockerKubernetes129to130EtcdScaleDown(t *testing.T) { provider := framework.NewDocker(t) test := framework.NewClusterE2ETest( t, provider, framework.WithClusterFiller( - api.WithKubernetesVersion(v1alpha1.Kube127), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithExternalEtcdTopology(3), api.WithControlPlaneCount(1), api.WithWorkerNodeCount(1), @@ -1451,9 +1451,9 @@ func TestDockerKubernetes127to128EtcdScaleDown(t *testing.T) { runSimpleUpgradeFlow( test, - v1alpha1.Kube128, + v1alpha1.Kube130, framework.WithClusterUpgrade( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithExternalEtcdTopology(1), ), ) From 084fc3fca1b0db21fd76674f5ea153413d27dfb8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 May 2024 17:00:30 -0400 Subject: [PATCH 155/193] --- (#8194) updated-dependencies: - dependency-name: k8s.io/apimachinery dependency-type: direct:production update-type: version-update:semver-patch dependency-group: kubernetes ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- release/cli/go.mod | 2 +- release/cli/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/release/cli/go.mod b/release/cli/go.mod index 6859c6fc4eff..0fe09e38c76e 100644 --- a/release/cli/go.mod +++ b/release/cli/go.mod @@ -17,7 +17,7 @@ require ( github.com/spf13/viper v1.18.2 golang.org/x/sync v0.7.0 helm.sh/helm/v3 v3.14.4 - k8s.io/apimachinery v0.29.4 + k8s.io/apimachinery v0.29.5 k8s.io/helm v2.17.0+incompatible sigs.k8s.io/controller-runtime v0.16.6 sigs.k8s.io/yaml v1.4.0 diff --git a/release/cli/go.sum b/release/cli/go.sum index 9f5a29adf6c0..6615039beeb5 100644 --- a/release/cli/go.sum +++ b/release/cli/go.sum @@ -880,8 +880,8 @@ k8s.io/apiextensions-apiserver v0.17.2/go.mod h1:4KdMpjkEjjDI2pPfBA15OscyNldHWdB k8s.io/apiextensions-apiserver v0.29.1 h1:S9xOtyk9M3Sk1tIpQMu9wXHm5O2MX6Y1kIpPMimZBZw= k8s.io/apiextensions-apiserver v0.29.1/go.mod h1:zZECpujY5yTW58co8V2EQR4BD6A9pktVgHhvc0uLfeU= k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= -k8s.io/apimachinery v0.29.4 h1:RaFdJiDmuKs/8cm1M6Dh1Kvyh59YQFDcFuFTSmXes6Q= -k8s.io/apimachinery v0.29.4/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y= +k8s.io/apimachinery v0.29.5 h1:Hofa2BmPfpoT+IyDTlcPdCHSnHtEQMoJYGVoQpRTfv4= +k8s.io/apimachinery v0.29.5/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y= k8s.io/apiserver v0.17.2/go.mod h1:lBmw/TtQdtxvrTk0e2cgtOxHizXI+d0mmGQURIHQZlo= k8s.io/apiserver v0.29.4 h1:wPwGOO58GQOpRiZu59P5eRoDcB7QtV+QBglkRiXwCiM= k8s.io/apiserver v0.29.4/go.mod h1:VqTF9t98HVfhKZVRohCPezsdUt9u2g3bHKftxGcXoRo= From 5e1940e091534883212b51059e88d1584a6f5da6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 May 2024 17:09:56 -0400 Subject: [PATCH 156/193] --- (#8196) updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go-v2 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- release/cli/go.mod | 2 +- release/cli/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/release/cli/go.mod b/release/cli/go.mod index 0fe09e38c76e..68a94260c5bf 100644 --- a/release/cli/go.mod +++ b/release/cli/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( github.com/aws/aws-sdk-go v1.52.3 - github.com/aws/aws-sdk-go-v2 v1.26.1 + github.com/aws/aws-sdk-go-v2 v1.27.0 github.com/aws/eks-anywhere v0.18.0 github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e github.com/fsouza/go-dockerclient v1.11.0 diff --git a/release/cli/go.sum b/release/cli/go.sum index 6615039beeb5..0ce0fcafee6f 100644 --- a/release/cli/go.sum +++ b/release/cli/go.sum @@ -58,8 +58,8 @@ github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:o github.com/aws/aws-sdk-go v1.38.40/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.52.3 h1:BNPJmHOXNoM/iBWJKrvaQvJOweRcp3KLpzdb65CfQwU= github.com/aws/aws-sdk-go v1.52.3/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= -github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA= -github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= +github.com/aws/aws-sdk-go-v2 v1.27.0 h1:7bZWKoXhzI+mMR/HjdMx8ZCC5+6fY0lS5tr0bbgiLlo= +github.com/aws/aws-sdk-go-v2 v1.27.0/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e h1:GB6Cn9yKEt31mDF7RrVWyM9WoppNkGYth8zBPIJGJ+w= github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e/go.mod h1:p/KHVJAMv3kofnUnShkZ6pUnZYzm+LK2G7bIi8nnTKA= github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q= From 27e32eef0a1cd30d083617bc108996f62e70786a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 May 2024 17:10:50 -0400 Subject: [PATCH 157/193] --- (#8198) updated-dependencies: - dependency-name: github.com/go-logr/logr dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- release/cli/go.mod | 2 +- release/cli/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/release/cli/go.mod b/release/cli/go.mod index 68a94260c5bf..6019a1142491 100644 --- a/release/cli/go.mod +++ b/release/cli/go.mod @@ -9,7 +9,7 @@ require ( github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e github.com/fsouza/go-dockerclient v1.11.0 github.com/ghodss/yaml v1.0.0 - github.com/go-logr/logr v1.4.1 + github.com/go-logr/logr v1.4.2 github.com/mitchellh/go-homedir v1.1.0 github.com/onsi/gomega v1.33.1 github.com/pkg/errors v0.9.1 diff --git a/release/cli/go.sum b/release/cli/go.sum index 0ce0fcafee6f..321f132e5997 100644 --- a/release/cli/go.sum +++ b/release/cli/go.sum @@ -192,8 +192,8 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= From 040476b4faeb1b9904b53cf7c32a78d45410354d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 May 2024 18:39:58 -0700 Subject: [PATCH 158/193] Bump github.com/aws/aws-sdk-go from 1.52.3 to 1.53.8 in /release/cli (#8207) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.52.3 to 1.53.8. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.52.3...v1.53.8) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- release/cli/go.mod | 2 +- release/cli/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/release/cli/go.mod b/release/cli/go.mod index 6019a1142491..d989b59a0213 100644 --- a/release/cli/go.mod +++ b/release/cli/go.mod @@ -3,7 +3,7 @@ module github.com/aws/eks-anywhere/release/cli go 1.21 require ( - github.com/aws/aws-sdk-go v1.52.3 + github.com/aws/aws-sdk-go v1.53.8 github.com/aws/aws-sdk-go-v2 v1.27.0 github.com/aws/eks-anywhere v0.18.0 github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e diff --git a/release/cli/go.sum b/release/cli/go.sum index 321f132e5997..fc0e02e99328 100644 --- a/release/cli/go.sum +++ b/release/cli/go.sum @@ -56,8 +56,8 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:l github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY= github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/aws/aws-sdk-go v1.38.40/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.52.3 h1:BNPJmHOXNoM/iBWJKrvaQvJOweRcp3KLpzdb65CfQwU= -github.com/aws/aws-sdk-go v1.52.3/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.53.8 h1:eoqGb1WOHIrCFKo1d51cMcnt1ralfLFaEqRkC5Zzv8k= +github.com/aws/aws-sdk-go v1.53.8/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v1.27.0 h1:7bZWKoXhzI+mMR/HjdMx8ZCC5+6fY0lS5tr0bbgiLlo= github.com/aws/aws-sdk-go-v2 v1.27.0/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e h1:GB6Cn9yKEt31mDF7RrVWyM9WoppNkGYth8zBPIJGJ+w= From c4bff761bbb47680bfbb99dd0f236318f16db9f8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 May 2024 19:17:58 -0700 Subject: [PATCH 159/193] Bump github.com/vmware/govmomi from 0.34.2 to 0.37.2 (#8148) Bumps [github.com/vmware/govmomi](https://github.com/vmware/govmomi) from 0.34.2 to 0.37.2. - [Release notes](https://github.com/vmware/govmomi/releases) - [Changelog](https://github.com/vmware/govmomi/blob/main/CHANGELOG.md) - [Commits](https://github.com/vmware/govmomi/compare/v0.34.2...v0.37.2) --- updated-dependencies: - dependency-name: github.com/vmware/govmomi dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 7 ++++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index c25471dac216..6fc0d9107cb0 100644 --- a/go.mod +++ b/go.mod @@ -37,10 +37,10 @@ require ( github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.18.2 - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 github.com/tinkerbell/cluster-api-provider-tinkerbell v0.1.1-0.20220615214617-9e9c2a397288 github.com/tinkerbell/tink v0.8.0 - github.com/vmware/govmomi v0.34.2 + github.com/vmware/govmomi v0.37.2 go.uber.org/zap v1.26.0 golang.org/x/crypto v0.21.0 golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa diff --git a/go.sum b/go.sum index 8d800a6755b2..56291cd7858f 100644 --- a/go.sum +++ b/go.sum @@ -856,8 +856,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -881,8 +882,8 @@ github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv github.com/vektah/gqlparser/v2 v2.2.0/go.mod h1:i3mQIGIrbK2PD1RrCeMTlVbkF2FJ6WkU1KJlJlC+3F4= github.com/vektah/gqlparser/v2 v2.4.5 h1:C02NsyEsL4TXJB7ndonqTfuQOL4XPIu0aAWugdmTgmc= github.com/vektah/gqlparser/v2 v2.4.5/go.mod h1:flJWIR04IMQPGz+BXLrORkrARBxv/rtyIAFvd/MceW0= -github.com/vmware/govmomi v0.34.2 h1:o6ydkTVITOkpQU6HAf6tP5GvHFCNJlNUNlMsvFK77X4= -github.com/vmware/govmomi v0.34.2/go.mod h1:qWWT6n9mdCr/T9vySsoUqcI04sSEj4CqHXxtk/Y+Los= +github.com/vmware/govmomi v0.37.2 h1:5ANLoaTxWv600ZnoosJ2zXbM3A+EaxqGheEZbRN8YVE= +github.com/vmware/govmomi v0.37.2/go.mod h1:mtGWtM+YhTADHlCgJBiskSRPOZRsN9MSjPzaZLte/oQ= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= From dc21cf6966e2dca254886f3edba334e314c081c3 Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Wed, 22 May 2024 20:33:58 -0700 Subject: [PATCH 160/193] [PR BOT] Generate release testdata files (#8209) --- .../testdata/main-bundle-release.yaml | 60 +++++++++---------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/release/cli/pkg/operations/testdata/main-bundle-release.yaml b/release/cli/pkg/operations/testdata/main-bundle-release.yaml index a4543c62bba8..99ca48b2fe64 100644 --- a/release/cli/pkg/operations/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/operations/testdata/main-bundle-release.yaml @@ -447,11 +447,11 
@@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-nutanix os: linux - uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.4-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/infrastructure-components.yaml kubeVip: arch: - amd64 @@ -462,8 +462,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/metadata.yaml - version: v1.3.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/metadata.yaml + version: v1.3.4+abcdef1 packageController: credentialProviderPackage: arch: @@ -1225,11 +1225,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-nutanix os: linux - uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.4-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/infrastructure-components.yaml kubeVip: arch: - amd64 @@ -1240,8 +1240,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/metadata.yaml - version: v1.3.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/metadata.yaml + version: v1.3.4+abcdef1 packageController: credentialProviderPackage: arch: @@ -2003,11 +2003,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-nutanix os: linux - uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.3-eks-a-v0.0.0-dev-build.1 + uri: 
public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.4-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/infrastructure-components.yaml kubeVip: arch: - amd64 @@ -2018,8 +2018,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/metadata.yaml - version: v1.3.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/metadata.yaml + version: v1.3.4+abcdef1 packageController: credentialProviderPackage: arch: @@ -2781,11 +2781,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-nutanix os: linux - uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.4-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/infrastructure-components.yaml kubeVip: arch: - amd64 @@ -2796,8 +2796,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/metadata.yaml - version: v1.3.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/metadata.yaml + version: v1.3.4+abcdef1 packageController: credentialProviderPackage: arch: @@ -3559,11 +3559,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-nutanix os: linux - uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.4-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/cluster-template.yaml + uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/infrastructure-components.yaml kubeVip: arch: - amd64 @@ -3574,8 +3574,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/metadata.yaml - version: v1.3.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/metadata.yaml + version: v1.3.4+abcdef1 packageController: credentialProviderPackage: arch: @@ -4337,11 +4337,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-nutanix os: linux - uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.4-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/infrastructure-components.yaml kubeVip: arch: - amd64 @@ -4352,8 +4352,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/metadata.yaml - version: v1.3.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/metadata.yaml + version: v1.3.4+abcdef1 packageController: credentialProviderPackage: arch: From 95096a68bb4c75753e0470271fefd7db9ea47254 Mon Sep 17 00:00:00 2001 From: Abhay Krishna Date: Thu, 23 May 2024 21:18:59 -0400 Subject: [PATCH 161/193] Skip dependency review and govulncheck actions when only YAML files are changed (#8210) --- .github/workflows/vulnerability.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/vulnerability.yml b/.github/workflows/vulnerability.yml index 9f8522e0ce47..1900fdad88ec 100644 --- a/.github/workflows/vulnerability.yml +++ b/.github/workflows/vulnerability.yml @@ -6,6 +6,9 @@ on: branches: - main pull_request: + paths-ignore: + - '**.yaml' + - '**.yml' workflow_dispatch: schedule: # every day at 7am UTC From e49451ab09ff86a7068351f46c3472a0250d12b2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 23 May 2024 23:17:59 -0700 Subject: [PATCH 162/193] Bump 
helm.sh/helm/v3 from 3.14.4 to 3.15.0 in /release/cli (#8197) * --- updated-dependencies: - dependency-name: helm.sh/helm/v3 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Update controller-runtime to v0.18.2 --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Abhay Krishna Arunachalam --- release/Makefile | 2 +- release/cli/go.mod | 28 +++++++++++----------- release/cli/go.sum | 60 +++++++++++++++++++++++----------------------- 3 files changed, 45 insertions(+), 45 deletions(-) diff --git a/release/Makefile b/release/Makefile index 920ec2304060..9bbaa2bad199 100644 --- a/release/Makefile +++ b/release/Makefile @@ -45,7 +45,7 @@ GOBIN=$(shell go env GOBIN) endif # Setup Go -GOLANG_VERSION?="1.21" +GOLANG_VERSION?="1.22" GO_VERSION ?= $(shell source $(REPO_ROOT)/scripts/common.sh && build::common::get_go_path $(GOLANG_VERSION)) GO ?= $(GO_VERSION)/go diff --git a/release/cli/go.mod b/release/cli/go.mod index d989b59a0213..80492680e7ba 100644 --- a/release/cli/go.mod +++ b/release/cli/go.mod @@ -1,6 +1,6 @@ module github.com/aws/eks-anywhere/release/cli -go 1.21 +go 1.22.3 require ( github.com/aws/aws-sdk-go v1.53.8 @@ -16,10 +16,10 @@ require ( github.com/spf13/cobra v1.8.0 github.com/spf13/viper v1.18.2 golang.org/x/sync v0.7.0 - helm.sh/helm/v3 v3.14.4 - k8s.io/apimachinery v0.29.5 + helm.sh/helm/v3 v3.15.0 + k8s.io/apimachinery v0.30.0 k8s.io/helm v2.17.0+incompatible - sigs.k8s.io/controller-runtime v0.16.6 + sigs.k8s.io/controller-runtime v0.18.2 sigs.k8s.io/yaml v1.4.0 ) @@ -54,7 +54,7 @@ require ( github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v5.7.0+incompatible // indirect - github.com/evanphx/json-patch/v5 v5.8.0 // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect github.com/fatih/color v1.14.1 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -164,15 +164,15 @@ require ( gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.29.4 // indirect - k8s.io/apiextensions-apiserver v0.29.1 // indirect - k8s.io/apiserver v0.29.4 // indirect - k8s.io/cli-runtime v0.29.0 // indirect - k8s.io/client-go v0.29.4 // indirect - k8s.io/component-base v0.29.4 // indirect - k8s.io/klog/v2 v2.110.1 // indirect - k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect - k8s.io/kubectl v0.29.0 // indirect + k8s.io/api v0.30.0 // indirect + k8s.io/apiextensions-apiserver v0.30.0 // indirect + k8s.io/apiserver v0.30.0 // indirect + k8s.io/cli-runtime v0.30.0 // indirect + k8s.io/client-go v0.30.0 // indirect + k8s.io/component-base v0.30.0 // indirect + k8s.io/klog/v2 v2.120.1 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/kubectl v0.30.0 // indirect k8s.io/utils v0.0.0-20240102154912-e7106e64919e // indirect oras.land/oras-go v1.2.5 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect diff --git a/release/cli/go.sum b/release/cli/go.sum index fc0e02e99328..16ef77be5e03 100644 --- a/release/cli/go.sum +++ b/release/cli/go.sum @@ -159,8 +159,8 @@ github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi github.com/evanphx/json-patch v4.5.0+incompatible/go.mod 
h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI= github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro= -github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -191,7 +191,6 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= @@ -602,8 +601,9 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -650,8 +650,8 @@ go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCD go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod 
h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= @@ -868,32 +868,32 @@ gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= -helm.sh/helm/v3 v3.14.4 h1:6FSpEfqyDalHq3kUr4gOMThhgY55kXUEjdQoyODYnrM= -helm.sh/helm/v3 v3.14.4/go.mod h1:Tje7LL4gprZpuBNTbG34d1Xn5NmRT3OWfBRwpOSer9I= +helm.sh/helm/v3 v3.15.0 h1:gcLxHeFp0Hfo7lYi6KIZ84ZyvlAnfFRSJ8lTL3zvG5U= +helm.sh/helm/v3 v3.15.0/go.mod h1:fvfoRcB8UKRUV5jrIfOTaN/pG1TPhuqSb56fjYdTKXg= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.17.2/go.mod h1:BS9fjjLc4CMuqfSO8vgbHPKMt5+SF0ET6u/RVDihTo4= -k8s.io/api v0.29.4 h1:WEnF/XdxuCxdG3ayHNRR8yH3cI1B/llkWBma6bq4R3w= -k8s.io/api v0.29.4/go.mod h1:DetSv0t4FBTcEpfA84NJV3g9a7+rSzlUHk5ADAYHUv0= +k8s.io/api v0.30.0 h1:siWhRq7cNjy2iHssOB9SCGNCl2spiF1dO3dABqZ8niA= +k8s.io/api v0.30.0/go.mod h1:OPlaYhoHs8EQ1ql0R/TsUgaRPhpKNxIMrKQfWUp8QSE= k8s.io/apiextensions-apiserver v0.17.2/go.mod h1:4KdMpjkEjjDI2pPfBA15OscyNldHWdBCfsWMDWAmSTs= -k8s.io/apiextensions-apiserver v0.29.1 h1:S9xOtyk9M3Sk1tIpQMu9wXHm5O2MX6Y1kIpPMimZBZw= -k8s.io/apiextensions-apiserver v0.29.1/go.mod h1:zZECpujY5yTW58co8V2EQR4BD6A9pktVgHhvc0uLfeU= +k8s.io/apiextensions-apiserver v0.30.0 h1:jcZFKMqnICJfRxTgnC4E+Hpcq8UEhT8B2lhBcQ+6uAs= +k8s.io/apiextensions-apiserver v0.30.0/go.mod h1:N9ogQFGcrbWqAY9p2mUAL5mGxsLqwgtUce127VtRX5Y= k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= -k8s.io/apimachinery v0.29.5 h1:Hofa2BmPfpoT+IyDTlcPdCHSnHtEQMoJYGVoQpRTfv4= -k8s.io/apimachinery v0.29.5/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y= +k8s.io/apimachinery v0.30.0 h1:qxVPsyDM5XS96NIh9Oj6LavoVFYff/Pon9cZeDIkHHA= +k8s.io/apimachinery v0.30.0/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= k8s.io/apiserver v0.17.2/go.mod h1:lBmw/TtQdtxvrTk0e2cgtOxHizXI+d0mmGQURIHQZlo= -k8s.io/apiserver v0.29.4 h1:wPwGOO58GQOpRiZu59P5eRoDcB7QtV+QBglkRiXwCiM= -k8s.io/apiserver v0.29.4/go.mod h1:VqTF9t98HVfhKZVRohCPezsdUt9u2g3bHKftxGcXoRo= -k8s.io/cli-runtime v0.29.0 h1:q2kC3cex4rOBLfPOnMSzV2BIrrQlx97gxHJs21KxKS4= -k8s.io/cli-runtime v0.29.0/go.mod h1:VKudXp3X7wR45L+nER85YUzOQIru28HQpXr0mTdeCrk= +k8s.io/apiserver v0.30.0 h1:QCec+U72tMQ+9tR6A0sMBB5Vh6ImCEkoKkTDRABWq6M= +k8s.io/apiserver v0.30.0/go.mod h1:smOIBq8t0MbKZi7O7SyIpjPsiKJ8qa+llcFCluKyqiY= +k8s.io/cli-runtime v0.30.0 h1:0vn6/XhOvn1RJ2KJOC6IRR2CGqrpT6QQF4+8pYpWQ48= +k8s.io/cli-runtime v0.30.0/go.mod h1:vATpDMATVTMA79sZ0YUCzlMelf6rUjoBzlp+RnoM+cg= k8s.io/client-go v0.17.2/go.mod h1:QAzRgsa0C2xl4/eVpeVAZMvikCn8Nm81yqVx3Kk9XYI= -k8s.io/client-go v0.29.4 h1:79ytIedxVfyXV8rpH3jCBW0u+un0fxHDwX5F9K8dPR8= -k8s.io/client-go v0.29.4/go.mod h1:kC1thZQ4zQWYwldsfI088BbK6RkxK+aF5ebV8y9Q4tk= +k8s.io/client-go v0.30.0 h1:sB1AGGlhY/o7KCyCEQ0bPWzYDL0pwOZO4vAtTSh/gJQ= +k8s.io/client-go v0.30.0/go.mod h1:g7li5O5256qe6TYdAMyX/otJqMhIiGgTapdLchhmOaY= k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= k8s.io/component-base v0.17.2/go.mod h1:zMPW3g5aH7cHJpKYQ/ZsGMcgbsA/VyhEugF3QT1awLs= -k8s.io/component-base v0.29.4 
h1:xeKzuuHI/1tjleu5jycDAcYbhAxeGHCQBZUY2eRIkOo= -k8s.io/component-base v0.29.4/go.mod h1:pYjt+oEZP9gtmwSikwAJgfSBikqKX2gOqRat0QjmQt0= +k8s.io/component-base v0.30.0 h1:cj6bp38g0ainlfYtaOQuRELh5KSYjhKxM+io7AUIk4o= +k8s.io/component-base v0.30.0/go.mod h1:V9x/0ePFNaKeKYA3bOvIbrNoluTSG+fSJKjLdjOoeXQ= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/helm v2.17.0+incompatible h1:Bpn6o1wKLYqKM3+Osh8e+1/K2g/GsQJ4F4yNF2+deao= @@ -901,13 +901,13 @@ k8s.io/helm v2.17.0+incompatible/go.mod h1:LZzlS4LQBHfciFOurYBFkCMTaZ0D1l+p0teMg k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= -k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= -k8s.io/kubectl v0.29.0 h1:Oqi48gXjikDhrBF67AYuZRTcJV4lg2l42GmvsP7FmYI= -k8s.io/kubectl v0.29.0/go.mod h1:0jMjGWIcMIQzmUaMgAzhSELv5WtHo2a8pq67DtviAJs= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/kubectl v0.30.0 h1:xbPvzagbJ6RNYVMVuiHArC1grrV5vSmmIcSZuCdzRyk= +k8s.io/kubectl v0.30.0/go.mod h1:zgolRw2MQXLPwmic2l/+iHs239L49fhSeICuMhQQXTI= k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= @@ -919,8 +919,8 @@ modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= oras.land/oras-go v1.2.5 h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo= oras.land/oras-go v1.2.5/go.mod h1:PuAwRShRZCsZb7g8Ar3jKKQR/2A/qN+pkYxIOd/FAoo= sigs.k8s.io/controller-runtime v0.5.0/go.mod h1:REiJzC7Y00U+2YkMbT8wxgrsX5USpXKGhb2sCtAXiT8= -sigs.k8s.io/controller-runtime v0.16.6 h1:FiXwTuFF5ZJKmozfP2Z0j7dh6kmxP4Ou1KLfxgKKC3I= -sigs.k8s.io/controller-runtime v0.16.6/go.mod h1:+dQzkZxnylD0u49e0a+7AR+vlibEBaThmPca7lTyUsI= +sigs.k8s.io/controller-runtime v0.18.2 h1:RqVW6Kpeaji67CY5nPEfRz6ZfFMk0lWQlNrLqlNpx+Q= +sigs.k8s.io/controller-runtime v0.18.2/go.mod h1:tuAt1+wbVsXIT8lPtk5RURxqAnq7xkpv2Mhttslg7Hw= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0= From 230d5f0815d02a9a53cc918031d7ce1577c1be56 Mon Sep 17 00:00:00 2001 From: Ilya Alekseyev Date: Fri, 24 May 2024 19:49:48 +0200 
Subject: [PATCH 163/193] Fix bug when installer does not create CCM secret for Nutanix workload cluster (#8191)

* Fix bug where installer does not create CCM secret for Nutanix workload cluster

- fixed templates
- fixed reconciler
- improved tests

* Fix linter errors
---
 pkg/providers/nutanix/config/cp-template.yaml | 28 +++++++++++++++++++
 pkg/providers/nutanix/controlplane.go         | 10 +++++++
 .../nutanix/reconciler/reconciler.go          | 10 ++++++-
 pkg/providers/nutanix/template.go             |  5 +++-
 pkg/providers/nutanix/template_test.go        | 12 ++++++++
 ...d_cluster_api_additional_trust_bundle.yaml | 28 +++++++++++++++++++
 ...uster_api_server_cert_san_domain_name.yaml | 28 +++++++++++++++++++
 ...pected_cluster_api_server_cert_san_ip.yaml | 28 +++++++++++++++++++
 ...xpected_results_additional_categories.yaml | 28 +++++++++++++++++++
 .../expected_results_etcd_encryption.yaml     | 28 +++++++++++++++++++
 ...expected_results_etcd_encryption_1_29.yaml | 28 +++++++++++++++++++
 .../expected_results_external_etcd.yaml       | 28 +++++++++++++++++++
 ...d_results_external_etcd_with_optional.yaml | 28 +++++++++++++++++++
 .../testdata/expected_results_iamauth.yaml    | 28 +++++++++++++++++++
 .../testdata/expected_results_irsa.yaml       | 28 +++++++++++++++++++
 .../expected_results_node_taints_labels.yaml  | 28 +++++++++++++++++++
 .../testdata/expected_results_oidc.yaml       | 28 +++++++++++++++++++
 .../testdata/expected_results_project.yaml    | 28 +++++++++++++++++++
 .../testdata/expected_results_proxy.yaml      | 28 +++++++++++++++++++
 .../expected_results_registry_mirror.yaml     | 28 +++++++++++++++++++
 20 files changed, 483 insertions(+), 2 deletions(-)

diff --git a/pkg/providers/nutanix/config/cp-template.yaml b/pkg/providers/nutanix/config/cp-template.yaml index c9d7694166f8..a660cc953668 100644 --- a/pkg/providers/nutanix/config/cp-template.yaml +++ b/pkg/providers/nutanix/config/cp-template.yaml @@ -781,3 +781,31 @@ spec: name: user-ca-bundle {{- end }} strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "{{.clusterName}}-nutanix-ccm-secret" + namespace: "{{.eksaSystemNamespace}}" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "{{ .nutanixPCUsername }}", + "password": "{{ .nutanixPCPassword }}" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/controlplane.go b/pkg/providers/nutanix/controlplane.go index c2b1fc4fd898..d774f8ba0e81 100644 --- a/pkg/providers/nutanix/controlplane.go +++ b/pkg/providers/nutanix/controlplane.go @@ -27,6 +27,7 @@ type ControlPlane struct { BaseControlPlane ConfigMaps []*corev1.ConfigMap ClusterResourceSets []*addonsv1.ClusterResourceSet + Secrets []*corev1.Secret } // Objects returns the control plane objects associated with the Nutanix cluster.
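// Illustrative sketch, not from this patch (the ccmSecret variable below is
// hypothetical): once Secrets is appended inside Objects() in the next hunk,
// any caller that applies everything returned here creates the CCM secret
// together with the ConfigMaps and ClusterResourceSets:
//
//	cp := ControlPlane{Secrets: []*corev1.Secret{ccmSecret}}
//	objs := cp.Objects() // now also contains ccmSecret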
@@ -34,6 +35,7 @@ func (p ControlPlane) Objects() []kubernetes.Object { o := p.BaseControlPlane.Objects() o = appendKubeObjects[*corev1.ConfigMap](o, p.ConfigMaps) o = appendKubeObjects[*addonsv1.ClusterResourceSet](o, p.ClusterResourceSets) + o = appendKubeObjects[*corev1.Secret](o, p.Secrets) return o } @@ -154,6 +156,12 @@ func newControlPlaneParser(logger logr.Logger) (*yamlutil.Parser, *ControlPlaneB return &addonsv1.ClusterResourceSet{} }, ), + yamlutil.NewMapping( + constants.SecretKind, + func() yamlutil.APIObject { + return &corev1.Secret{} + }, + ), ) if err != nil { @@ -183,6 +191,8 @@ func buildObjects(cp *ControlPlane, lookup yamlutil.ObjectLookup) { cp.ConfigMaps = append(cp.ConfigMaps, obj.(*corev1.ConfigMap)) case constants.ClusterResourceSetKind: cp.ClusterResourceSets = append(cp.ClusterResourceSets, obj.(*addonsv1.ClusterResourceSet)) + case constants.SecretKind: + cp.Secrets = append(cp.Secrets, obj.(*corev1.Secret)) } } } diff --git a/pkg/providers/nutanix/reconciler/reconciler.go b/pkg/providers/nutanix/reconciler/reconciler.go index 1ad3ba8a9fb3..2db9ca70ca08 100644 --- a/pkg/providers/nutanix/reconciler/reconciler.go +++ b/pkg/providers/nutanix/reconciler/reconciler.go @@ -3,6 +3,7 @@ package reconciler import ( "context" "fmt" + "os" "reflect" "github.com/go-logr/logr" @@ -137,6 +138,7 @@ func (r *Reconciler) reconcileClusterSecret(ctx context.Context, log logr.Logger // Reconcile reconciles the cluster to the desired state. func (r *Reconciler) Reconcile(ctx context.Context, log logr.Logger, c *anywherev1.Cluster) (controller.Result, error) { log = log.WithValues("provider", "nutanix") + clusterSpec, err := cluster.BuildSpec(ctx, clientutil.NewKubeClient(r.client), c) if err != nil { return controller.Result{}, err @@ -182,6 +184,9 @@ func (r *Reconciler) ValidateClusterSpec(ctx context.Context, log logr.Logger, c return controller.ResultWithReturn(), nil } + os.Setenv(constants.EksaNutanixUsernameKey, creds.PrismCentral.Username) + os.Setenv(constants.EksaNutanixPasswordKey, creds.PrismCentral.Password) + return controller.Result{}, nil } @@ -198,13 +203,16 @@ func (r *Reconciler) ReconcileControlPlane(ctx context.Context, log logr.Logger, } func toClientControlPlane(cp *nutanix.ControlPlane) *clusters.ControlPlane { - other := make([]client.Object, 0, len(cp.ConfigMaps)+len(cp.ClusterResourceSets)+1) + other := make([]client.Object, 0, len(cp.ConfigMaps)+len(cp.ClusterResourceSets)+len(cp.Secrets)+1) for _, o := range cp.ClusterResourceSets { other = append(other, o) } for _, o := range cp.ConfigMaps { other = append(other, o) } + for _, o := range cp.Secrets { + other = append(other, o) + } return &clusters.ControlPlane{ Cluster: cp.Cluster, diff --git a/pkg/providers/nutanix/template.go b/pkg/providers/nutanix/template.go index f8bfe8738d1b..1b0f0034513a 100644 --- a/pkg/providers/nutanix/template.go +++ b/pkg/providers/nutanix/template.go @@ -59,7 +59,7 @@ func (ntb *TemplateBuilder) GenerateCAPISpecControlPlane(clusterSpec *cluster.Sp etcdMachineSpec = *ntb.etcdMachineSpec } - values, err := buildTemplateMapCP(ntb.datacenterSpec, clusterSpec, *ntb.controlPlaneMachineSpec, etcdMachineSpec) + values, err := buildTemplateMapCP(ntb.datacenterSpec, clusterSpec, *ntb.controlPlaneMachineSpec, etcdMachineSpec, ntb.creds) if err != nil { return nil, err } @@ -156,6 +156,7 @@ func buildTemplateMapCP( clusterSpec *cluster.Spec, controlPlaneMachineSpec v1alpha1.NutanixMachineConfigSpec, etcdMachineSpec v1alpha1.NutanixMachineConfigSpec, + creds 
credentials.BasicAuthCredential, ) (map[string]interface{}, error) { versionsBundle := clusterSpec.RootVersionsBundle() format := "cloud-config" @@ -218,6 +219,8 @@ func buildTemplateMapCP( "subnetName": controlPlaneMachineSpec.Subnet.Name, "subnetUUID": controlPlaneMachineSpec.Subnet.UUID, "apiServerCertSANs": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.CertSANs, + "nutanixPCUsername": creds.PrismCentral.BasicAuth.Username, + "nutanixPCPassword": creds.PrismCentral.BasicAuth.Password, } if controlPlaneMachineSpec.Project != nil { diff --git a/pkg/providers/nutanix/template_test.go b/pkg/providers/nutanix/template_test.go index 37c52f79524b..9cdaf37f61a6 100644 --- a/pkg/providers/nutanix/template_test.go +++ b/pkg/providers/nutanix/template_test.go @@ -549,6 +549,9 @@ func TestTemplateBuilder_CertSANs(t *testing.T) { clusterSpec := test.NewFullClusterSpec(t, tc.Input) machineCfg := clusterSpec.NutanixMachineConfig(clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name) + + t.Setenv(constants.EksaNutanixUsernameKey, "admin") + t.Setenv(constants.EksaNutanixPasswordKey, "password") creds := GetCredsFromEnv() bldr := NewNutanixTemplateBuilder(&clusterSpec.NutanixDatacenter.Spec, &machineCfg.Spec, nil, @@ -574,6 +577,9 @@ func TestTemplateBuilder_additionalTrustBundle(t *testing.T) { clusterSpec := test.NewFullClusterSpec(t, tc.Input) machineCfg := clusterSpec.NutanixMachineConfig(clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name) + + t.Setenv(constants.EksaNutanixUsernameKey, "admin") + t.Setenv(constants.EksaNutanixPasswordKey, "password") creds := GetCredsFromEnv() bldr := NewNutanixTemplateBuilder(&clusterSpec.NutanixDatacenter.Spec, &machineCfg.Spec, nil, @@ -599,6 +605,9 @@ func TestTemplateBuilderEtcdEncryption(t *testing.T) { clusterSpec := test.NewFullClusterSpec(t, tc.Input) machineCfg := clusterSpec.NutanixMachineConfig(clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name) + + t.Setenv(constants.EksaNutanixUsernameKey, "admin") + t.Setenv(constants.EksaNutanixPasswordKey, "password") creds := GetCredsFromEnv() bldr := NewNutanixTemplateBuilder(&clusterSpec.NutanixDatacenter.Spec, &machineCfg.Spec, nil, @@ -624,6 +633,9 @@ func TestTemplateBuilderEtcdEncryptionKubernetes129(t *testing.T) { clusterSpec := test.NewFullClusterSpec(t, tc.Input) machineCfg := clusterSpec.NutanixMachineConfig(clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name) + + t.Setenv(constants.EksaNutanixUsernameKey, "admin") + t.Setenv(constants.EksaNutanixPasswordKey, "password") creds := GetCredsFromEnv() bldr := NewNutanixTemplateBuilder(&clusterSpec.NutanixDatacenter.Spec, &machineCfg.Spec, nil, diff --git a/pkg/providers/nutanix/testdata/expected_cluster_api_additional_trust_bundle.yaml b/pkg/providers/nutanix/testdata/expected_cluster_api_additional_trust_bundle.yaml index 8f7e16b55170..c7b78abe1447 100644 --- a/pkg/providers/nutanix/testdata/expected_cluster_api_additional_trust_bundle.yaml +++ b/pkg/providers/nutanix/testdata/expected_cluster_api_additional_trust_bundle.yaml @@ -641,3 +641,31 @@ spec: - kind: ConfigMap name: user-ca-bundle strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "eksa-unit-test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + 
"password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_cluster_api_server_cert_san_domain_name.yaml b/pkg/providers/nutanix/testdata/expected_cluster_api_server_cert_san_domain_name.yaml index a3f90efaa920..e07e818237ea 100644 --- a/pkg/providers/nutanix/testdata/expected_cluster_api_server_cert_san_domain_name.yaml +++ b/pkg/providers/nutanix/testdata/expected_cluster_api_server_cert_san_domain_name.yaml @@ -582,3 +582,31 @@ spec: - kind: Secret name: test-nutanix-ccm-secret strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_cluster_api_server_cert_san_ip.yaml b/pkg/providers/nutanix/testdata/expected_cluster_api_server_cert_san_ip.yaml index 8402426982f9..be3eead5c104 100644 --- a/pkg/providers/nutanix/testdata/expected_cluster_api_server_cert_san_ip.yaml +++ b/pkg/providers/nutanix/testdata/expected_cluster_api_server_cert_san_ip.yaml @@ -582,3 +582,31 @@ spec: - kind: Secret name: test-nutanix-ccm-secret strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_results_additional_categories.yaml b/pkg/providers/nutanix/testdata/expected_results_additional_categories.yaml index 1eb2c0b146c1..f291088809f1 100644 --- a/pkg/providers/nutanix/testdata/expected_results_additional_categories.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_additional_categories.yaml @@ -586,3 +586,31 @@ spec: - kind: Secret name: eksa-unit-test-nutanix-ccm-secret strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "eksa-unit-test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_results_etcd_encryption.yaml b/pkg/providers/nutanix/testdata/expected_results_etcd_encryption.yaml index a51fa434fb18..833a2ec9d811 100644 --- a/pkg/providers/nutanix/testdata/expected_results_etcd_encryption.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_etcd_encryption.yaml @@ -631,3 +631,31 @@ spec: - kind: Secret name: test-nutanix-ccm-secret strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: 
v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_results_etcd_encryption_1_29.yaml b/pkg/providers/nutanix/testdata/expected_results_etcd_encryption_1_29.yaml index f9693fd1c865..9e212a7a23c2 100644 --- a/pkg/providers/nutanix/testdata/expected_results_etcd_encryption_1_29.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_etcd_encryption_1_29.yaml @@ -661,3 +661,31 @@ spec: - kind: Secret name: test-nutanix-ccm-secret strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_results_external_etcd.yaml b/pkg/providers/nutanix/testdata/expected_results_external_etcd.yaml index d106949f640f..fe8fbcd9ccc6 100644 --- a/pkg/providers/nutanix/testdata/expected_results_external_etcd.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_external_etcd.yaml @@ -643,3 +643,31 @@ spec: - kind: Secret name: eksa-unit-test-nutanix-ccm-secret strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "eksa-unit-test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_results_external_etcd_with_optional.yaml b/pkg/providers/nutanix/testdata/expected_results_external_etcd_with_optional.yaml index 0bb73d48d754..d60332022f26 100644 --- a/pkg/providers/nutanix/testdata/expected_results_external_etcd_with_optional.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_external_etcd_with_optional.yaml @@ -657,3 +657,31 @@ spec: - kind: Secret name: eksa-unit-test-nutanix-ccm-secret strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "eksa-unit-test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_results_iamauth.yaml b/pkg/providers/nutanix/testdata/expected_results_iamauth.yaml index a63d85d72451..cded83bf575d 100644 --- a/pkg/providers/nutanix/testdata/expected_results_iamauth.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_iamauth.yaml @@ -625,3 +625,31 @@ spec: - kind: Secret name: eksa-unit-test-nutanix-ccm-secret strategy: Reconcile +--- +apiVersion: v1 
+kind: Secret +metadata: + name: "eksa-unit-test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_results_irsa.yaml b/pkg/providers/nutanix/testdata/expected_results_irsa.yaml index a9ae7ec25f98..5f80eb7c2b1f 100644 --- a/pkg/providers/nutanix/testdata/expected_results_irsa.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_irsa.yaml @@ -582,3 +582,31 @@ spec: - kind: Secret name: eksa-unit-test-nutanix-ccm-secret strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "eksa-unit-test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_results_node_taints_labels.yaml b/pkg/providers/nutanix/testdata/expected_results_node_taints_labels.yaml index 3327ef50f7f3..07326e5f9fee 100644 --- a/pkg/providers/nutanix/testdata/expected_results_node_taints_labels.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_node_taints_labels.yaml @@ -591,3 +591,31 @@ spec: - kind: Secret name: eksa-unit-test-nutanix-ccm-secret strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "eksa-unit-test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_results_oidc.yaml b/pkg/providers/nutanix/testdata/expected_results_oidc.yaml index dc955647a810..0bafba10a28d 100644 --- a/pkg/providers/nutanix/testdata/expected_results_oidc.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_oidc.yaml @@ -583,3 +583,31 @@ spec: - kind: Secret name: eksa-unit-test-nutanix-ccm-secret strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "eksa-unit-test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_results_project.yaml b/pkg/providers/nutanix/testdata/expected_results_project.yaml index 678fbae3c53f..f42167b0b87e 100644 --- a/pkg/providers/nutanix/testdata/expected_results_project.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_project.yaml @@ -585,3 +585,31 @@ spec: - kind: Secret name: eksa-unit-test-nutanix-ccm-secret 
strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "eksa-unit-test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_results_proxy.yaml b/pkg/providers/nutanix/testdata/expected_results_proxy.yaml index ac8892124261..ce933ceca1ce 100644 --- a/pkg/providers/nutanix/testdata/expected_results_proxy.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_proxy.yaml @@ -590,3 +590,31 @@ spec: - kind: Secret name: eksa-unit-test-nutanix-ccm-secret strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "eksa-unit-test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_results_registry_mirror.yaml b/pkg/providers/nutanix/testdata/expected_results_registry_mirror.yaml index eedffb23ace0..97c32981ec50 100644 --- a/pkg/providers/nutanix/testdata/expected_results_registry_mirror.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_registry_mirror.yaml @@ -631,3 +631,31 @@ spec: - kind: Secret name: eksa-unit-test-nutanix-ccm-secret strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "eksa-unit-test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set From 0e112c0d8febd75d531eb52f81b32fffccaade15 Mon Sep 17 00:00:00 2001 From: Mitali Paygude Date: Fri, 24 May 2024 15:53:04 -0700 Subject: [PATCH 164/193] Kubelet configuration customization Docker cp and wn (#8187) --- .../anywhere.eks.amazonaws.com_clusters.yaml | 12 +- config/manifest/eksa-components.yaml | 12 +- go.mod | 1 + go.sum | 2 + pkg/api/v1alpha1/cluster.go | 49 +++++++ pkg/api/v1alpha1/cluster_test.go | 133 ++++++++++++++++++ pkg/api/v1alpha1/cluster_types.go | 9 +- pkg/api/v1alpha1/zz_generated.deepcopy.go | 8 ++ pkg/providers/docker/config/template-cp.yaml | 15 ++ pkg/providers/docker/config/template-md.yaml | 13 +- pkg/providers/docker/docker.go | 21 +++ pkg/providers/docker/docker_test.go | 59 ++++++-- 12 files changed, 322 insertions(+), 12 deletions(-) diff --git a/config/crd/bases/anywhere.eks.amazonaws.com_clusters.yaml b/config/crd/bases/anywhere.eks.amazonaws.com_clusters.yaml index 492d5b4c0664..7033fcb0363e 100644 --- a/config/crd/bases/anywhere.eks.amazonaws.com_clusters.yaml +++ b/config/crd/bases/anywhere.eks.amazonaws.com_clusters.yaml @@ -178,6 +178,11 @@ spec: required: - host type: object + kubeletConfiguration: + description: KubeletConfiguration is a struct that exposes the + Kubelet settings 
for the user to set on control plane nodes. + type: object + x-kubernetes-preserve-unknown-fields: true labels: additionalProperties: type: string @@ -572,8 +577,13 @@ spec: description: Count defines the number of desired worker nodes. Defaults to 1. type: integer + kubeletConfiguration: + description: KubeletConfiguration is a struct that exposes the + Kubelet settings for the user to set on worker nodes. + type: object + x-kubernetes-preserve-unknown-fields: true kubernetesVersion: - description: KuberenetesVersion defines the version for worker + description: KubernetesVersion defines the version for worker nodes. If not set, the top level spec kubernetesVersion will be used. type: string diff --git a/config/manifest/eksa-components.yaml b/config/manifest/eksa-components.yaml index 76ff8d1e65da..179d992644b2 100644 --- a/config/manifest/eksa-components.yaml +++ b/config/manifest/eksa-components.yaml @@ -3881,6 +3881,11 @@ spec: required: - host type: object + kubeletConfiguration: + description: KubeletConfiguration is a struct that exposes the + Kubelet settings for the user to set on control plane nodes. + type: object + x-kubernetes-preserve-unknown-fields: true labels: additionalProperties: type: string @@ -4275,8 +4280,13 @@ spec: description: Count defines the number of desired worker nodes. Defaults to 1. type: integer + kubeletConfiguration: + description: KubeletConfiguration is a struct that exposes the + Kubelet settings for the user to set on worker nodes. + type: object + x-kubernetes-preserve-unknown-fields: true kubernetesVersion: - description: KuberenetesVersion defines the version for worker + description: KubernetesVersion defines the version for worker nodes. If not set, the top level spec kubernetesVersion will be used. type: string diff --git a/go.mod b/go.mod index 6fc0d9107cb0..2e49888ac537 100644 --- a/go.mod +++ b/go.mod @@ -196,6 +196,7 @@ require ( k8s.io/apiextensions-apiserver v0.29.1 // indirect k8s.io/cluster-bootstrap v0.28.5 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/kubelet v0.29.3 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index 56291cd7858f..e59d62b47044 100644 --- a/go.sum +++ b/go.sum @@ -1507,6 +1507,8 @@ k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKf k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/kubelet v0.29.3 h1:X9h0ZHzc+eUeNTaksbN0ItHyvGhQ7Z0HPjnQD2oHdwU= +k8s.io/kubelet v0.29.3/go.mod h1:jDiGuTkFOUynyBKzOoC1xRSWlgAZ9UPcTYeFyjr6vas= k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= diff --git a/pkg/api/v1alpha1/cluster.go b/pkg/api/v1alpha1/cluster.go index c36230b0808b..9303ca3556b3 100644 --- a/pkg/api/v1alpha1/cluster.go +++ b/pkg/api/v1alpha1/cluster.go @@ -15,10 +15,12 @@ import ( "strings" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 
"k8s.io/apimachinery/pkg/apis/meta/v1/validation" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/validation/field" yamlutil "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/kubelet/config/v1beta1" "sigs.k8s.io/yaml" "github.com/aws/eks-anywhere/pkg/constants" @@ -192,6 +194,8 @@ var clusterConfigValidations = []func(*Cluster) error{ validateControlPlaneCertSANs, validateControlPlaneAPIServerExtraArgs, validateControlPlaneAPIServerOIDCExtraArgs, + validateControlPlaneKubeletConfiguration, + validateWorkerNodeKubeletConfiguration, } // GetClusterConfig parses a Cluster object from a multiobject yaml file in disk @@ -530,6 +534,51 @@ func validateControlPlaneAPIServerOIDCExtraArgs(clusterConfig *Cluster) error { return nil } +func validateControlPlaneKubeletConfiguration(clusterConfig *Cluster) error { + cpKubeletConfig := clusterConfig.Spec.ControlPlaneConfiguration.KubeletConfiguration + + return validateKubeletConfiguration(cpKubeletConfig) +} + +func validateWorkerNodeKubeletConfiguration(clusterConfig *Cluster) error { + workerNodeGroupConfigs := clusterConfig.Spec.WorkerNodeGroupConfigurations + + for _, workerNodeGroupConfig := range workerNodeGroupConfigs { + wnKubeletConfig := workerNodeGroupConfig.KubeletConfiguration + + if err := validateKubeletConfiguration(wnKubeletConfig); err != nil { + return err + } + } + + return nil +} + +func validateKubeletConfiguration(eksakubeconfig *unstructured.Unstructured) error { + if eksakubeconfig == nil { + return nil + } + + var kubeletConfig v1beta1.KubeletConfiguration + + kcString, err := yaml.Marshal(eksakubeconfig) + if err != nil { + return err + } + + _, err = yaml.YAMLToJSONStrict([]byte(kcString)) + if err != nil { + return fmt.Errorf("unmarshaling the yaml, malformed yaml %v", err) + } + + err = yaml.UnmarshalStrict(kcString, &kubeletConfig) + if err != nil { + return fmt.Errorf("unmarshaling KubeletConfiguration for %v", err) + } + + return nil +} + func validateWorkerNodeGroups(clusterConfig *Cluster) error { workerNodeGroupConfigs := clusterConfig.Spec.WorkerNodeGroupConfigurations if len(workerNodeGroupConfigs) <= 0 { diff --git a/pkg/api/v1alpha1/cluster_test.go b/pkg/api/v1alpha1/cluster_test.go index 5a7c5e8b097f..fc6ef7e2423f 100644 --- a/pkg/api/v1alpha1/cluster_test.go +++ b/pkg/api/v1alpha1/cluster_test.go @@ -14,6 +14,7 @@ import ( "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "sigs.k8s.io/yaml" "github.com/aws/eks-anywhere/pkg/features" @@ -1096,6 +1097,138 @@ func TestGetAndValidateClusterConfig(t *testing.T) { } } +type clusterOpt func(c *Cluster) + +func baseCluster(opts ...clusterOpt) *Cluster { + c := &Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + Spec: ClusterSpec{ + ControlPlaneConfiguration: ControlPlaneConfiguration{ + Count: 1, + Endpoint: &Endpoint{ + Host: "1.1.1.1", + }, + MachineGroupRef: &Ref{}, + }, + WorkerNodeGroupConfigurations: []WorkerNodeGroupConfiguration{ + { + Count: ptr.Int(3), + MachineGroupRef: &Ref{ + Kind: VSphereMachineConfigKind, + Name: "eksa-unit-test-1", + }, + Name: "wn-1", + }, + }, + KubernetesVersion: Kube129, + ExternalEtcdConfiguration: &ExternalEtcdConfiguration{ + MachineGroupRef: &Ref{ + Kind: VSphereMachineConfigKind, + Name: "eksa-unit-test-etcd", + }, + Count: 1, + }, + DatacenterRef: Ref{ + Kind: VSphereDatacenterKind, + Name: "eksa-unit-test", + }, + ClusterNetwork: ClusterNetwork{ + CNIConfig: 
&CNIConfig{Cilium: &CiliumConfig{}}, + Pods: Pods{ + CidrBlocks: []string{"192.168.0.0/16"}, + }, + Services: Services{ + CidrBlocks: []string{"10.96.0.0/12"}, + }, + }, + }, + } + + for _, opt := range opts { + opt(c) + } + + return c +} + +func TestValidateClusterConfigContent(t *testing.T) { + tests := []struct { + testName string + cluster *Cluster + wantErr bool + err string + }{ + { + testName: "valid cluster without kubelet", + cluster: baseCluster(), + wantErr: false, + }, + { + testName: "valid cluster with kubelet config for cp and wn", + cluster: baseCluster(func(c *Cluster) { + c.Spec.ControlPlaneConfiguration.KubeletConfiguration = &unstructured.Unstructured{ + Object: map[string]interface{}{ + "maxPods": 20, + "apiVersion": "kubelet.config.k8s.io/v1beta1", + "kind": "KubeletConfiguration", + }, + } + c.Spec.WorkerNodeGroupConfigurations[0].KubeletConfiguration = &unstructured.Unstructured{ + Object: map[string]interface{}{ + "maxPods": 20, + "apiVersion": "kubelet.config.k8s.io/v1beta1", + "kind": "KubeletConfiguration", + }, + } + }), + wantErr: false, + }, + { + testName: "invalid cluster with kubelet config for cp", + cluster: baseCluster(func(c *Cluster) { + c.Spec.ControlPlaneConfiguration.KubeletConfiguration = &unstructured.Unstructured{ + Object: map[string]interface{}{ + "maxPodss": 20, + "apiVersion": "kubelet.config.k8s.io/v1beta1", + "kind": "KubeletConfiguration", + }, + } + }), + wantErr: true, + err: "unknown field", + }, + { + testName: "invalid cluster with kubelet config for wn", + cluster: baseCluster(func(c *Cluster) { + c.Spec.WorkerNodeGroupConfigurations[0].KubeletConfiguration = &unstructured.Unstructured{ + Object: map[string]interface{}{ + "maxPodss": 20, + "apiVersion": "kubelet.config.k8s.io/v1beta1", + "kind": "KubeletConfiguration", + }, + } + }), + wantErr: true, + err: "unknown field", + }, + } + + for _, tt := range tests { + t.Run(tt.testName, func(t *testing.T) { + err := ValidateClusterConfigContent(tt.cluster) + if (err != nil) != tt.wantErr { + t.Fatalf("ValidateClusterConfigContent() error = %v, wantErr %v", err, tt.wantErr) + } + + if len(tt.err) > 0 && !strings.Contains(err.Error(), tt.err) { + t.Fatalf("ValidateClusterConfigContent() error = %s, wantErr %s", err.Error(), tt.err) + } + }) + } +} + func TestGetClusterConfig(t *testing.T) { tests := []struct { testName string diff --git a/pkg/api/v1alpha1/cluster_types.go b/pkg/api/v1alpha1/cluster_types.go index 54697ebac7ee..12628f72e938 100644 --- a/pkg/api/v1alpha1/cluster_types.go +++ b/pkg/api/v1alpha1/cluster_types.go @@ -8,6 +8,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/intstr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -309,6 +310,9 @@ type ControlPlaneConfiguration struct { MachineHealthCheck *MachineHealthCheck `json:"machineHealthCheck,omitempty"` // APIServerExtraArgs defines the flags to configure for the API server. APIServerExtraArgs map[string]string `json:"apiServerExtraArgs,omitempty"` + // KubeletConfiguration is a struct that exposes the Kubelet settings for the user to set on control plane nodes. + // +kubebuilder:pruning:PreserveUnknownFields + KubeletConfiguration *unstructured.Unstructured `json:"kubeletConfiguration,omitempty"` } // MachineHealthCheck allows to configure timeouts for machine health checks. Machine Health Checks are responsible for remediating unhealthy Machines. 
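// Illustrative sketch, not from this patch: in a Cluster manifest this
// unstructured field carries free-form kubelet settings, mirroring the test
// fixtures above, e.g.
//
//	controlPlaneConfiguration:
//	  kubeletConfiguration:
//	    kind: KubeletConfiguration
//	    maxPods: 20
//
// Because the CRD preserves unknown fields, a typo such as "maxPodss" is only
// caught by the strict validation added in cluster.go.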
@@ -453,10 +457,13 @@ type WorkerNodeGroupConfiguration struct { // UpgradeRolloutStrategy determines the rollout strategy to use for rolling upgrades // and related parameters/knobs UpgradeRolloutStrategy *WorkerNodesUpgradeRolloutStrategy `json:"upgradeRolloutStrategy,omitempty"` - // KuberenetesVersion defines the version for worker nodes. If not set, the top level spec kubernetesVersion will be used. + // KubernetesVersion defines the version for worker nodes. If not set, the top level spec kubernetesVersion will be used. KubernetesVersion *KubernetesVersion `json:"kubernetesVersion,omitempty"` // MachineHealthCheck is a worker node level override for the timeouts and maxUnhealthy specified in the top-level MHC configuration. If not configured, the defaults in the top-level MHC configuration are used. MachineHealthCheck *MachineHealthCheck `json:"machineHealthCheck,omitempty"` + // KubeletConfiguration is a struct that exposes the Kubelet settings for the user to set on worker nodes. + // +kubebuilder:pruning:PreserveUnknownFields + KubeletConfiguration *unstructured.Unstructured `json:"kubeletConfiguration,omitempty"` } // Equal compares two WorkerNodeGroupConfigurations. diff --git a/pkg/api/v1alpha1/zz_generated.deepcopy.go b/pkg/api/v1alpha1/zz_generated.deepcopy.go index e77f5f744048..14f6a15e57f1 100644 --- a/pkg/api/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/api/v1alpha1/zz_generated.deepcopy.go @@ -885,6 +885,10 @@ func (in *ControlPlaneConfiguration) DeepCopyInto(out *ControlPlaneConfiguration (*out)[key] = val } } + if in.KubeletConfiguration != nil { + in, out := &in.KubeletConfiguration, &out.KubeletConfiguration + *out = (*in).DeepCopy() + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneConfiguration. @@ -3470,6 +3474,10 @@ func (in *WorkerNodeGroupConfiguration) DeepCopyInto(out *WorkerNodeGroupConfigu *out = new(MachineHealthCheck) (*in).DeepCopyInto(*out) } + if in.KubeletConfiguration != nil { + in, out := &in.KubeletConfiguration, &out.KubeletConfiguration + *out = (*in).DeepCopy() + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerNodeGroupConfiguration. 
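The heart of this change is the strict-unmarshal check added in cluster.go above. Below is a minimal standalone sketch of that technique, not code from this patch (the helper name validateStrict is hypothetical): the free-form config is round-tripped through YAML and decoded strictly into the upstream kubelet type, so a misspelled field fails fast while a valid config passes.

package main

import (
	"fmt"

	"k8s.io/kubelet/config/v1beta1"
	"sigs.k8s.io/yaml"
)

// validateStrict round-trips the free-form config through YAML and decodes it
// strictly into the upstream KubeletConfiguration type, so any field name the
// type does not declare is rejected.
func validateStrict(cfg map[string]interface{}) error {
	raw, err := yaml.Marshal(cfg)
	if err != nil {
		return err
	}
	var kc v1beta1.KubeletConfiguration
	if err := yaml.UnmarshalStrict(raw, &kc); err != nil {
		return fmt.Errorf("unmarshaling KubeletConfiguration: %v", err)
	}
	return nil
}

func main() {
	valid := map[string]interface{}{"apiVersion": "kubelet.config.k8s.io/v1beta1", "kind": "KubeletConfiguration", "maxPods": 20}
	invalid := map[string]interface{}{"apiVersion": "kubelet.config.k8s.io/v1beta1", "kind": "KubeletConfiguration", "maxPodss": 20}
	fmt.Println(validateStrict(valid))   // <nil>
	fmt.Println(validateStrict(invalid)) // non-nil error naming the unknown field "maxPodss"
}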
diff --git a/pkg/providers/docker/config/template-cp.yaml b/pkg/providers/docker/config/template-cp.yaml index 2cb630b6f527..b9f8434e7bc9 100644 --- a/pkg/providers/docker/config/template-cp.yaml +++ b/pkg/providers/docker/config/template-cp.yaml @@ -137,6 +137,13 @@ spec: {{ .schedulerExtraArgs.ToYaml | indent 10 }} {{- end }} files: +{{- if .kubeletConfiguration }} + - content: | +{{ .kubeletConfiguration | indent 8}} + owner: root:root + permissions: "0644" + path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml +{{- end }} - content: | {{ .auditPolicy | indent 8 }} owner: root:root @@ -209,6 +216,10 @@ spec: path: /var/lib/kubeadm/aws-iam-authenticator/pki/key.pem {{- end}} initConfiguration: +{{- if .kubeletConfiguration }} + patches: + directory: /etc/kubernetes/patches +{{- end }} nodeRegistration: criSocket: /var/run/containerd/containerd.sock kubeletExtraArgs: @@ -230,6 +241,10 @@ spec: {{- end }} {{- end }} joinConfiguration: +{{- if .kubeletConfiguration }} + patches: + directory: /etc/kubernetes/patches +{{- end }} nodeRegistration: criSocket: /var/run/containerd/containerd.sock kubeletExtraArgs: diff --git a/pkg/providers/docker/config/template-md.yaml b/pkg/providers/docker/config/template-md.yaml index 94d8867e7437..b0e9cc8592ce 100644 --- a/pkg/providers/docker/config/template-md.yaml +++ b/pkg/providers/docker/config/template-md.yaml @@ -7,6 +7,10 @@ spec: template: spec: joinConfiguration: +{{- if .kubeletConfiguration }} + patches: + directory: /etc/kubernetes/patches +{{- end }} nodeRegistration: criSocket: /var/run/containerd/containerd.sock {{- if .workerNodeGroupTaints }} @@ -26,9 +30,16 @@ spec: {{- if .kubeletExtraArgs }} {{ .kubeletExtraArgs.ToYaml | indent 12 }} {{- end }} -{{- if .registryMirrorMap }} +{{- if or .registryMirrorMap .kubeletConfiguration }} files: {{- end }} +{{- if .kubeletConfiguration }} + - content: | +{{ .kubeletConfiguration | indent 10 }} + owner: root:root + permissions: "0644" + path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml +{{- end }} {{- if .registryCACert }} - content: | {{ .registryCACert | indent 10 }} diff --git a/pkg/providers/docker/docker.go b/pkg/providers/docker/docker.go index 6fc48d335f9a..77e91dd9d908 100644 --- a/pkg/providers/docker/docker.go +++ b/pkg/providers/docker/docker.go @@ -12,6 +12,7 @@ import ( etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + "sigs.k8s.io/yaml" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/bootstrapper" @@ -358,6 +359,16 @@ func buildTemplateMapCP(clusterSpec *cluster.Spec) (map[string]interface{}, erro values["maxSurge"] = clusterSpec.Cluster.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy.RollingUpdate.MaxSurge } + if clusterSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration != nil { + cpKubeletConfig := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration.Object + kcString, err := yaml.Marshal(cpKubeletConfig) + if err != nil { + return nil, fmt.Errorf("marshaling control plane node Kubelet Configuration while building CAPI template %v", err) + } + + values["kubeletConfiguration"] = string(kcString) + } + return values, nil } @@ -397,6 +408,16 @@ func buildTemplateMapMD(clusterSpec *cluster.Spec, workerNodeGroupConfiguration } } + if workerNodeGroupConfiguration.KubeletConfiguration != nil { + wnKubeletConfig := 
workerNodeGroupConfiguration.KubeletConfiguration.Object + kcString, err := yaml.Marshal(wnKubeletConfig) + if err != nil { + return nil, fmt.Errorf("marshaling Kubelet Configuration for worker node %s: %v", workerNodeGroupConfiguration.Name, err) + } + + values["kubeletConfiguration"] = string(kcString) + } + return values, nil } diff --git a/pkg/providers/docker/docker_test.go b/pkg/providers/docker/docker_test.go index b5df9fa043a5..774ee109f1e4 100644 --- a/pkg/providers/docker/docker_test.go +++ b/pkg/providers/docker/docker_test.go @@ -14,6 +14,7 @@ import ( . "github.com/onsi/gomega" "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" @@ -812,10 +813,9 @@ func TestDockerTemplateBuilderGenerateCAPISpecControlPlane(t *testing.T) { buildOptions []providers.BuildMapOption } tests := []struct { - name string - args args - wantContent []byte - wantErr error + name string + args args + wantErr error }{ { name: "kube 119 test", @@ -838,6 +838,28 @@ func TestDockerTemplateBuilderGenerateCAPISpecControlPlane(t *testing.T) { }, wantErr: fmt.Errorf("error building template map for CP "), }, + { + name: "kubelet config specified", + args: args{ + clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) { + s.Cluster.Name = "test-cluster" + s.Cluster.Spec.ControlPlaneConfiguration = v1alpha1.ControlPlaneConfiguration{ + KubeletConfiguration: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "maxPods": 20, + "apiVersion": "kubelet.config.k8s.io/v1beta1", + "kind": "KubeletConfiguration", + }, + }, + Count: 1, + Endpoint: &v1alpha1.Endpoint{ + Host: "1.1.1.1", + }, + } + }), + }, + wantErr: nil, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -896,10 +918,9 @@ func TestDockerTemplateBuilderGenerateCAPISpecWorkers(t *testing.T) { clusterSpec *cluster.Spec } tests := []struct { - name string - args args - wantContent []byte - wantErr error + name string + args args + wantErr error }{ { name: "kube version not specified", @@ -911,6 +932,28 @@ func TestDockerTemplateBuilderGenerateCAPISpecWorkers(t *testing.T) { }, wantErr: fmt.Errorf("error building template map for MD "), }, + { + name: "kubelet config specified", + args: args{ + clusterSpec: test.NewClusterSpec(func(s *cluster.Spec) { + s.Cluster.Name = "test-cluster" + s.Cluster.Spec.WorkerNodeGroupConfigurations = []v1alpha1.WorkerNodeGroupConfiguration{ + { + KubeletConfiguration: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "maxPods": 20, + "apiVersion": "kubelet.config.k8s.io/v1beta1", + "kind": "KubeletConfiguration", + }, + }, + Count: ptr.Int(1), + Name: "test", + }, + } + }), + }, + wantErr: nil, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { From 7abe87c5026d4a47dc488401997b6b53a90a8caf Mon Sep 17 00:00:00 2001 From: Rahul Ganesh <31204974+rahulbabu95@users.noreply.github.com> Date: Tue, 28 May 2024 13:55:29 -0700 Subject: [PATCH 165/193] Fix 1-30 tests using wrong templates/kube version (#8224) * Fix 1-30 tests using wrong templates/kube version and skip packages tests for 1.30 Signed-off-by: Rahul Ganesh * Rebase to factor in new vsphere tests Signed-off-by: Rahul Ganesh --------- Signed-off-by: Rahul Ganesh Co-authored-by: Rahul Ganesh --- test/e2e/SKIPPED_TESTS.yaml | 17 +++++++++++++++++ test/e2e/cloudstack_test.go | 4 ++-- test/e2e/tinkerbell_test.go | 6 +++--- 
test/e2e/vsphere_test.go | 8 ++++---- 4 files changed, 26 insertions(+), 9 deletions(-) diff --git a/test/e2e/SKIPPED_TESTS.yaml b/test/e2e/SKIPPED_TESTS.yaml index 676d19098138..f62c9f0d24b6 100644 --- a/test/e2e/SKIPPED_TESTS.yaml +++ b/test/e2e/SKIPPED_TESTS.yaml @@ -45,6 +45,23 @@ skipped_tests: - TestVSphereKubernetes130UbuntuCuratedPackagesClusterAutoscalerSimpleFlow - TestVSphereKubernetes130UbuntuCuratedPackagesPrometheusSimpleFlow - TestVSphereKubernetes130UbuntuWorkloadClusterCuratedPackagesSimpleFlow +- TestVSphereKubernetes130BottleRocketWorkloadClusterCuratedPackagesCertManagerSimpleFlow +- TestVSphereKubernetes130UbuntuWorkloadClusterCuratedPackagesCertManagerSimpleFlow +- TestVSphereKubernetes130UbuntuWorkloadClusterCuratedPackagesEmissarySimpleFlow +- TestVSphereKubernetes130BottleRocketCuratedPackagesClusterAutoscalerSimpleFlow +- TestVSphereKubernetes130BottleRocketCuratedPackagesPrometheusSimpleFlow +- TestVSphereKubernetes130BottleRocketWorkloadClusterCuratedPackagesSimpleFlow +- TestVSphereKubernetes130BottleRocketWorkloadClusterCuratedPackagesEmissarySimpleFlow +- TestCloudStackKubernetes130RedhatCuratedPackagesSimpleFlow +- TestCloudStackKubernetes130RedhatCuratedPackagesEmissarySimpleFlow +- TestCloudStackKubernetes130RedhatCuratedPackagesHarborSimpleFlow +- TestCloudStackKubernetes130RedhatWorkloadClusterCuratedPackagesSimpleFlow +- TestCloudStackKubernetes130RedhatCuratedPackagesPrometheusSimpleFlow +- TestCloudStackKubernetes130RedhatCuratedPackagesAdotUpdateFlow +- TestCloudStackKubernetes130RedHatCuratedPackagesClusterAutoscalerSimpleFlow +- TestCloudStackKubernetes130RedhatCuratedPackagesAdotSimpleFlow +- TestCloudStackKubernetes130RedhatCuratedPackagesCertManagerSimpleFlow +- TestCloudStackKubernetes130RedhatWorkloadClusterCuratedPackagesEmissarySimpleFlow # Snow - TestSnowKubernetes125SimpleFlow diff --git a/test/e2e/cloudstack_test.go b/test/e2e/cloudstack_test.go index 0c4953d3a0cb..02bc373b4d5d 100644 --- a/test/e2e/cloudstack_test.go +++ b/test/e2e/cloudstack_test.go @@ -932,7 +932,7 @@ func TestCloudStackKubernetes129To130GitFluxUpgrade(t *testing.T) { test, v1alpha1.Kube130, framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), - provider.WithProviderUpgrade(provider.Redhat9Kubernetes126Template()), + provider.WithProviderUpgrade(provider.Redhat9Kubernetes130Template()), ) } @@ -1766,7 +1766,7 @@ func TestCloudStackKubernetes129To130OIDCUpgrade(t *testing.T) { ) runUpgradeFlowWithOIDC( test, - v1alpha1.Kube126, + v1alpha1.Kube130, framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), provider.WithProviderUpgrade(provider.Redhat9Kubernetes130Template()), ) diff --git a/test/e2e/tinkerbell_test.go b/test/e2e/tinkerbell_test.go index bf25eafc4a0c..e3766fd3a06a 100644 --- a/test/e2e/tinkerbell_test.go +++ b/test/e2e/tinkerbell_test.go @@ -139,7 +139,7 @@ func TestTinkerbellKubernetes129UbuntuTo130UpgradeWorkerOnly(t *testing.T) { t, provider, framework.WithClusterFiller(), - framework.WithClusterFiller(api.WithKubernetesVersion(kube129)), + framework.WithClusterFiller(api.WithKubernetesVersion(kube130)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), framework.WithClusterFiller(api.WithWorkerKubernetesVersion(nodeGroupLabel1, &kube129)), @@ -1534,7 +1534,7 @@ func TestTinkerbellKubernetes130UbuntuWorkerNodeGroupsTaintsAndLabels(t *testing framework.WithCustomTinkerbellMachineConfig(nodeGroupLabel2), ), framework.WithClusterFiller( - 
api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneLabel(cpKey1, cpVal1), api.WithControlPlaneTaints([]corev1.Taint{framework.NoScheduleTaint()}), api.RemoveAllWorkerNodeGroups(), // This gives us a blank slate @@ -1587,7 +1587,7 @@ func TestTinkerbellAirgappedKubernetes130UbuntuProxyConfigFlow(t *testing.T) { func TestTinkerbellKubernetes130UbuntuOOB(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewTinkerbell(t, framework.WithUbuntu129Tinkerbell()), + framework.NewTinkerbell(t, framework.WithUbuntu130Tinkerbell()), framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithOOBConfiguration(), framework.WithControlPlaneHardware(1), diff --git a/test/e2e/vsphere_test.go b/test/e2e/vsphere_test.go index 33a1de6e51c3..9697473f23af 100644 --- a/test/e2e/vsphere_test.go +++ b/test/e2e/vsphere_test.go @@ -234,7 +234,7 @@ func TestVSphereKubernetes130BottleRocketAWSIamAuth(t *testing.T) { t, framework.NewVSphere(t, framework.WithBottleRocket130()), framework.WithAWSIam(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), ) runAWSIamAuthFlow(test) } @@ -2628,7 +2628,7 @@ func TestVSphereKubernetes130BottleRocketWithNTP(t *testing.T) { test := framework.NewClusterE2ETest( t, framework.NewVSphere( - t, framework.WithBottleRocket129(), + t, framework.WithBottleRocket130(), framework.WithNTPServersForAllMachines(), framework.WithSSHAuthorizedKeyForAllMachines(""), // set SSH key to empty ), @@ -3527,7 +3527,7 @@ func TestVSphereKubernetes130UbuntuControlPlaneNodeUpgrade(t *testing.T) { ) runSimpleUpgradeFlow( test, - v1alpha1.Kube129, + v1alpha1.Kube130, framework.WithClusterUpgrade(api.WithControlPlaneCount(3)), ) } @@ -4150,7 +4150,7 @@ func TestVSphereKubernetes129To130UbuntuInPlaceUpgradeFromLatestMinorRelease(t * test.GenerateClusterConfigForVersion(release.Version, framework.ExecuteWithEksaRelease(release)) test.UpdateClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube130), + api.WithKubernetesVersion(v1alpha1.Kube129), api.WithStackedEtcdTopology(), ), api.VSphereToConfigFiller( From 249f20de206b4394c110c4d9bf8d0d26168959bd Mon Sep 17 00:00:00 2001 From: Cavaughn Browne <113555337+cxbrowne1207@users.noreply.github.com> Date: Tue, 28 May 2024 19:52:34 -0500 Subject: [PATCH 166/193] Add poweroff hardware cleanup step after Tinkerbell E2E tests (#8140) * power down hardware in clean up step after tests fix linting errors * address PR comments * change cleanup machines to cleanup resources * moved context timeout inside poweroff hardware; leftover machines -> resources --- .../buildspecs/cloudstack-test-eks-a-cli.yml | 2 +- .../buildspecs/conformance-eks-a-cli.yml | 2 +- .../buildspecs/nutanix-test-eks-a-cli.yml | 2 +- .../build/buildspecs/quick-test-eks-a-cli.yml | 2 +- .../build/buildspecs/snow-test-eks-a-cli.yml | 2 +- .../buildspecs/tinkerbell-test-eks-a-cli.yml | 2 +- .../buildspecs/vsphere-test-eks-a-cli.yml | 2 +- cmd/integration_test/cmd/run.go | 8 +- internal/test/cleanup/cleanup.go | 89 +++++++++++++++++++ internal/test/e2e/run.go | 8 +- internal/test/e2e/setup.go | 6 +- test/e2e/README.md | 2 +- test/e2e/cloudstack_test.go | 12 +-- test/e2e/vsphere_test.go | 4 +- test/framework/cloudstack.go | 3 +- test/framework/cluster.go | 77 ++++------------ test/framework/docker.go | 4 +- test/framework/etcdencryption.go | 5 +- test/framework/nutanix.go | 4 
+- test/framework/snow.go | 4 +- test/framework/tinkerbell.go | 6 +- test/framework/vsphere.go | 4 +- 22 files changed, 149 insertions(+), 101 deletions(-) diff --git a/cmd/integration_test/build/buildspecs/cloudstack-test-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/cloudstack-test-eks-a-cli.yml index 8e70a0250f07..50584ea748c2 100644 --- a/cmd/integration_test/build/buildspecs/cloudstack-test-eks-a-cli.yml +++ b/cmd/integration_test/build/buildspecs/cloudstack-test-eks-a-cli.yml @@ -107,7 +107,7 @@ phases: -v 4 --skip ${SKIPPED_TESTS} --bundles-override=${BUNDLES_OVERRIDE} - --cleanup-vms=true + --cleanup-resources=true --test-report-folder=reports --branch-name=${BRANCH_NAME} --baremetal-branch=${BAREMETAL_BRANCH} diff --git a/cmd/integration_test/build/buildspecs/conformance-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/conformance-eks-a-cli.yml index 558aec8a6301..d9bb3ea7bbd5 100644 --- a/cmd/integration_test/build/buildspecs/conformance-eks-a-cli.yml +++ b/cmd/integration_test/build/buildspecs/conformance-eks-a-cli.yml @@ -163,7 +163,7 @@ phases: -v 4 --skip ${SKIPPED_TESTS} --bundles-override=${BUNDLES_OVERRIDE} - --cleanup-vms=true + --cleanup-resources=true --test-report-folder=reports reports: e2e-reports: diff --git a/cmd/integration_test/build/buildspecs/nutanix-test-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/nutanix-test-eks-a-cli.yml index deff7c9d3483..58ab4254c7c0 100644 --- a/cmd/integration_test/build/buildspecs/nutanix-test-eks-a-cli.yml +++ b/cmd/integration_test/build/buildspecs/nutanix-test-eks-a-cli.yml @@ -90,7 +90,7 @@ phases: -v 4 --skip ${SKIPPED_TESTS} --bundles-override=${BUNDLES_OVERRIDE} - --cleanup-vms=true + --cleanup-resources=true --test-report-folder=reports --branch-name=${BRANCH_NAME} --baremetal-branch=${BAREMETAL_BRANCH} diff --git a/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml index c69cd496f74b..ee22ccc83ede 100644 --- a/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml +++ b/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml @@ -216,7 +216,7 @@ phases: -v 4 --skip ${SKIPPED_TESTS} --bundles-override=${BUNDLES_OVERRIDE} - --cleanup-vms=true + --cleanup-resources=true --test-report-folder=reports --branch-name=${BRANCH_NAME} --baremetal-branch=${BAREMETAL_BRANCH} diff --git a/cmd/integration_test/build/buildspecs/snow-test-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/snow-test-eks-a-cli.yml index 124c1033913b..c7fe1db38e79 100644 --- a/cmd/integration_test/build/buildspecs/snow-test-eks-a-cli.yml +++ b/cmd/integration_test/build/buildspecs/snow-test-eks-a-cli.yml @@ -57,7 +57,7 @@ phases: -v 4 --skip ${SKIPPED_TESTS} --bundles-override=${BUNDLES_OVERRIDE} - --cleanup-vms=true + --cleanup-resources=true --test-report-folder=reports --branch-name=${BRANCH_NAME} --baremetal-branch=${BAREMETAL_BRANCH} diff --git a/cmd/integration_test/build/buildspecs/tinkerbell-test-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/tinkerbell-test-eks-a-cli.yml index 67f9c5ffdddb..9759bb0c6b25 100644 --- a/cmd/integration_test/build/buildspecs/tinkerbell-test-eks-a-cli.yml +++ b/cmd/integration_test/build/buildspecs/tinkerbell-test-eks-a-cli.yml @@ -104,7 +104,7 @@ phases: -v 4 --skip ${SKIPPED_TESTS} --bundles-override=${BUNDLES_OVERRIDE} - --cleanup-vms=true + --cleanup-resources=true --test-report-folder=reports --branch-name=${BRANCH_NAME} --baremetal-branch=${BAREMETAL_BRANCH} diff --git 
a/cmd/integration_test/build/buildspecs/vsphere-test-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/vsphere-test-eks-a-cli.yml index 0e6d327238bc..6758074837a0 100644 --- a/cmd/integration_test/build/buildspecs/vsphere-test-eks-a-cli.yml +++ b/cmd/integration_test/build/buildspecs/vsphere-test-eks-a-cli.yml @@ -130,7 +130,7 @@ phases: -v 4 --skip ${SKIPPED_TESTS} --bundles-override=${BUNDLES_OVERRIDE} - --cleanup-vms=true + --cleanup-resources=true --test-report-folder=reports --branch-name=${BRANCH_NAME} --baremetal-branch=${BAREMETAL_BRANCH} diff --git a/cmd/integration_test/cmd/run.go b/cmd/integration_test/cmd/run.go index 5e3aee6d0f85..fbb4580cc77f 100644 --- a/cmd/integration_test/cmd/run.go +++ b/cmd/integration_test/cmd/run.go @@ -22,7 +22,7 @@ const ( maxConcurrentTestsFlagName = "max-concurrent-tests" skipFlagName = "skip" bundlesOverrideFlagName = "bundles-override" - cleanupVmsFlagName = "cleanup-vms" + cleanupResourcesFlagName = "cleanup-resources" testReportFolderFlagName = "test-report-folder" branchNameFlagName = "branch-name" instanceConfigFlagName = "instance-config" @@ -66,7 +66,7 @@ func init() { runE2ECmd.Flags().IntP(maxConcurrentTestsFlagName, "p", 1, "Maximum number of parallel tests that can be run at a time") runE2ECmd.Flags().StringSlice(skipFlagName, nil, "List of tests to skip") runE2ECmd.Flags().Bool(bundlesOverrideFlagName, false, "Flag to indicate if the tests should run with a bundles override") - runE2ECmd.Flags().Bool(cleanupVmsFlagName, false, "Flag to indicate if VSphere VMs should be cleaned up automatically as tests complete") + runE2ECmd.Flags().Bool(cleanupResourcesFlagName, false, "Flag to indicate if test resources should be cleaned up automatically as tests complete") runE2ECmd.Flags().String(testReportFolderFlagName, "", "Folder destination for JUnit tests reports") runE2ECmd.Flags().String(branchNameFlagName, "main", "EKS-A origin branch from where the tests are being run") runE2ECmd.Flags().String(baremetalBranchFlagName, "main", "Branch for baremetal tests to run on") @@ -88,7 +88,7 @@ func runE2E(ctx context.Context) error { maxConcurrentTests := viper.GetInt(maxConcurrentTestsFlagName) testsToSkip := viper.GetStringSlice(skipFlagName) bundlesOverride := viper.GetBool(bundlesOverrideFlagName) - cleanupVms := viper.GetBool(cleanupVmsFlagName) + cleanupResources := viper.GetBool(cleanupResourcesFlagName) testReportFolder := viper.GetString(testReportFolderFlagName) branchName := viper.GetString(branchNameFlagName) baremetalBranchName := viper.GetString(baremetalBranchFlagName) @@ -102,7 +102,7 @@ func runE2E(ctx context.Context) error { Regex: testRegex, TestsToSkip: testsToSkip, BundlesOverride: bundlesOverride, - CleanupVms: cleanupVms, + CleanupResources: cleanupResources, TestReportFolder: testReportFolder, BranchName: branchName, TestInstanceConfigFile: instanceConfigFile, diff --git a/internal/test/cleanup/cleanup.go b/internal/test/cleanup/cleanup.go index 023d3e025094..67c7ec16e334 100644 --- a/internal/test/cleanup/cleanup.go +++ b/internal/test/cleanup/cleanup.go @@ -5,19 +5,25 @@ import ( "fmt" "os" "strconv" + "strings" "time" "github.com/aws/aws-sdk-go/aws/session" + "github.com/bmc-toolbox/bmclib/v2" + "github.com/go-logr/logr" prismgoclient "github.com/nutanix-cloud-native/prism-go-client" v3 "github.com/nutanix-cloud-native/prism-go-client/v3" + "github.com/aws/eks-anywhere/internal/pkg/api" "github.com/aws/eks-anywhere/internal/pkg/ec2" "github.com/aws/eks-anywhere/internal/pkg/s3" + 
"github.com/aws/eks-anywhere/pkg/errors" "github.com/aws/eks-anywhere/pkg/executables" "github.com/aws/eks-anywhere/pkg/filewriter" "github.com/aws/eks-anywhere/pkg/logger" "github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder" "github.com/aws/eks-anywhere/pkg/providers/nutanix" + "github.com/aws/eks-anywhere/pkg/providers/tinkerbell/hardware" "github.com/aws/eks-anywhere/pkg/retrier" "github.com/aws/eks-anywhere/pkg/validations" ) @@ -189,3 +195,86 @@ func NutanixTestResources(clusterName, endpoint, port string, insecure, ignoreEr } return nil } + +// TinkerbellTestResources cleans up machines by powering them down. +func TinkerbellTestResources(inventoryCSVFilePath string, ignoreErrors bool) error { + hardwarePool, err := api.NewHardwareMapFromFile(inventoryCSVFilePath) + if err != nil { + return fmt.Errorf("failed to create hardware map from inventory csv: %v", err) + } + + logger.Info("Powering off hardware: %+v", hardwarePool) + return powerOffHardwarePool(hardwarePool, ignoreErrors) +} + +func powerOffHardwarePool(hardware map[string]*hardware.Machine, ignoreErrors bool) error { + errList := []error{} + for _, h := range hardware { + if err := powerOffHardware(h, ignoreErrors); err != nil { + errList = append(errList, err) + } + } + + if len(errList) > 0 { + return fmt.Errorf("failed to power off %d hardware: %+v", len(errList), errors.NewAggregate(errList)) + } + + return nil +} + +func powerOffHardware(h *hardware.Machine, ignoreErrors bool) (reterror error) { + ctx, done := context.WithTimeout(context.Background(), 2*time.Minute) + defer done() + bmcClient := newBmclibClient(logr.Discard(), h.BMCIPAddress, h.BMCUsername, h.BMCPassword) + + if err := bmcClient.Open(ctx); err != nil { + md := bmcClient.GetMetadata() + logger.Info("Warning: Failed to open connection to BMC: %v, hardware: %v, providersAttempted: %v, failedProviderDetail: %v", err, h.BMCIPAddress, md.ProvidersAttempted, md.SuccessfulOpenConns) + return handlePowerOffHardwareError(err, ignoreErrors) + } + + md := bmcClient.GetMetadata() + logger.Info("Connected to BMC: hardware: %v, providersAttempted: %v, successfulProvider: %v", h.BMCIPAddress, md.ProvidersAttempted, md.SuccessfulOpenConns) + + defer func() { + if err := bmcClient.Close(ctx); err != nil { + md := bmcClient.GetMetadata() + logger.Info("Warning: BMC close connection failed: %v, hardware: %v, providersAttempted: %v, failedProviderDetail: %v", err, h.BMCIPAddress, md.ProvidersAttempted, md.FailedProviderDetail) + reterror = handlePowerOffHardwareError(err, ignoreErrors) + } + }() + + state, err := bmcClient.GetPowerState(ctx) + if err != nil { + state = "unknown" + } + if strings.Contains(strings.ToLower(state), "off") { + return nil + } + + if _, err := bmcClient.SetPowerState(ctx, "off"); err != nil { + md := bmcClient.GetMetadata() + logger.Info("Warning: failed to power off hardware: %v, hardware: %v, providersAttempted: %v, failedProviderDetail: %v", err, h.BMCIPAddress, md.ProvidersAttempted, md.SuccessfulOpenConns) + return handlePowerOffHardwareError(err, ignoreErrors) + } + + return nil +} + +func handlePowerOffHardwareError(err error, ignoreErrors bool) error { + if err != nil && !ignoreErrors { + return err + } + return nil +} + +// newBmclibClient creates a new BMClib client. 
+func newBmclibClient(log logr.Logger, hostIP, username, password string) *bmclib.Client { + o := []bmclib.Option{} + log = log.WithValues("host", hostIP, "username", username) + o = append(o, bmclib.WithLogger(log)) + client := bmclib.NewClient(hostIP, username, password, o...) + client.Registry.Drivers = client.Registry.PreferProtocol("redfish") + + return client +} diff --git a/internal/test/e2e/run.go b/internal/test/e2e/run.go index e5c787811b1a..e5b97abaa5b9 100644 --- a/internal/test/e2e/run.go +++ b/internal/test/e2e/run.go @@ -47,7 +47,7 @@ type ParallelRunConf struct { Regex string TestsToSkip []string BundlesOverride bool - CleanupVms bool + CleanupResources bool TestReportFolder string BranchName string BaremetalBranchName string @@ -199,7 +199,7 @@ type instanceRunConf struct { BundlesOverride bool TestRunnerType TestRunnerType TestRunnerConfig TestInfraConfig - CleanupVMs bool + CleanupResources bool Logger logr.Logger Session *session.Session } @@ -231,7 +231,7 @@ func RunTests(conf instanceRunConf, inventoryCatalogue map[string]*hardwareCatal "branch_name", conf.BranchName, "ip_pool", conf.IPPool.ToString(), "hardware_count", conf.HardwareCount, "tinkerbell_airgapped_test", conf.TinkerbellAirgappedTest, "bundles_override", conf.BundlesOverride, "test_runner_type", conf.TestRunnerType, - "cleanup_vms", conf.CleanupVMs) + "cleanup_resources", conf.CleanupResources) instanceId, err := testRunner.createInstance(conf) if err != nil { @@ -519,7 +519,7 @@ func newInstanceRunConf(awsSession *session.Session, conf ParallelRunConf, jobNu BundlesOverride: conf.BundlesOverride, TestReportFolder: conf.TestReportFolder, BranchName: conf.BranchName, - CleanupVMs: conf.CleanupVms, + CleanupResources: conf.CleanupResources, TestRunnerType: testRunnerType, TestRunnerConfig: *testRunnerConfig, Logger: conf.Logger.WithValues("jobID", jobID, "test", testRegex), diff --git a/internal/test/e2e/setup.go b/internal/test/e2e/setup.go index e6ea140a88ca..ee66c8bcd3c8 100644 --- a/internal/test/e2e/setup.go +++ b/internal/test/e2e/setup.go @@ -40,7 +40,7 @@ type E2ESession struct { ipPool networkutils.IPPool testEnvVars map[string]string bundlesOverride bool - cleanupVms bool + cleanup bool requiredFiles []string branchName string hardware []*api.Hardware @@ -57,7 +57,7 @@ func newE2ESession(instanceId string, conf instanceRunConf) (*E2ESession, error) ipPool: conf.IPPool, testEnvVars: make(map[string]string), bundlesOverride: conf.BundlesOverride, - cleanupVms: conf.CleanupVMs, + cleanup: conf.CleanupResources, requiredFiles: requiredFiles, branchName: conf.BranchName, hardware: conf.Hardware, @@ -187,7 +187,7 @@ func (e *E2ESession) setup(regex string) error { // Adding JobId to Test Env variables e.testEnvVars[e2etests.JobIdVar] = e.jobId e.testEnvVars[e2etests.BundlesOverrideVar] = strconv.FormatBool(e.bundlesOverride) - e.testEnvVars[e2etests.CleanupVmsVar] = strconv.FormatBool(e.cleanupVms) + e.testEnvVars[e2etests.CleanupResourcesVar] = strconv.FormatBool(e.cleanup) if e.branchName != "" { e.testEnvVars[e2etests.BranchNameEnvVar] = e.branchName diff --git a/test/e2e/README.md b/test/e2e/README.md index 41e3eb87fd20..fa0325a124c6 100644 --- a/test/e2e/README.md +++ b/test/e2e/README.md @@ -45,7 +45,7 @@ In order to use bundle overrides, take your bundle overrides yaml file and move You will also need to set the environment variable `T_BUNDLES_OVERRIDE=true` ### Cleaning up VM's after a test run -In order to clean up VM's after a test runs automatically, set `T_CLEANUP_VMS=true` +In order to 
clean up VMs automatically after a test runs, set `T_CLEANUP_RESOURCES=true`
 
 ## VSphere tests requisites
 The following env variables need to be set:
diff --git a/test/e2e/cloudstack_test.go b/test/e2e/cloudstack_test.go
index 02bc373b4d5d..4c0842220670 100644
--- a/test/e2e/cloudstack_test.go
+++ b/test/e2e/cloudstack_test.go
@@ -3292,12 +3292,12 @@ func TestCloudStackKubernetes126RedhatTo127UpgradeWithCheckpoint(t *testing.T) {
 	)
 	clusterOpts = append(clusterOpts, framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube127)), framework.ExpectFailure(true),
-		provider.WithProviderUpgrade(provider.Redhat9Kubernetes126Template()), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupVmsVar, "false"))
+		provider.WithProviderUpgrade(provider.Redhat9Kubernetes126Template()), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupResourcesVar, "false"))
 
 	commandOpts := []framework.CommandOpt{framework.WithExternalEtcdWaitTimeout("10m")}
 
 	clusterOpts2 = append(clusterOpts, framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube127)), framework.ExpectFailure(false),
-		provider.WithProviderUpgrade(provider.Redhat9Kubernetes127Template()), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupVmsVar, "true"))
+		provider.WithProviderUpgrade(provider.Redhat9Kubernetes127Template()), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupResourcesVar, "true"))
 
 	runUpgradeFlowWithCheckpoint(
 		test,
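The checkpoint upgrade tests here toggle cleanup deliberately: the first attempt is expected to fail, so T_CLEANUP_RESOURCES is pinned to "false" to preserve the cluster state the checkpoint needs for a resume, and the retry sets it back to "true" once the upgrade should succeed. A self-contained sketch of the gate that env var drives (it mirrors the getCleanupResourcesVar and shouldCleanUpResources helpers introduced later in this patch; the printed messages are illustrative):

package main

import (
	"fmt"
	"os"
	"strconv"
)

// Cleanup runs only when T_CLEANUP_RESOURCES parses as a boolean true;
// any parse error (including an unset variable) means "skip cleanup".
func main() {
	cleanup, err := strconv.ParseBool(os.Getenv("T_CLEANUP_RESOURCES"))
	if err == nil && cleanup {
		fmt.Println("provider resources will be cleaned up after the test")
		return
	}
	fmt.Println("skipping provider resource cleanup")
}

@@ -3322,12 +3322,12 @@ func TestCloudStackKubernetes127RedhatTo128UpgradeWithCheckpoint(t *testing.T) {
 	)
 	clusterOpts = append(clusterOpts, framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube128)), framework.ExpectFailure(true),
-		provider.WithProviderUpgrade(provider.Redhat9Kubernetes127Template()), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupVmsVar, "false"))
+		provider.WithProviderUpgrade(provider.Redhat9Kubernetes127Template()), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupResourcesVar, "false"))
 
 	commandOpts := []framework.CommandOpt{framework.WithExternalEtcdWaitTimeout("10m")}
 
 	clusterOpts2 = append(clusterOpts, framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube128)), framework.ExpectFailure(false),
-		provider.WithProviderUpgrade(provider.Redhat9Kubernetes128Template()), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupVmsVar, "true"))
+		provider.WithProviderUpgrade(provider.Redhat9Kubernetes128Template()), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupResourcesVar, "true"))
 
 	runUpgradeFlowWithCheckpoint(
 		test,
@@ -3352,12 +3352,12 @@ func TestCloudStackKubernetes129RedhatTo130UpgradeWithCheckpoint(t *testing.T) {
 	)
 	clusterOpts = append(clusterOpts, framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.ExpectFailure(true),
-		provider.WithProviderUpgrade(provider.Redhat9Kubernetes129Template()), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupVmsVar, "false"))
+		provider.WithProviderUpgrade(provider.Redhat9Kubernetes129Template()), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupResourcesVar, "false"))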
commandOpts := []framework.CommandOpt{framework.WithExternalEtcdWaitTimeout("10m")} clusterOpts2 = append(clusterOpts, framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.ExpectFailure(false), - provider.WithProviderUpgrade(provider.Redhat9Kubernetes130Template()), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupVmsVar, "true")) + provider.WithProviderUpgrade(provider.Redhat9Kubernetes130Template()), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupResourcesVar, "true")) runUpgradeFlowWithCheckpoint( test, diff --git a/test/e2e/vsphere_test.go b/test/e2e/vsphere_test.go index 9697473f23af..3944b669ddf2 100644 --- a/test/e2e/vsphere_test.go +++ b/test/e2e/vsphere_test.go @@ -3924,12 +3924,12 @@ func TestVSphereKubernetes127UbuntuTo128UpgradeWithCheckpoint(t *testing.T) { ) clusterOpts = append(clusterOpts, framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube128)), framework.ExpectFailure(true), - provider.WithProviderUpgrade(provider.Ubuntu128Template(), api.WithResourcePoolforCPMachines(vsphereInvalidResourcePoolUpdateVar)), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupVmsVar, "false")) + provider.WithProviderUpgrade(provider.Ubuntu128Template(), api.WithResourcePoolforCPMachines(vsphereInvalidResourcePoolUpdateVar)), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupResourcesVar, "false")) commandOpts := []framework.CommandOpt{framework.WithControlPlaneWaitTimeout("10m")} clusterOpts2 = append(clusterOpts, framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube128)), framework.ExpectFailure(false), - provider.WithProviderUpgrade(provider.Ubuntu128Template(), api.WithResourcePoolForAllMachines(os.Getenv(vsphereResourcePoolVar))), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupVmsVar, "true")) + provider.WithProviderUpgrade(provider.Ubuntu128Template(), api.WithResourcePoolForAllMachines(os.Getenv(vsphereResourcePoolVar))), framework.WithEnvVar(features.CheckpointEnabledEnvVar, "true"), framework.WithEnvVar(framework.CleanupResourcesVar, "true")) runUpgradeFlowWithCheckpoint( test, diff --git a/test/framework/cloudstack.go b/test/framework/cloudstack.go index a676cf1b28dc..ee1040862b66 100644 --- a/test/framework/cloudstack.go +++ b/test/framework/cloudstack.go @@ -264,7 +264,8 @@ func (c *CloudStack) ClusterConfigUpdates() []api.ClusterConfigFiller { return []api.ClusterConfigFiller{api.ClusterToConfigFiller(f...), api.CloudStackToConfigFiller(c.fillers...)} } -func (c *CloudStack) CleanupVMs(clusterName string) error { +// CleanupResources satisfies the test framework Provider. 
+func (c *CloudStack) CleanupResources(clusterName string) error { return cleanup.CloudstackTestResources(context.Background(), clusterName, false, false) } diff --git a/test/framework/cluster.go b/test/framework/cluster.go index 4a179abb195a..333024bab648 100644 --- a/test/framework/cluster.go +++ b/test/framework/cluster.go @@ -61,7 +61,7 @@ const ( BundlesOverrideVar = "T_BUNDLES_OVERRIDE" ClusterIPPoolEnvVar = "T_CLUSTER_IP_POOL" ClusterIPEnvVar = "T_CLUSTER_IP" - CleanupVmsVar = "T_CLEANUP_VMS" + CleanupResourcesVar = "T_CLEANUP_RESOURCES" hardwareYamlPath = "hardware.yaml" hardwareCsvPath = "hardware.csv" EksaPackagesInstallation = "eks-anywhere-packages" @@ -148,7 +148,7 @@ func NewClusterE2ETest(t T, provider Provider, opts ...ClusterE2ETestOpt) *Clust provider.Setup() e.T.Cleanup(func() { - e.CleanupVms() + e.cleanupResources() tinkerbellCIEnvironment := os.Getenv(TinkerbellCIEnvironment) if e.Provider.Name() == tinkerbellProviderName && tinkerbellCIEnvironment == "true" { @@ -341,7 +341,7 @@ type Provider interface { // Prefer to call UpdateClusterConfig directly from the tests to make it more explicit. ClusterConfigUpdates() []api.ClusterConfigFiller Setup() - CleanupVMs(clusterName string) error + CleanupResources(clusterName string) error UpdateKubeConfig(content *[]byte, clusterName string) error ClusterStateValidations() []clusterf.StateValidation WithKubeVersionAndOS(kubeVersion v1alpha1.KubernetesVersion, os OS, release *releasev1.EksARelease) api.ClusterConfigFiller @@ -362,53 +362,8 @@ func newBmclibClient(log logr.Logger, hostIP, username, password string) *bmclib return client } -// powerOffHardware issues power off calls to all Hardware. This function does not fail the test if it encounters an error. -// This function is a helper and not part of the code path that we are testing. -// For this reason, we are only logging the errors and not failing the test. -// This function exists not because we need the hardware to be powered off before a test run, -// but because we want to make sure that no other Tinkerbell Boots DHCP server is running. -// Another Boots DHCP server running can cause netboot issues with hardware. 
-func (e *ClusterE2ETest) powerOffHardware() { - for _, h := range e.TestHardware { - ctx, done := context.WithTimeout(context.Background(), 2*time.Minute) - defer done() - bmcClient := newBmclibClient(logr.Discard(), h.BMCIPAddress, h.BMCUsername, h.BMCPassword) - - if err := bmcClient.Open(ctx); err != nil { - md := bmcClient.GetMetadata() - e.T.Logf("Failed to open connection to BMC: %v, hardware: %v, providersAttempted: %v, failedProviderDetail: %v", err, h.BMCIPAddress, md.ProvidersAttempted, md.SuccessfulOpenConns) - - continue - } - md := bmcClient.GetMetadata() - e.T.Logf("Connected to BMC: hardware: %v, providersAttempted: %v, successfulProvider: %v", h.BMCIPAddress, md.ProvidersAttempted, md.SuccessfulOpenConns) - - defer func() { - if err := bmcClient.Close(ctx); err != nil { - md := bmcClient.GetMetadata() - e.T.Logf("BMC close connection failed: %v, hardware: %v, providersAttempted: %v, failedProviderDetail: %v", err, h.BMCIPAddress, md.ProvidersAttempted, md.FailedProviderDetail) - } - }() - - state, err := bmcClient.GetPowerState(ctx) - if err != nil { - state = "unknown" - } - if strings.Contains(strings.ToLower(state), "off") { - return - } - - if _, err := bmcClient.SetPowerState(ctx, "off"); err != nil { - md := bmcClient.GetMetadata() - e.T.Logf("failed to power off hardware: %v, hardware: %v, providersAttempted: %v, failedProviderDetail: %v", err, h.BMCIPAddress, md.ProvidersAttempted, md.SuccessfulOpenConns) - continue - } - } -} - // ValidateHardwareDecommissioned checks that the all hardware was powered off during the cluster deletion. -// This function tests that the hardware was powered off during the cluster deletion. If any hardware are not powered off -// this func calls powerOffHardware to power off the hardware and then fails this test. +// This function tests that the hardware was powered off during the cluster deletion. func (e *ClusterE2ETest) ValidateHardwareDecommissioned() { var failedToDecomm []*api.Hardware for _, h := range e.TestHardware { @@ -460,7 +415,6 @@ func (e *ClusterE2ETest) ValidateHardwareDecommissioned() { } if len(failedToDecomm) > 0 { - e.powerOffHardware() e.T.Fatalf("failed to decommission all hardware during cluster deletion") } } @@ -913,16 +867,17 @@ func (e *ClusterE2ETest) DeleteCluster(opts ...CommandOpt) { e.deleteCluster(opts...) } -// CleanupVms is a helper to clean up VMs. It is a noop if the T_CLEANUP_VMS environment variable +// cleanupResources is a helper to clean up test resources. It is a noop if the T_CLEANUP_RESOURCES environment variable // is false or unset. 
-func (e *ClusterE2ETest) CleanupVms() {
-	if !shouldCleanUpVms() {
-		e.T.Logf("Skipping VM cleanup")
+func (e *ClusterE2ETest) cleanupResources() {
+	if !shouldCleanUpResources() {
+		e.T.Logf("Skipping provider resource cleanup")
 		return
 	}
 
-	if err := e.Provider.CleanupVMs(e.ClusterName); err != nil {
-		e.T.Logf("failed to clean up VMs: %v", err)
+	e.T.Logf("Cleaning up provider resources")
+	if err := e.Provider.CleanupResources(e.ClusterName); err != nil {
+		e.T.Logf("failed to clean up %s test resources: %v", e.Provider.Name(), err)
 	}
 }
 
@@ -933,9 +888,9 @@ func (e *ClusterE2ETest) CleanupDockerEnvironment() {
 	e.Run("docker", "rm", "-vf", "$(docker ps -a -q)", "||", "true")
 }
 
-func shouldCleanUpVms() bool {
-	shouldCleanupVms, err := getCleanupVmsVar()
-	return err == nil && shouldCleanupVms
+func shouldCleanUpResources() bool {
+	shouldCleanupResources, err := getCleanupResourcesVar()
+	return err == nil && shouldCleanupResources
 }
 
 func (e *ClusterE2ETest) deleteCluster(opts ...CommandOpt) {
@@ -1118,8 +1073,8 @@ func getBundlesOverride() string {
 	return os.Getenv(BundlesOverrideVar)
 }
 
-func getCleanupVmsVar() (bool, error) {
-	return strconv.ParseBool(os.Getenv(CleanupVmsVar))
+func getCleanupResourcesVar() (bool, error) {
+	return strconv.ParseBool(os.Getenv(CleanupResourcesVar))
 }
 
 func setEksctlVersionEnvVar() error {
diff --git a/test/framework/docker.go b/test/framework/docker.go
index 58842e1f5717..bac169e8ae1a 100644
--- a/test/framework/docker.go
+++ b/test/framework/docker.go
@@ -39,8 +39,8 @@ func (d *Docker) Name() string {
 // Setup implements the Provider interface.
 func (d *Docker) Setup() {}
 
-// CleanupVMs implements the Provider interface.
-func (d *Docker) CleanupVMs(_ string) error {
+// CleanupResources implements the Provider interface.
+func (d *Docker) CleanupResources(_ string) error {
 	return nil
 }
 
diff --git a/test/framework/etcdencryption.go b/test/framework/etcdencryption.go
index 088dbb55f2a9..4d30ad31c550 100644
--- a/test/framework/etcdencryption.go
+++ b/test/framework/etcdencryption.go
@@ -192,7 +192,7 @@ func (e *ClusterE2ETest) PostClusterCreateEtcdEncryptionSetup() {
 	}
 
 	// register cleanup step to remove the keys from s3 after the test is done
-	e.T.Cleanup(e.cleanup)
+	e.T.Cleanup(e.cleanupKeysFromOIDCConfig)
 
 	if err := e.deployPodIdentityWebhook(ctx, envVars); err != nil {
 		e.T.Fatal(err)
 	}
@@ -203,7 +203,8 @@
 	}
 }
 
-func (e *ClusterE2ETest) cleanup() {
+// cleanupKeysFromOIDCConfig removes the cluster's key from the IAM OIDC config.
+func (e *ClusterE2ETest) cleanupKeysFromOIDCConfig() {
 	e.T.Log("Removing cluster's key from the IAM OIDC config")
 	data, err := os.ReadFile(fmt.Sprintf(keyIDFilenameFormat, e.ClusterName))
 	if err != nil {
diff --git a/test/framework/nutanix.go b/test/framework/nutanix.go
index ecde5892e811..0e3aa0929176 100644
--- a/test/framework/nutanix.go
+++ b/test/framework/nutanix.go
@@ -158,8 +158,8 @@ func (n *Nutanix) UpdateKubeConfig(content *[]byte, clusterName string) error {
 	return nil
 }
 
-// CleanupVMs satisfies the test framework Provider.
-func (n *Nutanix) CleanupVMs(clustername string) error {
+// CleanupResources satisfies the test framework Provider.
+func (n *Nutanix) CleanupResources(clustername string) error {
 	return cleanup.NutanixTestResources(clustername, os.Getenv(nutanixEndpoint), os.Getenv(nutanixPort), true, true)
 }
 
diff --git a/test/framework/snow.go b/test/framework/snow.go
index a326c5af1ab3..9e0d5cc67ea0 100644
--- a/test/framework/snow.go
+++ b/test/framework/snow.go
@@ -102,8 +102,8 @@ func (s *Snow) ClusterConfigUpdates() []api.ClusterConfigFiller {
 	return []api.ClusterConfigFiller{api.ClusterToConfigFiller(f...), api.SnowToConfigFiller(s.fillers...)}
 }
 
-// CleanupVMs satisfies the test framework Provider.
-func (s *Snow) CleanupVMs(clusterName string) error {
+// CleanupResources satisfies the test framework Provider.
+func (s *Snow) CleanupResources(clusterName string) error {
 	snowDeviceIPs := strings.Split(os.Getenv(snowDevices), ",")
 	s.t.Logf("Cleaning ec2 instances of %s in snow devices: %v", clusterName, snowDeviceIPs)
 
diff --git a/test/framework/tinkerbell.go b/test/framework/tinkerbell.go
index 8c521240c979..1e6b89fc7525 100644
--- a/test/framework/tinkerbell.go
+++ b/test/framework/tinkerbell.go
@@ -7,6 +7,7 @@ import (
 	"testing"
 
 	"github.com/aws/eks-anywhere/internal/pkg/api"
+	"github.com/aws/eks-anywhere/internal/test/cleanup"
 	anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
 	releasev1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
 	clusterf "github.com/aws/eks-anywhere/test/framework/cluster"
@@ -146,8 +147,9 @@ func (t *Tinkerbell) WithProviderUpgrade(fillers ...api.TinkerbellFiller) Cluste
 	}
 }
 
-func (t *Tinkerbell) CleanupVMs(_ string) error {
-	return nil
+// CleanupResources cleans up the Tinkerbell machines by simply powering them down.
+func (t *Tinkerbell) CleanupResources(_ string) error {
+	return cleanup.TinkerbellTestResources(t.inventoryCsvFilePath, true)
 }
 
 // WithKubeVersionAndOS returns a cluster config filler that sets the cluster kube version and the right image for all
diff --git a/test/framework/vsphere.go b/test/framework/vsphere.go
index 822b9f39c0ad..1fc0a138ffb3 100644
--- a/test/framework/vsphere.go
+++ b/test/framework/vsphere.go
@@ -441,8 +441,8 @@ func (v *VSphere) WithBottleRocket125() api.ClusterConfigFiller {
 	return v.WithKubeVersionAndOS(anywherev1.Kube125, Bottlerocket1, nil)
 }
 
-// CleanupVMs deletes all the VMs owned by the test EKS-A cluster. It satisfies the test framework Provider.
-func (v *VSphere) CleanupVMs(clusterName string) error {
+// CleanupResources deletes all the VMs owned by the test EKS-A cluster. It satisfies the test framework Provider.
+func (v *VSphere) CleanupResources(clusterName string) error { return cleanup.CleanUpVsphereTestResources(context.Background(), clusterName) } From 4b014405b098cc35fcb4986f07ef0617f2c17450 Mon Sep 17 00:00:00 2001 From: Rahul Ganesh <31204974+rahulbabu95@users.noreply.github.com> Date: Wed, 29 May 2024 22:54:30 -0700 Subject: [PATCH 167/193] Skip latest minor tests creating 130 cluster on previous release (#8227) Signed-off-by: Rahul Ganesh Co-authored-by: Rahul Ganesh --- test/e2e/SKIPPED_TESTS.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/test/e2e/SKIPPED_TESTS.yaml b/test/e2e/SKIPPED_TESTS.yaml index f62c9f0d24b6..5a60d2180b5c 100644 --- a/test/e2e/SKIPPED_TESTS.yaml +++ b/test/e2e/SKIPPED_TESTS.yaml @@ -63,6 +63,11 @@ skipped_tests: - TestCloudStackKubernetes130RedhatCuratedPackagesCertManagerSimpleFlow - TestCloudStackKubernetes130RedhatWorkloadClusterCuratedPackagesEmissarySimpleFlow +# UpgradeFromLatestTests for new K8s version (expected to work only after the release is out) +- TestDockerKubernetes130AirgappedUpgradeFromLatestRegistryMirrorAndCert +- TestDockerKubernetes129to130UpgradeFromLatestMinorReleaseAPI +- TestCloudStackKubernetes130WithOIDCManagementClusterUpgradeFromLatestSideEffects + # Snow - TestSnowKubernetes125SimpleFlow - TestSnowKubernetes126SimpleFlow From 5101a6a19e6ce7f9b52c5135243513059580e477 Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Thu, 30 May 2024 10:49:17 -0700 Subject: [PATCH 168/193] [PR BOT] Generate release testdata files (#8218) --- .../testdata/main-bundle-release.yaml | 60 +++++++++---------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/release/cli/pkg/operations/testdata/main-bundle-release.yaml b/release/cli/pkg/operations/testdata/main-bundle-release.yaml index 99ca48b2fe64..a4543c62bba8 100644 --- a/release/cli/pkg/operations/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/operations/testdata/main-bundle-release.yaml @@ -447,11 +447,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-nutanix os: linux - uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.3-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/infrastructure-components.yaml kubeVip: arch: - amd64 @@ -462,8 +462,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/metadata.yaml - version: v1.3.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/metadata.yaml + version: 
v1.3.3+abcdef1 packageController: credentialProviderPackage: arch: @@ -1225,11 +1225,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-nutanix os: linux - uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.3-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/infrastructure-components.yaml kubeVip: arch: - amd64 @@ -1240,8 +1240,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/metadata.yaml - version: v1.3.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/metadata.yaml + version: v1.3.3+abcdef1 packageController: credentialProviderPackage: arch: @@ -2003,11 +2003,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-nutanix os: linux - uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.3-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/infrastructure-components.yaml kubeVip: arch: - amd64 @@ -2018,8 +2018,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/metadata.yaml - version: v1.3.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/metadata.yaml + version: v1.3.3+abcdef1 packageController: credentialProviderPackage: arch: @@ -2781,11 +2781,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-nutanix os: linux - uri: 
public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.3-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/infrastructure-components.yaml kubeVip: arch: - amd64 @@ -2796,8 +2796,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/metadata.yaml - version: v1.3.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/metadata.yaml + version: v1.3.3+abcdef1 packageController: credentialProviderPackage: arch: @@ -3559,11 +3559,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-nutanix os: linux - uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.3-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/infrastructure-components.yaml kubeVip: arch: - amd64 @@ -3574,8 +3574,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/metadata.yaml - version: v1.3.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/metadata.yaml + version: v1.3.3+abcdef1 packageController: credentialProviderPackage: arch: @@ -4337,11 +4337,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-nutanix os: linux - uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.3-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/infrastructure-components.yaml kubeVip: arch: - amd64 @@ -4352,8 +4352,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.4/metadata.yaml - version: v1.3.4+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/metadata.yaml + version: v1.3.3+abcdef1 packageController: credentialProviderPackage: arch: From b05e21b587a6fbcb5f5fa3379aeeb9193bb32a45 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 30 May 2024 11:21:31 -0700 Subject: [PATCH 169/193] Bump sigs.k8s.io/controller-runtime in /release/cli (#8222) Bumps [sigs.k8s.io/controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) from 0.18.2 to 0.18.3. - [Release notes](https://github.com/kubernetes-sigs/controller-runtime/releases) - [Changelog](https://github.com/kubernetes-sigs/controller-runtime/blob/main/RELEASE.md) - [Commits](https://github.com/kubernetes-sigs/controller-runtime/compare/v0.18.2...v0.18.3) --- updated-dependencies: - dependency-name: sigs.k8s.io/controller-runtime dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- release/cli/go.mod | 14 +++++++------- release/cli/go.sum | 28 ++++++++++++++-------------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/release/cli/go.mod b/release/cli/go.mod index 80492680e7ba..a43a944fff56 100644 --- a/release/cli/go.mod +++ b/release/cli/go.mod @@ -17,9 +17,9 @@ require ( github.com/spf13/viper v1.18.2 golang.org/x/sync v0.7.0 helm.sh/helm/v3 v3.15.0 - k8s.io/apimachinery v0.30.0 + k8s.io/apimachinery v0.30.1 k8s.io/helm v2.17.0+incompatible - sigs.k8s.io/controller-runtime v0.18.2 + sigs.k8s.io/controller-runtime v0.18.3 sigs.k8s.io/yaml v1.4.0 ) @@ -164,12 +164,12 @@ require ( gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.30.0 // indirect - k8s.io/apiextensions-apiserver v0.30.0 // indirect - k8s.io/apiserver v0.30.0 // indirect + k8s.io/api v0.30.1 // indirect + k8s.io/apiextensions-apiserver v0.30.1 // indirect + k8s.io/apiserver v0.30.1 // indirect k8s.io/cli-runtime v0.30.0 // indirect - k8s.io/client-go v0.30.0 // indirect - k8s.io/component-base v0.30.0 // indirect + k8s.io/client-go v0.30.1 // indirect + k8s.io/component-base v0.30.1 // indirect k8s.io/klog/v2 v2.120.1 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect k8s.io/kubectl v0.30.0 // indirect diff --git a/release/cli/go.sum b/release/cli/go.sum index 16ef77be5e03..cc5a8a157721 100644 --- a/release/cli/go.sum +++ b/release/cli/go.sum @@ -874,26 +874,26 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.17.2/go.mod h1:BS9fjjLc4CMuqfSO8vgbHPKMt5+SF0ET6u/RVDihTo4= -k8s.io/api v0.30.0 h1:siWhRq7cNjy2iHssOB9SCGNCl2spiF1dO3dABqZ8niA= -k8s.io/api v0.30.0/go.mod h1:OPlaYhoHs8EQ1ql0R/TsUgaRPhpKNxIMrKQfWUp8QSE= +k8s.io/api v0.30.1 h1:kCm/6mADMdbAxmIh0LBjS54nQBE+U4KmbCfIkF5CpJY= +k8s.io/api v0.30.1/go.mod h1:ddbN2C0+0DIiPntan/bye3SW3PdwLa11/0yqwvuRrJM= k8s.io/apiextensions-apiserver v0.17.2/go.mod h1:4KdMpjkEjjDI2pPfBA15OscyNldHWdBCfsWMDWAmSTs= -k8s.io/apiextensions-apiserver v0.30.0 h1:jcZFKMqnICJfRxTgnC4E+Hpcq8UEhT8B2lhBcQ+6uAs= -k8s.io/apiextensions-apiserver v0.30.0/go.mod h1:N9ogQFGcrbWqAY9p2mUAL5mGxsLqwgtUce127VtRX5Y= +k8s.io/apiextensions-apiserver v0.30.1 h1:4fAJZ9985BmpJG6PkoxVRpXv9vmPUOVzl614xarePws= +k8s.io/apiextensions-apiserver v0.30.1/go.mod h1:R4GuSrlhgq43oRY9sF2IToFh7PVlF1JjfWdoG3pixk4= k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= -k8s.io/apimachinery v0.30.0 h1:qxVPsyDM5XS96NIh9Oj6LavoVFYff/Pon9cZeDIkHHA= -k8s.io/apimachinery v0.30.0/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/apimachinery v0.30.1 h1:ZQStsEfo4n65yAdlGTfP/uSHMQSoYzU/oeEbkmF7P2U= +k8s.io/apimachinery v0.30.1/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= k8s.io/apiserver v0.17.2/go.mod h1:lBmw/TtQdtxvrTk0e2cgtOxHizXI+d0mmGQURIHQZlo= -k8s.io/apiserver v0.30.0 h1:QCec+U72tMQ+9tR6A0sMBB5Vh6ImCEkoKkTDRABWq6M= -k8s.io/apiserver v0.30.0/go.mod h1:smOIBq8t0MbKZi7O7SyIpjPsiKJ8qa+llcFCluKyqiY= +k8s.io/apiserver v0.30.1 h1:BEWEe8bzS12nMtDKXzCF5Q5ovp6LjjYkSp8qOPk8LZ8= +k8s.io/apiserver v0.30.1/go.mod h1:i87ZnQ+/PGAmSbD/iEKM68bm1D5reX8fO4Ito4B01mo= k8s.io/cli-runtime v0.30.0 
h1:0vn6/XhOvn1RJ2KJOC6IRR2CGqrpT6QQF4+8pYpWQ48= k8s.io/cli-runtime v0.30.0/go.mod h1:vATpDMATVTMA79sZ0YUCzlMelf6rUjoBzlp+RnoM+cg= k8s.io/client-go v0.17.2/go.mod h1:QAzRgsa0C2xl4/eVpeVAZMvikCn8Nm81yqVx3Kk9XYI= -k8s.io/client-go v0.30.0 h1:sB1AGGlhY/o7KCyCEQ0bPWzYDL0pwOZO4vAtTSh/gJQ= -k8s.io/client-go v0.30.0/go.mod h1:g7li5O5256qe6TYdAMyX/otJqMhIiGgTapdLchhmOaY= +k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= +k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= k8s.io/component-base v0.17.2/go.mod h1:zMPW3g5aH7cHJpKYQ/ZsGMcgbsA/VyhEugF3QT1awLs= -k8s.io/component-base v0.30.0 h1:cj6bp38g0ainlfYtaOQuRELh5KSYjhKxM+io7AUIk4o= -k8s.io/component-base v0.30.0/go.mod h1:V9x/0ePFNaKeKYA3bOvIbrNoluTSG+fSJKjLdjOoeXQ= +k8s.io/component-base v0.30.1 h1:bvAtlPh1UrdaZL20D9+sWxsJljMi0QZ3Lmw+kmZAaxQ= +k8s.io/component-base v0.30.1/go.mod h1:e/X9kDiOebwlI41AvBHuWdqFriSRrX50CdwA9TFaHLI= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/helm v2.17.0+incompatible h1:Bpn6o1wKLYqKM3+Osh8e+1/K2g/GsQJ4F4yNF2+deao= @@ -919,8 +919,8 @@ modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= oras.land/oras-go v1.2.5 h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo= oras.land/oras-go v1.2.5/go.mod h1:PuAwRShRZCsZb7g8Ar3jKKQR/2A/qN+pkYxIOd/FAoo= sigs.k8s.io/controller-runtime v0.5.0/go.mod h1:REiJzC7Y00U+2YkMbT8wxgrsX5USpXKGhb2sCtAXiT8= -sigs.k8s.io/controller-runtime v0.18.2 h1:RqVW6Kpeaji67CY5nPEfRz6ZfFMk0lWQlNrLqlNpx+Q= -sigs.k8s.io/controller-runtime v0.18.2/go.mod h1:tuAt1+wbVsXIT8lPtk5RURxqAnq7xkpv2Mhttslg7Hw= +sigs.k8s.io/controller-runtime v0.18.3 h1:B5Wmmo8WMWK7izei+2LlXLVDGzMwAHBNLX68lwtlSR4= +sigs.k8s.io/controller-runtime v0.18.3/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0= From 3bff455bd3c6376e3779ccce65b613bd277b53e6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 30 May 2024 11:31:14 -0700 Subject: [PATCH 170/193] Bump github.com/aws/aws-sdk-go from 1.53.8 to 1.53.12 in /release/cli (#8230) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.53.8 to 1.53.12. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.53.8...v1.53.12) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- release/cli/go.mod | 2 +- release/cli/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/release/cli/go.mod b/release/cli/go.mod index a43a944fff56..579c70c4e242 100644 --- a/release/cli/go.mod +++ b/release/cli/go.mod @@ -3,7 +3,7 @@ module github.com/aws/eks-anywhere/release/cli go 1.22.3 require ( - github.com/aws/aws-sdk-go v1.53.8 + github.com/aws/aws-sdk-go v1.53.12 github.com/aws/aws-sdk-go-v2 v1.27.0 github.com/aws/eks-anywhere v0.18.0 github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e diff --git a/release/cli/go.sum b/release/cli/go.sum index cc5a8a157721..6e12d8f36c2b 100644 --- a/release/cli/go.sum +++ b/release/cli/go.sum @@ -56,8 +56,8 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:l github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY= github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/aws/aws-sdk-go v1.38.40/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.53.8 h1:eoqGb1WOHIrCFKo1d51cMcnt1ralfLFaEqRkC5Zzv8k= -github.com/aws/aws-sdk-go v1.53.8/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.53.12 h1:8f8K+YaTy2qwtGwVIo2Ftq22UCH96xQAX7Q0lyZKDiA= +github.com/aws/aws-sdk-go v1.53.12/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v1.27.0 h1:7bZWKoXhzI+mMR/HjdMx8ZCC5+6fY0lS5tr0bbgiLlo= github.com/aws/aws-sdk-go-v2 v1.27.0/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e h1:GB6Cn9yKEt31mDF7RrVWyM9WoppNkGYth8zBPIJGJ+w= From a2a19920f4b7b54f6bc21f608ee5ecd5c6f0c45b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 30 May 2024 11:57:30 -0700 Subject: [PATCH 171/193] Bump helm.sh/helm/v3 from 3.15.0 to 3.15.1 in /release/cli (#8223) Bumps [helm.sh/helm/v3](https://github.com/helm/helm) from 3.15.0 to 3.15.1. - [Release notes](https://github.com/helm/helm/releases) - [Commits](https://github.com/helm/helm/compare/v3.15.0...v3.15.1) --- updated-dependencies: - dependency-name: helm.sh/helm/v3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- release/cli/go.mod | 2 +- release/cli/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/release/cli/go.mod b/release/cli/go.mod index 579c70c4e242..609fb83c0f7b 100644 --- a/release/cli/go.mod +++ b/release/cli/go.mod @@ -16,7 +16,7 @@ require ( github.com/spf13/cobra v1.8.0 github.com/spf13/viper v1.18.2 golang.org/x/sync v0.7.0 - helm.sh/helm/v3 v3.15.0 + helm.sh/helm/v3 v3.15.1 k8s.io/apimachinery v0.30.1 k8s.io/helm v2.17.0+incompatible sigs.k8s.io/controller-runtime v0.18.3 diff --git a/release/cli/go.sum b/release/cli/go.sum index 6e12d8f36c2b..0db305bb5311 100644 --- a/release/cli/go.sum +++ b/release/cli/go.sum @@ -868,8 +868,8 @@ gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= -helm.sh/helm/v3 v3.15.0 h1:gcLxHeFp0Hfo7lYi6KIZ84ZyvlAnfFRSJ8lTL3zvG5U= -helm.sh/helm/v3 v3.15.0/go.mod h1:fvfoRcB8UKRUV5jrIfOTaN/pG1TPhuqSb56fjYdTKXg= +helm.sh/helm/v3 v3.15.1 h1:22ztacHz4gMqhXNqCQ9NAg6BFWoRUryNLvnkz6OVyw0= +helm.sh/helm/v3 v3.15.1/go.mod h1:fvfoRcB8UKRUV5jrIfOTaN/pG1TPhuqSb56fjYdTKXg= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 5daadb2b5b4c27adcab3d71dbfe2cc74fe36fe89 Mon Sep 17 00:00:00 2001 From: Abhay Krishna Date: Thu, 30 May 2024 15:06:07 -0700 Subject: [PATCH 172/193] Allow downloading private objects from S3 (#8229) --- release/cli/pkg/assets/archives/archives.go | 2 +- .../cli/pkg/assets/config/bundle_release.go | 2 +- release/cli/pkg/assets/manifests/manifests.go | 2 +- release/cli/pkg/aws/s3/s3.go | 62 ++++++++++++++----- release/cli/pkg/clients/clients.go | 15 +++-- release/cli/pkg/filereader/file_reader.go | 13 +++- release/cli/pkg/images/images.go | 9 ++- release/cli/pkg/operations/download.go | 55 +++++++++++----- release/cli/pkg/operations/upload.go | 6 +- release/cli/pkg/types/types.go | 4 +- release/cli/pkg/util/artifacts/artifacts.go | 4 +- release/cli/pkg/util/release/release.go | 7 ++- 12 files changed, 129 insertions(+), 52 deletions(-) diff --git a/release/cli/pkg/assets/archives/archives.go b/release/cli/pkg/assets/archives/archives.go index 4dc1046ce884..4044c5c49615 100644 --- a/release/cli/pkg/assets/archives/archives.go +++ b/release/cli/pkg/assets/archives/archives.go @@ -235,7 +235,7 @@ func GetArchiveAssets(rc *releasetypes.ReleaseConfig, archive *assettypes.Archiv ProjectPath: projectPath, SourcedFromBranch: sourcedFromBranch, ImageFormat: archive.Format, - PrivateUpload: archive.Private, + Private: archive.Private, } return archiveArtifact, nil diff --git a/release/cli/pkg/assets/config/bundle_release.go b/release/cli/pkg/assets/config/bundle_release.go index 10fa9526dc22..287a878f9aec 100644 --- a/release/cli/pkg/assets/config/bundle_release.go +++ b/release/cli/pkg/assets/config/bundle_release.go @@ -74,7 +74,7 @@ var bundleReleaseAssetsConfigMap = []assettypes.AssetConfig{ Archives: []*assettypes.Archive{ { Name: "rtos", - Format: "ami", + Format: "raw", OSName: 
"ubuntu", OSVersion: "22.04", ArchiveS3PathGetter: archives.RTOSArtifactPathGetter, diff --git a/release/cli/pkg/assets/manifests/manifests.go b/release/cli/pkg/assets/manifests/manifests.go index 7407f8d49f9f..6960363c9322 100644 --- a/release/cli/pkg/assets/manifests/manifests.go +++ b/release/cli/pkg/assets/manifests/manifests.go @@ -68,7 +68,7 @@ func GetManifestAssets(rc *releasetypes.ReleaseConfig, manifestComponent *assett ProjectPath: projectPath, SourcedFromBranch: sourcedFromBranch, Component: componentName, - PrivateUpload: manifestComponent.Private, + Private: manifestComponent.Private, } return manifestArtifact, nil diff --git a/release/cli/pkg/aws/s3/s3.go b/release/cli/pkg/aws/s3/s3.go index ecb922321c5a..27e98f7a6444 100644 --- a/release/cli/pkg/aws/s3/s3.go +++ b/release/cli/pkg/aws/s3/s3.go @@ -22,6 +22,7 @@ import ( "path/filepath" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3manager" "github.com/pkg/errors" @@ -43,7 +44,7 @@ func Read(bucket, key string) (io.ReadCloser, error) { return resp.Body, nil } -func DownloadFile(filePath, bucket, key string) error { +func DownloadFile(filePath, bucket, key string, s3Downloader *s3manager.Downloader, private bool) error { if err := os.MkdirAll(filepath.Dir(filePath), 0o755); err != nil { return errors.Cause(err) } @@ -54,15 +55,25 @@ func DownloadFile(filePath, bucket, key string) error { } defer fd.Close() - body, err := Read(bucket, key) - if err != nil { - return err - } - - defer body.Close() - - if _, err = io.Copy(fd, body); err != nil { - return err + if private { + _, err = s3Downloader.Download(fd, &s3.GetObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + }) + if err != nil { + return err + } + } else { + body, err := Read(bucket, key) + if err != nil { + return err + } + + defer body.Close() + + if _, err = io.Copy(fd, body); err != nil { + return err + } } return nil @@ -93,13 +104,30 @@ func UploadFile(filePath string, bucket, key *string, s3Uploader *s3manager.Uplo return nil } -func KeyExists(bucket, key string) bool { - objectUrl := fmt.Sprintf("https://%s.s3.amazonaws.com/%s", bucket, key) - - resp, err := http.Head(objectUrl) - if err != nil || resp.StatusCode != http.StatusOK { - return false +func KeyExists(s3Client *s3.S3, bucket, key string, private bool) (bool, error) { + if private { + _, err := s3Client.HeadObject(&s3.HeadObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + }) + if err != nil { + if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" { + return false, nil + } + return false, fmt.Errorf("calling S3 HeadObject API to check if object is present: %v", err) + } + } else { + objectUrl := fmt.Sprintf("https://%s.s3.amazonaws.com/%s", bucket, key) + + resp, err := http.Head(objectUrl) + if err != nil { + return false, fmt.Errorf("making HTTP HEAD request to check if object is present: %v", err) + } + + if resp.StatusCode != http.StatusOK { + return false, nil + } } - return true + return true, nil } diff --git a/release/cli/pkg/clients/clients.go b/release/cli/pkg/clients/clients.go index ab22cda2d33c..88cbeca53476 100644 --- a/release/cli/pkg/clients/clients.go +++ b/release/cli/pkg/clients/clients.go @@ -41,7 +41,8 @@ type ReleaseClients struct { } type SourceS3Clients struct { - Client *s3.S3 + Client *s3.S3 + Downloader *s3manager.Downloader } type ReleaseS3Clients struct { @@ -88,6 +89,7 @@ func CreateDevReleaseClients(dryRun bool) 
(*SourceClients, *ReleaseClients, erro // S3 client and uploader s3Client := s3.New(pdxSession) + downloader := s3manager.NewDownloader(pdxSession) uploader := s3manager.NewUploader(pdxSession) // Get source ECR auth config @@ -107,7 +109,8 @@ func CreateDevReleaseClients(dryRun bool) (*SourceClients, *ReleaseClients, erro // Constructing source clients sourceClients := &SourceClients{ S3: &SourceS3Clients{ - Client: s3Client, + Client: s3Client, + Downloader: downloader, }, ECR: &SourceECRClient{ EcrClient: ecrClient, @@ -162,6 +165,7 @@ func CreateStagingReleaseClients() (*SourceClients, *ReleaseClients, error) { // Release S3 client and uploader releaseS3Client := s3.New(releaseSession) + downloader := s3manager.NewDownloader(releaseSession) uploader := s3manager.NewUploader(releaseSession) // Get source ECR auth config @@ -181,7 +185,8 @@ func CreateStagingReleaseClients() (*SourceClients, *ReleaseClients, error) { // Constructing source clients sourceClients := &SourceClients{ S3: &SourceS3Clients{ - Client: sourceS3Client, + Client: sourceS3Client, + Downloader: downloader, }, ECR: &SourceECRClient{ EcrClient: ecrClient, @@ -237,6 +242,7 @@ func CreateProdReleaseClients() (*SourceClients, *ReleaseClients, error) { // Release S3 client and uploader releaseS3Client := s3.New(releaseSession) + downloader := s3manager.NewDownloader(releaseSession) uploader := s3manager.NewUploader(releaseSession) // Get source ECR Public auth config @@ -256,7 +262,8 @@ func CreateProdReleaseClients() (*SourceClients, *ReleaseClients, error) { // Constructing release clients sourceClients := &SourceClients{ S3: &SourceS3Clients{ - Client: sourceS3Client, + Client: sourceS3Client, + Downloader: downloader, }, ECR: &SourceECRClient{ EcrPublicClient: sourceEcrPublicClient, diff --git a/release/cli/pkg/filereader/file_reader.go b/release/cli/pkg/filereader/file_reader.go index 9ad874f468d2..077155625970 100644 --- a/release/cli/pkg/filereader/file_reader.go +++ b/release/cli/pkg/filereader/file_reader.go @@ -180,6 +180,9 @@ func GetEksDReleaseManifestUrl(releaseChannel, releaseNumber string, dev bool) s // GetNextEksADevBuildNumber computes next eksa dev build number for the current eks-a dev build func GetNextEksADevBuildNumber(releaseVersion string, r *releasetypes.ReleaseConfig) (int, error) { + if r.DryRun { + return 0, nil + } tempFileName := "latest-dev-release-version" var latestReleaseKey, latestBuildVersion string @@ -189,8 +192,14 @@ func GetNextEksADevBuildNumber(releaseVersion string, r *releasetypes.ReleaseCon } else { latestReleaseKey = fmt.Sprintf("%s/LATEST_RELEASE_VERSION", r.BuildRepoBranchName) } - if s3.KeyExists(r.ReleaseBucket, latestReleaseKey) { - err := s3.DownloadFile(tempFileName, r.ReleaseBucket, latestReleaseKey) + + keyExists, err := s3.KeyExists(r.ReleaseClients.S3.Client, r.ReleaseBucket, latestReleaseKey, false) + if err != nil { + return -1, errors.Cause(err) + } + + if keyExists { + err := s3.DownloadFile(tempFileName, r.ReleaseBucket, latestReleaseKey, r.SourceClients.S3.Downloader, false) if err != nil { return -1, errors.Cause(err) } diff --git a/release/cli/pkg/images/images.go b/release/cli/pkg/images/images.go index eba40a04bc84..51e42f68fcf2 100644 --- a/release/cli/pkg/images/images.go +++ b/release/cli/pkg/images/images.go @@ -83,7 +83,7 @@ func PollForExistence(devRelease bool, authConfig *docker.AuthConfiguration, ima bodyStr := string(body) if strings.Contains(bodyStr, "MANIFEST_UNKNOWN") { - return fmt.Errorf("requested image not found") + return 
fmt.Errorf("requested image not found: %v", imageUri) } return nil @@ -321,7 +321,12 @@ func GetPreviousReleaseImageSemver(r *releasetypes.ReleaseConfig, releaseImageUr bundles := &anywherev1alpha1.Bundles{} bundleReleaseManifestKey := r.BundlesManifestFilepath() bundleManifestUrl := fmt.Sprintf("https://%s.s3.amazonaws.com/%s", r.ReleaseBucket, bundleReleaseManifestKey) - if s3.KeyExists(r.ReleaseBucket, bundleReleaseManifestKey) { + + keyExists, err := s3.KeyExists(r.ReleaseClients.S3.Client, r.ReleaseBucket, bundleReleaseManifestKey, false) + if err != nil { + return "", fmt.Errorf("checking if object [%s] is present in S3 bucket: %v", bundleReleaseManifestKey, err) + } + if keyExists { contents, err := filereader.ReadHttpFile(bundleManifestUrl) if err != nil { return "", fmt.Errorf("Error reading bundle manifest from S3: %v", err) diff --git a/release/cli/pkg/operations/download.go b/release/cli/pkg/operations/download.go index 64c5b63be6f8..f3c87ed7eb62 100644 --- a/release/cli/pkg/operations/download.go +++ b/release/cli/pkg/operations/download.go @@ -21,6 +21,8 @@ import ( "strings" "time" + s3sdk "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3manager" "github.com/pkg/errors" "golang.org/x/sync/errgroup" @@ -42,6 +44,12 @@ func DownloadArtifacts(ctx context.Context, r *releasetypes.ReleaseConfig, eksaA } return false, 0 })) + var s3Client *s3sdk.S3 + var s3Downloader *s3manager.Downloader + if !r.DryRun { + s3Client = r.SourceClients.S3.Client + s3Downloader = r.SourceClients.S3.Downloader + } fmt.Println("==========================================================") fmt.Println(" Artifacts Download") fmt.Println("==========================================================") @@ -55,12 +63,12 @@ func DownloadArtifacts(ctx context.Context, r *releasetypes.ReleaseConfig, eksaA errGroup.Go(func() error { // Check if there is an archive to be downloaded if artifact.Archive != nil { - return handleArchiveDownload(ctx, r, artifact, s3Retrier) + return handleArchiveDownload(ctx, r, artifact, s3Retrier, s3Client, s3Downloader) } // Check if there is a manifest to be downloaded if artifact.Manifest != nil { - return handleManifestDownload(ctx, r, artifact, s3Retrier) + return handleManifestDownload(ctx, r, artifact, s3Retrier, s3Client, s3Downloader) } return nil @@ -76,7 +84,7 @@ func DownloadArtifacts(ctx context.Context, r *releasetypes.ReleaseConfig, eksaA return nil } -func handleArchiveDownload(_ context.Context, r *releasetypes.ReleaseConfig, artifact releasetypes.Artifact, s3Retrier *retrier.Retrier) error { +func handleArchiveDownload(_ context.Context, r *releasetypes.ReleaseConfig, artifact releasetypes.Artifact, s3Retrier *retrier.Retrier, s3Client *s3sdk.S3, s3Downloader *s3manager.Downloader) error { sourceS3Prefix := artifact.Archive.SourceS3Prefix sourceS3Key := artifact.Archive.SourceS3Key artifactPath := artifact.Archive.ArtifactPath @@ -87,8 +95,13 @@ func handleArchiveDownload(_ context.Context, r *releasetypes.ReleaseConfig, art fmt.Println("Skipping OS image downloads in dry-run mode") } else { err := s3Retrier.Retry(func() error { - if !s3.KeyExists(r.SourceBucket, objectKey) { - return fmt.Errorf("requested object not found") + keyExists, err := s3.KeyExists(s3Client, r.SourceBucket, objectKey, artifact.Archive.Private) + if err != nil { + return fmt.Errorf("checking if object [%s] is present in S3 bucket: %v", objectKey, err) + } + + if !keyExists { + return fmt.Errorf("requested object not found: %v", objectKey) } return nil }) @@ 
-107,11 +120,11 @@ func handleArchiveDownload(_ context.Context, r *releasetypes.ReleaseConfig, art } objectKey = filepath.Join(latestSourceS3PrefixFromMain, sourceS3Key) } else { - return fmt.Errorf("retries exhausted waiting for archive [%s] to be uploaded to source location: %v", objectKey, err) + return fmt.Errorf("retries exhausted waiting for source archive [%s] to be available for download: %v", objectKey, err) } } - err = s3.DownloadFile(objectLocalFilePath, r.SourceBucket, objectKey) + err = s3.DownloadFile(objectLocalFilePath, r.SourceBucket, objectKey, s3Downloader, artifact.Archive.Private) if err != nil { return errors.Cause(err) } @@ -136,8 +149,13 @@ func handleArchiveDownload(_ context.Context, r *releasetypes.ReleaseConfig, art fmt.Printf("Checksum file - %s\n", objectShasumFileKey) err := s3Retrier.Retry(func() error { - if !s3.KeyExists(r.SourceBucket, objectShasumFileKey) { - return fmt.Errorf("requested object not found") + keyExists, err := s3.KeyExists(s3Client, r.SourceBucket, objectShasumFileKey, artifact.Archive.Private) + if err != nil { + return fmt.Errorf("checking if object [%s] is present in S3 bucket: %v", objectShasumFileKey, err) + } + + if !keyExists { + return fmt.Errorf("requested object not found: %v", objectShasumFileKey) } return nil }) @@ -156,11 +174,11 @@ func handleArchiveDownload(_ context.Context, r *releasetypes.ReleaseConfig, art } objectShasumFileKey = filepath.Join(latestSourceS3PrefixFromMain, objectShasumFileName) } else { - return fmt.Errorf("retries exhausted waiting for checksum file [%s] to be uploaded to source location: %v", objectShasumFileKey, err) + return fmt.Errorf("retries exhausted waiting for source checksum file [%s] to be available for download: %v", objectShasumFileKey, err) } } - err = s3.DownloadFile(objectShasumFileLocalFilePath, r.SourceBucket, objectShasumFileKey) + err = s3.DownloadFile(objectShasumFileLocalFilePath, r.SourceBucket, objectShasumFileKey, s3Downloader, artifact.Archive.Private) if err != nil { return errors.Cause(err) } @@ -170,7 +188,7 @@ func handleArchiveDownload(_ context.Context, r *releasetypes.ReleaseConfig, art return nil } -func handleManifestDownload(_ context.Context, r *releasetypes.ReleaseConfig, artifact releasetypes.Artifact, s3Retrier *retrier.Retrier) error { +func handleManifestDownload(_ context.Context, r *releasetypes.ReleaseConfig, artifact releasetypes.Artifact, s3Retrier *retrier.Retrier, s3Client *s3sdk.S3, s3Downloader *s3manager.Downloader) error { sourceS3Prefix := artifact.Manifest.SourceS3Prefix sourceS3Key := artifact.Manifest.SourceS3Key artifactPath := artifact.Manifest.ArtifactPath @@ -179,8 +197,13 @@ func handleManifestDownload(_ context.Context, r *releasetypes.ReleaseConfig, ar fmt.Printf("Manifest - %s\n", objectKey) err := s3Retrier.Retry(func() error { - if !s3.KeyExists(r.SourceBucket, objectKey) { - return fmt.Errorf("requested object not found") + keyExists, err := s3.KeyExists(s3Client, r.SourceBucket, objectKey, artifact.Manifest.Private) + if err != nil { + return fmt.Errorf("checking if object [%s] is present in S3 bucket: %v", objectKey, err) + } + + if !keyExists { + return fmt.Errorf("requested object not found: %v", objectKey) } return nil }) @@ -194,11 +217,11 @@ func handleManifestDownload(_ context.Context, r *releasetypes.ReleaseConfig, ar latestSourceS3PrefixFromMain := strings.NewReplacer(r.BuildRepoBranchName, "latest", artifact.Manifest.GitTag, gitTagFromMain).Replace(sourceS3Prefix) objectKey = filepath.Join(latestSourceS3PrefixFromMain, 
sourceS3Key) } else { - return fmt.Errorf("retries exhausted waiting for manifest [%s] to be uploaded to source location: %v", objectKey, err) + return fmt.Errorf("retries exhausted waiting for source manifest [%s] to be available for download: %v", objectKey, err) } } - err = s3.DownloadFile(objectLocalFilePath, r.SourceBucket, objectKey) + err = s3.DownloadFile(objectLocalFilePath, r.SourceBucket, objectKey, s3Downloader, artifact.Manifest.Private) if err != nil { return errors.Cause(err) } diff --git a/release/cli/pkg/operations/upload.go b/release/cli/pkg/operations/upload.go index bf972ad7646e..d5fa633a1f28 100644 --- a/release/cli/pkg/operations/upload.go +++ b/release/cli/pkg/operations/upload.go @@ -92,7 +92,7 @@ func handleArchiveUpload(_ context.Context, r *releasetypes.ReleaseConfig, artif archiveFile := filepath.Join(artifact.Archive.ArtifactPath, artifact.Archive.ReleaseName) fmt.Printf("Archive - %s\n", archiveFile) key := filepath.Join(artifact.Archive.ReleaseS3Path, artifact.Archive.ReleaseName) - err := s3.UploadFile(archiveFile, aws.String(r.ReleaseBucket), aws.String(key), r.ReleaseClients.S3.Uploader, artifact.Archive.PrivateUpload) + err := s3.UploadFile(archiveFile, aws.String(r.ReleaseBucket), aws.String(key), r.ReleaseClients.S3.Uploader, artifact.Archive.Private) if err != nil { return errors.Cause(err) } @@ -109,7 +109,7 @@ func handleArchiveUpload(_ context.Context, r *releasetypes.ReleaseConfig, artif checksumFile := filepath.Join(artifact.Archive.ArtifactPath, artifact.Archive.ReleaseName) + extension fmt.Printf("Checksum - %s\n", checksumFile) key := filepath.Join(artifact.Archive.ReleaseS3Path, artifact.Archive.ReleaseName) + extension - err := s3.UploadFile(checksumFile, aws.String(r.ReleaseBucket), aws.String(key), r.ReleaseClients.S3.Uploader, artifact.Archive.PrivateUpload) + err := s3.UploadFile(checksumFile, aws.String(r.ReleaseBucket), aws.String(key), r.ReleaseClients.S3.Uploader, artifact.Archive.Private) if err != nil { return errors.Cause(err) } @@ -122,7 +122,7 @@ func handleManifestUpload(_ context.Context, r *releasetypes.ReleaseConfig, arti manifestFile := filepath.Join(artifact.Manifest.ArtifactPath, artifact.Manifest.ReleaseName) fmt.Printf("Manifest - %s\n", manifestFile) key := filepath.Join(artifact.Manifest.ReleaseS3Path, artifact.Manifest.ReleaseName) - err := s3.UploadFile(manifestFile, aws.String(r.ReleaseBucket), aws.String(key), r.ReleaseClients.S3.Uploader, artifact.Manifest.PrivateUpload) + err := s3.UploadFile(manifestFile, aws.String(r.ReleaseBucket), aws.String(key), r.ReleaseClients.S3.Uploader, artifact.Manifest.Private) if err != nil { return errors.Cause(err) } diff --git a/release/cli/pkg/types/types.go b/release/cli/pkg/types/types.go index 3bb7f374b8c6..245ed7feb1c3 100644 --- a/release/cli/pkg/types/types.go +++ b/release/cli/pkg/types/types.go @@ -77,7 +77,7 @@ type ArchiveArtifact struct { ProjectPath string SourcedFromBranch string ImageFormat string - PrivateUpload bool + Private bool } type ImageArtifact struct { @@ -103,7 +103,7 @@ type ManifestArtifact struct { ProjectPath string SourcedFromBranch string Component string - PrivateUpload bool + Private bool } type Artifact struct { diff --git a/release/cli/pkg/util/artifacts/artifacts.go b/release/cli/pkg/util/artifacts/artifacts.go index 55c2fc309c82..7281ae7c0a57 100644 --- a/release/cli/pkg/util/artifacts/artifacts.go +++ b/release/cli/pkg/util/artifacts/artifacts.go @@ -23,11 +23,11 @@ import ( ) func IsObjectNotFoundError(err error) bool { - return err.Error() 
== "requested object not found" + return strings.Contains(err.Error(), "requested object not found") } func IsImageNotFoundError(err error) bool { - return err.Error() == "requested image not found" + return strings.Contains(err.Error(), "requested image not found") } func GetFakeSHA(hashType int) (string, error) { diff --git a/release/cli/pkg/util/release/release.go b/release/cli/pkg/util/release/release.go index 48f74b2d80ce..1d8842a3e0e0 100644 --- a/release/cli/pkg/util/release/release.go +++ b/release/cli/pkg/util/release/release.go @@ -40,7 +40,12 @@ func GetPreviousReleaseIfExists(r *releasetypes.ReleaseConfig) (*anywherev1alpha release := &anywherev1alpha1.Release{} eksAReleaseManifestKey := r.ReleaseManifestFilepath() - if !s3.KeyExists(r.ReleaseBucket, eksAReleaseManifestKey) { + keyExists, err := s3.KeyExists(r.ReleaseClients.S3.Client, r.ReleaseBucket, eksAReleaseManifestKey, false) + if err != nil { + return nil, fmt.Errorf("checking if object [%s] is present in S3 bucket: %v", eksAReleaseManifestKey, err) + } + + if !keyExists { return emptyRelease, nil } From 431ef3d802c23e7989068c059e554ad3e74ce922 Mon Sep 17 00:00:00 2001 From: Abhay Krishna Date: Thu, 30 May 2024 17:07:33 -0700 Subject: [PATCH 173/193] Add Tinkerbell E2E test for Ubuntu 22.04 RTOS image (#8225) --- .../buildspecs/tinkerbell-test-eks-a-cli.yml | 1 + internal/test/e2e/tinkerbell.go | 3 +- test/e2e/TINKERBELL_HARDWARE_COUNT.yaml | 3 + test/e2e/tinkerbell_test.go | 53 +++++++++++++ test/framework/cloudstack.go | 2 +- test/framework/cluster.go | 4 +- test/framework/docker.go | 2 +- test/framework/nutanix.go | 2 +- test/framework/snow.go | 2 +- test/framework/tinkerbell.go | 76 +++++++++++-------- test/framework/vsphere.go | 4 +- 11 files changed, 110 insertions(+), 42 deletions(-) diff --git a/cmd/integration_test/build/buildspecs/tinkerbell-test-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/tinkerbell-test-eks-a-cli.yml index 9759bb0c6b25..f16b3e232abe 100644 --- a/cmd/integration_test/build/buildspecs/tinkerbell-test-eks-a-cli.yml +++ b/cmd/integration_test/build/buildspecs/tinkerbell-test-eks-a-cli.yml @@ -44,6 +44,7 @@ env: T_TINKERBELL_IMAGE_UBUNTU_2204_1_27: "tinkerbell_ci:image_ubuntu_2204_1_27" T_TINKERBELL_IMAGE_UBUNTU_2204_1_28: "tinkerbell_ci:image_ubuntu_2204_1_28" T_TINKERBELL_IMAGE_UBUNTU_2204_1_29: "tinkerbell_ci:image_ubuntu_2204_1_29" + T_TINKERBELL_IMAGE_UBUNTU_2204_1_29_RTOS: "tinkerbell_ci:image_ubuntu_2204_1_29_rtos" T_TINKERBELL_IMAGE_UBUNTU_2204_1_30: "tinkerbell_ci:image_ubuntu_2204_1_30" T_TINKERBELL_IMAGE_REDHAT_1_24: "tinkerbell_ci:image_redhat_1_24" T_TINKERBELL_IMAGE_REDHAT_1_25: "tinkerbell_ci:image_redhat_1_25" diff --git a/internal/test/e2e/tinkerbell.go b/internal/test/e2e/tinkerbell.go index a9ca4532662d..796cf449841b 100644 --- a/internal/test/e2e/tinkerbell.go +++ b/internal/test/e2e/tinkerbell.go @@ -23,6 +23,7 @@ const ( maxHardwarePerE2ETestEnvVar = "T_TINKERBELL_MAX_HARDWARE_PER_TEST" tinkerbellDefaultMaxHardwarePerE2ETest = 4 tinkerbellBootstrapInterfaceEnvVar = "T_TINKERBELL_BOOTSTRAP_INTERFACE" + tinkerbellCIEnvironmentEnvVar = "T_TINKERBELL_CI_ENVIRONMENT" ) // TinkerbellTest maps each Tinkbell test with the hardware count needed for the test. 
@@ -80,7 +81,7 @@ func (e *E2ESession) setupTinkerbellEnv(testRegex string) error { } e.testEnvVars[tinkerbellInventoryCsvFilePathEnvVar] = inventoryFilePath - e.testEnvVars[e2etests.TinkerbellCIEnvironment] = "true" + e.testEnvVars[tinkerbellCIEnvironmentEnvVar] = "true" return nil } diff --git a/test/e2e/TINKERBELL_HARDWARE_COUNT.yaml b/test/e2e/TINKERBELL_HARDWARE_COUNT.yaml index 49d1f909819b..74e723771283 100644 --- a/test/e2e/TINKERBELL_HARDWARE_COUNT.yaml +++ b/test/e2e/TINKERBELL_HARDWARE_COUNT.yaml @@ -66,11 +66,13 @@ TestTinkerbellKubernetes129UbuntuTo130Upgrade: 4 TestTinkerbellKubernetes126To127Ubuntu2204Upgrade: 4 TestTinkerbellKubernetes127To128Ubuntu2204Upgrade: 4 TestTinkerbellKubernetes128To129Ubuntu2204Upgrade: 4 +TestTinkerbellKubernetes128To129Ubuntu2204RTOSUpgrade: 4 TestTinkerbellKubernetes129To130Ubuntu2204Upgrade: 4 TestTinkerbellKubernetes126Ubuntu2004To2204Upgrade: 4 TestTinkerbellKubernetes127Ubuntu2004To2204Upgrade: 4 TestTinkerbellKubernetes128Ubuntu2004To2204Upgrade: 4 TestTinkerbellKubernetes129Ubuntu2004To2204Upgrade: 4 +TestTinkerbellKubernetes129Ubuntu2004To2204RTOSUpgrade: 4 TestTinkerbellKubernetes130Ubuntu2004To2204Upgrade: 4 TestTinkerbellUpgrade130MulticlusterWorkloadClusterWorkerScaleupGitFluxWithAPI: 4 TestTinkerbellUpgrade130MulticlusterWorkloadClusterCPScaleup: 6 @@ -91,6 +93,7 @@ TestTinkerbellKubernetes126Ubuntu2204SimpleFlow: 2 TestTinkerbellKubernetes127Ubuntu2204SimpleFlow: 2 TestTinkerbellKubernetes128Ubuntu2204SimpleFlow: 2 TestTinkerbellKubernetes129Ubuntu2204SimpleFlow: 2 +TestTinkerbellKubernetes129Ubuntu2204RTOSSimpleFlow: 2 TestTinkerbellKubernetes130Ubuntu2204SimpleFlow: 2 TestTinkerbellKubernetes126RedHatSimpleFlow: 2 TestTinkerbellKubernetes127RedHatSimpleFlow: 2 diff --git a/test/e2e/tinkerbell_test.go b/test/e2e/tinkerbell_test.go index e3766fd3a06a..ca7335761db9 100644 --- a/test/e2e/tinkerbell_test.go +++ b/test/e2e/tinkerbell_test.go @@ -216,6 +216,26 @@ func TestTinkerbellKubernetes128To129Ubuntu2204Upgrade(t *testing.T) { ) } +func TestTinkerbellKubernetes128To129Ubuntu2204RTOSUpgrade(t *testing.T) { + provider := framework.NewTinkerbell(t) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithControlPlaneHardware(2), + framework.WithWorkerHardware(2), + ).WithClusterConfig( + provider.WithKubeVersionAndOS(v1alpha1.Kube128, framework.Ubuntu2204, nil), + ) + runSimpleUpgradeFlowForBaremetalWithoutClusterConfigGeneration( + test, + v1alpha1.Kube129, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube129)), + provider.WithProviderUpgrade(framework.Ubuntu2204Kubernetes129RTOSImage()), + ) +} + func TestTinkerbellKubernetes129To130Ubuntu2204Upgrade(t *testing.T) { provider := framework.NewTinkerbell(t) test := framework.NewClusterE2ETest( @@ -316,6 +336,26 @@ func TestTinkerbellKubernetes129Ubuntu2004To2204Upgrade(t *testing.T) { ) } +func TestTinkerbellKubernetes129Ubuntu2004To2204RTOSUpgrade(t *testing.T) { + provider := framework.NewTinkerbell(t) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithControlPlaneHardware(2), + framework.WithWorkerHardware(2), + ).WithClusterConfig( + provider.WithKubeVersionAndOS(v1alpha1.Kube129, framework.Ubuntu2004, nil), + ) + runSimpleUpgradeFlowForBaremetalWithoutClusterConfigGeneration( + test, + 
v1alpha1.Kube129, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube129)), + provider.WithProviderUpgrade(framework.Ubuntu2204Kubernetes129RTOSImage()), + ) +} + func TestTinkerbellKubernetes130Ubuntu2004To2204Upgrade(t *testing.T) { provider := framework.NewTinkerbell(t) test := framework.NewClusterE2ETest( @@ -1346,6 +1386,19 @@ func TestTinkerbellKubernetes129Ubuntu2204SimpleFlow(t *testing.T) { runTinkerbellSimpleFlowWithoutClusterConfigGeneration(test) } +func TestTinkerbellKubernetes129Ubuntu2204RTOSSimpleFlow(t *testing.T) { + provider := framework.NewTinkerbell(t) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithControlPlaneHardware(1), + framework.WithWorkerHardware(1), + ).WithClusterConfig( + provider.WithKubeVersionAndOS(v1alpha1.Kube129, framework.Ubuntu2204, nil, true), + ) + runTinkerbellSimpleFlowWithoutClusterConfigGeneration(test) +} + func TestTinkerbellKubernetes130Ubuntu2204SimpleFlow(t *testing.T) { provider := framework.NewTinkerbell(t) test := framework.NewClusterE2ETest( diff --git a/test/framework/cloudstack.go b/test/framework/cloudstack.go index ee1040862b66..439a2243efff 100644 --- a/test/framework/cloudstack.go +++ b/test/framework/cloudstack.go @@ -430,7 +430,7 @@ func (c *CloudStack) ClusterStateValidations() []clusterf.StateValidation { // WithKubeVersionAndOS returns a cluster config filler that sets the cluster kube version and the right template for all // cloudstack machine configs. -func (c *CloudStack) WithKubeVersionAndOS(kubeVersion anywherev1.KubernetesVersion, os OS, release *releasev1.EksARelease) api.ClusterConfigFiller { +func (c *CloudStack) WithKubeVersionAndOS(kubeVersion anywherev1.KubernetesVersion, os OS, release *releasev1.EksARelease, _ ...bool) api.ClusterConfigFiller { return api.JoinClusterConfigFillers( api.ClusterToConfigFiller(api.WithKubernetesVersion(kubeVersion)), api.CloudStackToConfigFiller( diff --git a/test/framework/cluster.go b/test/framework/cluster.go index 333024bab648..f6c72af2afa7 100644 --- a/test/framework/cluster.go +++ b/test/framework/cluster.go @@ -150,7 +150,7 @@ func NewClusterE2ETest(t T, provider Provider, opts ...ClusterE2ETestOpt) *Clust e.T.Cleanup(func() { e.cleanupResources() - tinkerbellCIEnvironment := os.Getenv(TinkerbellCIEnvironment) + tinkerbellCIEnvironment := os.Getenv(tinkerbellCIEnvironmentEnvVar) if e.Provider.Name() == tinkerbellProviderName && tinkerbellCIEnvironment == "true" { e.CleanupDockerEnvironment() } @@ -344,7 +344,7 @@ type Provider interface { CleanupResources(clusterName string) error UpdateKubeConfig(content *[]byte, clusterName string) error ClusterStateValidations() []clusterf.StateValidation - WithKubeVersionAndOS(kubeVersion v1alpha1.KubernetesVersion, os OS, release *releasev1.EksARelease) api.ClusterConfigFiller + WithKubeVersionAndOS(kubeVersion v1alpha1.KubernetesVersion, os OS, release *releasev1.EksARelease, rtos ...bool) api.ClusterConfigFiller WithNewWorkerNodeGroup(name string, workerNodeGroup *WorkerNodeGroup) api.ClusterConfigFiller } diff --git a/test/framework/docker.go b/test/framework/docker.go index bac169e8ae1a..430a1154feaa 100644 --- a/test/framework/docker.go +++ b/test/framework/docker.go @@ -85,7 +85,7 @@ func (d *Docker) ClusterStateValidations() []clusterf.StateValidation { } // WithKubeVersionAndOS returns a cluster config filler that sets the cluster kube version. 
-func (d *Docker) WithKubeVersionAndOS(kubeVersion anywherev1.KubernetesVersion, os OS, release *releasev1.EksARelease) api.ClusterConfigFiller { +func (d *Docker) WithKubeVersionAndOS(kubeVersion anywherev1.KubernetesVersion, _ OS, _ *releasev1.EksARelease, _ ...bool) api.ClusterConfigFiller { return api.JoinClusterConfigFillers( api.ClusterToConfigFiller(api.WithKubernetesVersion(kubeVersion)), ) diff --git a/test/framework/nutanix.go b/test/framework/nutanix.go index 0e3aa0929176..4201bd6757c5 100644 --- a/test/framework/nutanix.go +++ b/test/framework/nutanix.go @@ -197,7 +197,7 @@ func (n *Nutanix) WithProviderUpgrade(fillers ...api.NutanixFiller) ClusterE2ETe // WithKubeVersionAndOS returns a cluster config filler that sets the cluster kube version and the right template for all // nutanix machine configs. -func (n *Nutanix) WithKubeVersionAndOS(kubeVersion anywherev1.KubernetesVersion, os OS, release *releasev1.EksARelease) api.ClusterConfigFiller { +func (n *Nutanix) WithKubeVersionAndOS(_ anywherev1.KubernetesVersion, _ OS, _ *releasev1.EksARelease, _ ...bool) api.ClusterConfigFiller { // TODO: Update tests to use this panic("Not implemented for Nutanix yet") } diff --git a/test/framework/snow.go b/test/framework/snow.go index 9e0d5cc67ea0..386d86e6402d 100644 --- a/test/framework/snow.go +++ b/test/framework/snow.go @@ -330,7 +330,7 @@ func (s *Snow) withBottlerocketStaticIPForKubeVersion(kubeVersion anywherev1.Kub // WithKubeVersionAndOS returns a cluster config filler that sets the cluster kube version and the correct AMI ID // and devices for the Snow machine configs. -func (s *Snow) WithKubeVersionAndOS(kubeVersion anywherev1.KubernetesVersion, os OS, release *releasev1.EksARelease) api.ClusterConfigFiller { +func (s *Snow) WithKubeVersionAndOS(kubeVersion anywherev1.KubernetesVersion, os OS, _ *releasev1.EksARelease, _ ...bool) api.ClusterConfigFiller { envar := fmt.Sprintf("T_SNOW_AMIID_%s_%s", strings.ToUpper(strings.ReplaceAll(string(os), "-", "_")), strings.ReplaceAll(string(kubeVersion), ".", "_")) return api.JoinClusterConfigFillers( diff --git a/test/framework/tinkerbell.go b/test/framework/tinkerbell.go index 1e6b89fc7525..bcc5fe71e87e 100644 --- a/test/framework/tinkerbell.go +++ b/test/framework/tinkerbell.go @@ -14,32 +14,33 @@ import ( ) const ( - tinkerbellProviderName = "tinkerbell" - tinkerbellBootstrapIPEnvVar = "T_TINKERBELL_BOOTSTRAP_IP" - tinkerbellControlPlaneNetworkCidrEnvVar = "T_TINKERBELL_CP_NETWORK_CIDR" - tinkerbellImageUbuntu125EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_1_25" - tinkerbellImageUbuntu126EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_1_26" - tinkerbellImageUbuntu127EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_1_27" - tinkerbellImageUbuntu128EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_1_28" - tinkerbellImageUbuntu129EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_1_29" - tinkerbellImageUbuntu130EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_1_30" - tinkerbellImageUbuntu2204Kubernetes125EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_25" - tinkerbellImageUbuntu2204Kubernetes126EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_26" - tinkerbellImageUbuntu2204Kubernetes127EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_27" - tinkerbellImageUbuntu2204Kubernetes128EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_28" - tinkerbellImageUbuntu2204Kubernetes129EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_29" - tinkerbellImageUbuntu2204Kubernetes130EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_30" - tinkerbellImageRedHat125EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_25" - tinkerbellImageRedHat126EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_26" 
- tinkerbellImageRedHat127EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_27" - tinkerbellImageRedHat128EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_28" - tinkerbellImageRedHat129EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_29" - tinkerbellImageRedHat130EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_30" - tinkerbellInventoryCsvFilePathEnvVar = "T_TINKERBELL_INVENTORY_CSV" - tinkerbellSSHAuthorizedKey = "T_TINKERBELL_SSH_AUTHORIZED_KEY" - TinkerbellCIEnvironment = "T_TINKERBELL_CI_ENVIRONMENT" - controlPlaneIdentifier = "cp" - workerIdentifier = "worker" + tinkerbellProviderName = "tinkerbell" + tinkerbellBootstrapIPEnvVar = "T_TINKERBELL_BOOTSTRAP_IP" + tinkerbellControlPlaneNetworkCidrEnvVar = "T_TINKERBELL_CP_NETWORK_CIDR" + tinkerbellImageUbuntu125EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_1_25" + tinkerbellImageUbuntu126EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_1_26" + tinkerbellImageUbuntu127EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_1_27" + tinkerbellImageUbuntu128EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_1_28" + tinkerbellImageUbuntu129EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_1_29" + tinkerbellImageUbuntu130EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_1_30" + tinkerbellImageUbuntu2204Kubernetes125EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_25" + tinkerbellImageUbuntu2204Kubernetes126EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_26" + tinkerbellImageUbuntu2204Kubernetes127EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_27" + tinkerbellImageUbuntu2204Kubernetes128EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_28" + tinkerbellImageUbuntu2204Kubernetes129EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_29" + tinkerbellImageUbuntu2204Kubernetes129RTOSEnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_29_RTOS" + tinkerbellImageUbuntu2204Kubernetes130EnvVar = "T_TINKERBELL_IMAGE_UBUNTU_2204_1_30" + tinkerbellImageRedHat125EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_25" + tinkerbellImageRedHat126EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_26" + tinkerbellImageRedHat127EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_27" + tinkerbellImageRedHat128EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_28" + tinkerbellImageRedHat129EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_29" + tinkerbellImageRedHat130EnvVar = "T_TINKERBELL_IMAGE_REDHAT_1_30" + tinkerbellInventoryCsvFilePathEnvVar = "T_TINKERBELL_INVENTORY_CSV" + tinkerbellSSHAuthorizedKey = "T_TINKERBELL_SSH_AUTHORIZED_KEY" + tinkerbellCIEnvironmentEnvVar = "T_TINKERBELL_CI_ENVIRONMENT" + controlPlaneIdentifier = "cp" + workerIdentifier = "worker" ) var requiredTinkerbellEnvVars = []string{ @@ -55,6 +56,7 @@ var requiredTinkerbellEnvVars = []string{ tinkerbellImageUbuntu2204Kubernetes127EnvVar, tinkerbellImageUbuntu2204Kubernetes128EnvVar, tinkerbellImageUbuntu2204Kubernetes129EnvVar, + tinkerbellImageUbuntu2204Kubernetes129RTOSEnvVar, tinkerbellImageUbuntu2204Kubernetes130EnvVar, tinkerbellImageRedHat125EnvVar, tinkerbellImageRedHat126EnvVar, @@ -154,11 +156,11 @@ func (t *Tinkerbell) CleanupResources(_ string) error { // WithKubeVersionAndOS returns a cluster config filler that sets the cluster kube version and the right image for all // tinkerbell machine configs. 
-func (t *Tinkerbell) WithKubeVersionAndOS(kubeVersion anywherev1.KubernetesVersion, os OS, release *releasev1.EksARelease) api.ClusterConfigFiller { +func (t *Tinkerbell) WithKubeVersionAndOS(kubeVersion anywherev1.KubernetesVersion, os OS, _ *releasev1.EksARelease, rtos ...bool) api.ClusterConfigFiller { return api.JoinClusterConfigFillers( api.ClusterToConfigFiller(api.WithKubernetesVersion(kubeVersion)), api.TinkerbellToConfigFiller( - imageForKubeVersionAndOS(kubeVersion, os, ""), + imageForKubeVersionAndOS(kubeVersion, os, "", rtos...), api.WithOsFamilyForAllTinkerbellMachines(osFamiliesForOS[os]), ), ) @@ -191,8 +193,11 @@ func (t *Tinkerbell) WithNewWorkerNodeGroup(name string, workerNodeGroup *Worker panic("Not implemented for Tinkerbell yet") } -func envVarForImage(os OS, kubeVersion anywherev1.KubernetesVersion) string { +func envVarForImage(os OS, kubeVersion anywherev1.KubernetesVersion, rtos ...bool) string { imageEnvVar := fmt.Sprintf("T_TINKERBELL_IMAGE_%s_%s", strings.ToUpper(strings.ReplaceAll(string(os), "-", "_")), strings.ReplaceAll(string(kubeVersion), ".", "_")) + if len(rtos) > 0 && rtos[0] { + imageEnvVar = fmt.Sprintf("%s_RTOS", imageEnvVar) + } return imageEnvVar } @@ -319,14 +324,14 @@ func WithHookImagesURLPath(url string) TinkerbellOpt { } // imageForKubeVersionAndOS sets osImageURL on the appropriate field in the Machine Config based on the machineConfigType string provided else sets it at Data Center config. -func imageForKubeVersionAndOS(kubeVersion anywherev1.KubernetesVersion, operatingSystem OS, machineConfigType string) api.TinkerbellFiller { +func imageForKubeVersionAndOS(kubeVersion anywherev1.KubernetesVersion, operatingSystem OS, machineConfigType string, rtos ...bool) api.TinkerbellFiller { var tinkerbellFiller api.TinkerbellFiller if machineConfigType == workerIdentifier { - tinkerbellFiller = api.WithTinkerbellWorkerMachineConfigOSImageURL(os.Getenv(envVarForImage(operatingSystem, kubeVersion)), osFamiliesForOS[operatingSystem]) + tinkerbellFiller = api.WithTinkerbellWorkerMachineConfigOSImageURL(os.Getenv(envVarForImage(operatingSystem, kubeVersion, rtos...)), osFamiliesForOS[operatingSystem]) } else if machineConfigType == controlPlaneIdentifier { - tinkerbellFiller = api.WithTinkerbellCPMachineConfigOSImageURL(os.Getenv(envVarForImage(operatingSystem, kubeVersion)), osFamiliesForOS[operatingSystem]) + tinkerbellFiller = api.WithTinkerbellCPMachineConfigOSImageURL(os.Getenv(envVarForImage(operatingSystem, kubeVersion, rtos...)), osFamiliesForOS[operatingSystem]) } else { - tinkerbellFiller = api.WithTinkerbellOSImageURL(os.Getenv(envVarForImage(operatingSystem, kubeVersion))) + tinkerbellFiller = api.WithTinkerbellOSImageURL(os.Getenv(envVarForImage(operatingSystem, kubeVersion, rtos...))) } return tinkerbellFiller } @@ -431,6 +436,11 @@ func Ubuntu2204Kubernetes129Image() api.TinkerbellFiller { return imageForKubeVersionAndOS(anywherev1.Kube129, Ubuntu2204, "") } +// Ubuntu2204Kubernetes129RTOSImage represents an Ubuntu 22.04 RTOS raw image corresponding to Kubernetes 1.29. +func Ubuntu2204Kubernetes129RTOSImage() api.TinkerbellFiller { + return imageForKubeVersionAndOS(anywherev1.Kube129, Ubuntu2204, "", true) +} + // Ubuntu2204Kubernetes130Image represents an Ubuntu 22.04 raw image corresponding to Kubernetes 1.30. 
func Ubuntu2204Kubernetes130Image() api.TinkerbellFiller { return imageForKubeVersionAndOS(anywherev1.Kube130, Ubuntu2204, "") diff --git a/test/framework/vsphere.go b/test/framework/vsphere.go index 1fc0a138ffb3..f7a22984e982 100644 --- a/test/framework/vsphere.go +++ b/test/framework/vsphere.go @@ -329,7 +329,7 @@ func WithVSphereWorkerNodeGroup(name string, workerNodeGroup *WorkerNodeGroup, f } // WithMachineTemplate returns an api.ClusterConfigFiller that changes template in machine template. -func (v *VSphere) WithMachineTemplate(machineName string, template string) api.ClusterConfigFiller { +func (v *VSphere) WithMachineTemplate(machineName, template string) api.ClusterConfigFiller { return api.JoinClusterConfigFillers( api.VSphereToConfigFiller(api.WithMachineTemplate(machineName, template)), ) @@ -391,7 +391,7 @@ func (v *VSphere) ClusterConfigUpdates() []api.ClusterConfigFiller { // WithKubeVersionAndOS returns a cluster config filler that sets the cluster kube version and the right template for all // vsphere machine configs. -func (v *VSphere) WithKubeVersionAndOS(kubeVersion anywherev1.KubernetesVersion, os OS, release *releasev1.EksARelease) api.ClusterConfigFiller { +func (v *VSphere) WithKubeVersionAndOS(kubeVersion anywherev1.KubernetesVersion, os OS, release *releasev1.EksARelease, _ ...bool) api.ClusterConfigFiller { return api.JoinClusterConfigFillers( api.ClusterToConfigFiller(api.WithKubernetesVersion(kubeVersion)), api.VSphereToConfigFiller( From 7235fc3e3850b4aacd9bd8accdf0f0e7437dc576 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 30 May 2024 23:09:35 -0700 Subject: [PATCH 174/193] Bump the kubernetes group with 6 updates (#8213) Bumps the kubernetes group with 6 updates: | Package | From | To | | --- | --- | --- | | [k8s.io/api](https://github.com/kubernetes/api) | `0.29.4` | `0.29.5` | | [k8s.io/apimachinery](https://github.com/kubernetes/apimachinery) | `0.29.4` | `0.29.5` | | [k8s.io/apiserver](https://github.com/kubernetes/apiserver) | `0.29.4` | `0.29.5` | | [k8s.io/client-go](https://github.com/kubernetes/client-go) | `0.29.4` | `0.29.5` | | [k8s.io/component-base](https://github.com/kubernetes/component-base) | `0.29.4` | `0.29.5` | | [k8s.io/kubelet](https://github.com/kubernetes/kubelet) | `0.29.3` | `0.29.5` | Updates `k8s.io/api` from 0.29.4 to 0.29.5 - [Commits](https://github.com/kubernetes/api/compare/v0.29.4...v0.29.5) Updates `k8s.io/apimachinery` from 0.29.4 to 0.29.5 - [Commits](https://github.com/kubernetes/apimachinery/compare/v0.29.4...v0.29.5) Updates `k8s.io/apiserver` from 0.29.4 to 0.29.5 - [Commits](https://github.com/kubernetes/apiserver/compare/v0.29.4...v0.29.5) Updates `k8s.io/client-go` from 0.29.4 to 0.29.5 - [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/kubernetes/client-go/compare/v0.29.4...v0.29.5) Updates `k8s.io/component-base` from 0.29.4 to 0.29.5 - [Commits](https://github.com/kubernetes/component-base/compare/v0.29.4...v0.29.5) Updates `k8s.io/kubelet` from 0.29.3 to 0.29.5 - [Commits](https://github.com/kubernetes/kubelet/compare/v0.29.3...v0.29.5) --- updated-dependencies: - dependency-name: k8s.io/api dependency-type: direct:production update-type: version-update:semver-patch dependency-group: kubernetes - dependency-name: k8s.io/apimachinery dependency-type: direct:production update-type: version-update:semver-patch dependency-group: kubernetes - dependency-name: k8s.io/apiserver 
dependency-type: direct:production update-type: version-update:semver-patch dependency-group: kubernetes - dependency-name: k8s.io/client-go dependency-type: direct:production update-type: version-update:semver-patch dependency-group: kubernetes - dependency-name: k8s.io/component-base dependency-type: direct:production update-type: version-update:semver-patch dependency-group: kubernetes - dependency-name: k8s.io/kubelet dependency-type: direct:production update-type: version-update:semver-patch dependency-group: kubernetes ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 12 ++++++------ go.sum | 24 ++++++++++++------------ 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/go.mod b/go.mod index 2e49888ac537..0c9a3399e8b1 100644 --- a/go.mod +++ b/go.mod @@ -51,11 +51,11 @@ require ( gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 helm.sh/helm/v3 v3.14.4 - k8s.io/api v0.29.4 - k8s.io/apimachinery v0.29.4 - k8s.io/apiserver v0.29.4 - k8s.io/client-go v0.29.4 - k8s.io/component-base v0.29.4 + k8s.io/api v0.29.5 + k8s.io/apimachinery v0.29.5 + k8s.io/apiserver v0.29.5 + k8s.io/client-go v0.29.5 + k8s.io/component-base v0.29.5 k8s.io/klog/v2 v2.110.1 k8s.io/utils v0.0.0-20240102154912-e7106e64919e oras.land/oras-go v1.2.5 @@ -196,7 +196,7 @@ require ( k8s.io/apiextensions-apiserver v0.29.1 // indirect k8s.io/cluster-bootstrap v0.28.5 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect - k8s.io/kubelet v0.29.3 + k8s.io/kubelet v0.29.5 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index e59d62b47044..d6e077f4e876 100644 --- a/go.sum +++ b/go.sum @@ -1469,29 +1469,29 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.17.2/go.mod h1:BS9fjjLc4CMuqfSO8vgbHPKMt5+SF0ET6u/RVDihTo4= k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg= -k8s.io/api v0.29.4 h1:WEnF/XdxuCxdG3ayHNRR8yH3cI1B/llkWBma6bq4R3w= -k8s.io/api v0.29.4/go.mod h1:DetSv0t4FBTcEpfA84NJV3g9a7+rSzlUHk5ADAYHUv0= +k8s.io/api v0.29.5 h1:levS+umUigHCfI3riD36pMY1vQEbrzh4r1ivVWAhHaI= +k8s.io/api v0.29.5/go.mod h1:7b18TtPcJzdjk7w5zWyIHgoAtpGeRvGGASxlS7UZXdQ= k8s.io/apiextensions-apiserver v0.17.2/go.mod h1:4KdMpjkEjjDI2pPfBA15OscyNldHWdBCfsWMDWAmSTs= k8s.io/apiextensions-apiserver v0.29.1 h1:S9xOtyk9M3Sk1tIpQMu9wXHm5O2MX6Y1kIpPMimZBZw= k8s.io/apiextensions-apiserver v0.29.1/go.mod h1:zZECpujY5yTW58co8V2EQR4BD6A9pktVgHhvc0uLfeU= k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= k8s.io/apimachinery v0.24.3/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= -k8s.io/apimachinery v0.29.4 h1:RaFdJiDmuKs/8cm1M6Dh1Kvyh59YQFDcFuFTSmXes6Q= -k8s.io/apimachinery v0.29.4/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y= +k8s.io/apimachinery v0.29.5 h1:Hofa2BmPfpoT+IyDTlcPdCHSnHtEQMoJYGVoQpRTfv4= +k8s.io/apimachinery v0.29.5/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y= k8s.io/apiserver v0.17.2/go.mod h1:lBmw/TtQdtxvrTk0e2cgtOxHizXI+d0mmGQURIHQZlo= -k8s.io/apiserver v0.29.4 h1:wPwGOO58GQOpRiZu59P5eRoDcB7QtV+QBglkRiXwCiM= -k8s.io/apiserver v0.29.4/go.mod h1:VqTF9t98HVfhKZVRohCPezsdUt9u2g3bHKftxGcXoRo= +k8s.io/apiserver v0.29.5 
h1:223C+JkRnGmudEU00GfpX6quDSrzwwP0DuXOYTyUYb0= +k8s.io/apiserver v0.29.5/go.mod h1:zN9xdatz5g7XwL1Xoz9hD4QQON1GN0c+1kV5e/NHejM= k8s.io/client-go v0.17.2/go.mod h1:QAzRgsa0C2xl4/eVpeVAZMvikCn8Nm81yqVx3Kk9XYI= k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30= -k8s.io/client-go v0.29.4 h1:79ytIedxVfyXV8rpH3jCBW0u+un0fxHDwX5F9K8dPR8= -k8s.io/client-go v0.29.4/go.mod h1:kC1thZQ4zQWYwldsfI088BbK6RkxK+aF5ebV8y9Q4tk= +k8s.io/client-go v0.29.5 h1:nlASXmPQy190qTteaVP31g3c/wi2kycznkTP7Sv1zPc= +k8s.io/client-go v0.29.5/go.mod h1:aY5CnqUUvXYccJhm47XHoPcRyX6vouHdIBHaKZGTbK4= k8s.io/cluster-bootstrap v0.28.5 h1:KyFY6l5xK5oxRjjGotgivlbQ0AReRctMMoNpxSJaJxM= k8s.io/cluster-bootstrap v0.28.5/go.mod h1:nJzrDb8AWtUm1RSoXx+lDb2f7i54Ndfx4v8x3s4kZ2Y= k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= k8s.io/component-base v0.17.2/go.mod h1:zMPW3g5aH7cHJpKYQ/ZsGMcgbsA/VyhEugF3QT1awLs= -k8s.io/component-base v0.29.4 h1:xeKzuuHI/1tjleu5jycDAcYbhAxeGHCQBZUY2eRIkOo= -k8s.io/component-base v0.29.4/go.mod h1:pYjt+oEZP9gtmwSikwAJgfSBikqKX2gOqRat0QjmQt0= +k8s.io/component-base v0.29.5 h1:Ptj8AzG+p8c2a839XriHwxakDpZH9uvIgYz+o1agjg8= +k8s.io/component-base v0.29.5/go.mod h1:9nBUoPxW/yimISIgAG7sJDrUGJlu7t8HnDafIrOdU8Q= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= @@ -1507,8 +1507,8 @@ k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKf k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= -k8s.io/kubelet v0.29.3 h1:X9h0ZHzc+eUeNTaksbN0ItHyvGhQ7Z0HPjnQD2oHdwU= -k8s.io/kubelet v0.29.3/go.mod h1:jDiGuTkFOUynyBKzOoC1xRSWlgAZ9UPcTYeFyjr6vas= +k8s.io/kubelet v0.29.5 h1:tYYyc2JcrDt8jFYTsKpgcIpp+S5a/nm85CY4liosprw= +k8s.io/kubelet v0.29.5/go.mod h1:eWJR0OtRRkLwKEYjsQXcTyTZlSfgR3Py1xJVFa0ISTk= k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= From 25d6c527c1db5260ae8b291d737fdf69cf8a1a1e Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Fri, 31 May 2024 11:45:04 -0700 Subject: [PATCH 175/193] [PR BOT] Generate release testdata files (#8233) --- .../testdata/main-bundle-release.yaml | 96 +++++++++---------- 1 file changed, 48 insertions(+), 48 deletions(-) diff --git a/release/cli/pkg/operations/testdata/main-bundle-release.yaml b/release/cli/pkg/operations/testdata/main-bundle-release.yaml index a4543c62bba8..514f310a0993 100644 --- a/release/cli/pkg/operations/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/operations/testdata/main-bundle-release.yaml @@ -112,26 +112,26 @@ spec: arch: - amd64 description: Container image for cilium image - imageDigest: sha256:0d071c308d85b33e7c69d14a70ea746d732bc2c557506b0ae2ef388e27b9c443 + imageDigest: 
sha256:8d5876112cd6de92c2d4cf9de3281d942ba3cc7df08b2109f94271bdfcb17f25 name: cilium os: linux - uri: public.ecr.aws/isovalent/cilium:v1.13.15-eksa.1 + uri: public.ecr.aws/isovalent/cilium:v1.13.16-eksa.2 helmChart: description: Helm chart for cilium-chart - imageDigest: sha256:3712aa78c13fddd4f96b4a451ed1082b52f5089e70949b0e592e26f076b647de + imageDigest: sha256:482c1d609f03993d254e5799bb267cfb5b7bf2950760db3709c307d6fc6a2c4c name: cilium-chart - uri: public.ecr.aws/isovalent/cilium:1.13.15-eksa.1 + uri: public.ecr.aws/isovalent/cilium:1.13.16-eksa.2 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.15-eksa.1/cilium.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.16-eksa.2/cilium.yaml operator: arch: - amd64 description: Container image for operator-generic image - imageDigest: sha256:872add676d7bc0cc6b26f43e082a172885919e9d96646538744124b3bd4baa06 + imageDigest: sha256:3a4eaf8cf250e816635ae8e49b3b622b9ab7b83af41a00dc811408c347bceb51 name: operator-generic os: linux - uri: public.ecr.aws/isovalent/operator-generic:v1.13.15-eksa.1 - version: v1.13.15-eksa.1 + uri: public.ecr.aws/isovalent/operator-generic:v1.13.16-eksa.2 + version: v1.13.16-eksa.2 cloudStack: clusterAPIController: arch: @@ -890,26 +890,26 @@ spec: arch: - amd64 description: Container image for cilium image - imageDigest: sha256:0d071c308d85b33e7c69d14a70ea746d732bc2c557506b0ae2ef388e27b9c443 + imageDigest: sha256:8d5876112cd6de92c2d4cf9de3281d942ba3cc7df08b2109f94271bdfcb17f25 name: cilium os: linux - uri: public.ecr.aws/isovalent/cilium:v1.13.15-eksa.1 + uri: public.ecr.aws/isovalent/cilium:v1.13.16-eksa.2 helmChart: description: Helm chart for cilium-chart - imageDigest: sha256:3712aa78c13fddd4f96b4a451ed1082b52f5089e70949b0e592e26f076b647de + imageDigest: sha256:482c1d609f03993d254e5799bb267cfb5b7bf2950760db3709c307d6fc6a2c4c name: cilium-chart - uri: public.ecr.aws/isovalent/cilium:1.13.15-eksa.1 + uri: public.ecr.aws/isovalent/cilium:1.13.16-eksa.2 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.15-eksa.1/cilium.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.16-eksa.2/cilium.yaml operator: arch: - amd64 description: Container image for operator-generic image - imageDigest: sha256:872add676d7bc0cc6b26f43e082a172885919e9d96646538744124b3bd4baa06 + imageDigest: sha256:3a4eaf8cf250e816635ae8e49b3b622b9ab7b83af41a00dc811408c347bceb51 name: operator-generic os: linux - uri: public.ecr.aws/isovalent/operator-generic:v1.13.15-eksa.1 - version: v1.13.15-eksa.1 + uri: public.ecr.aws/isovalent/operator-generic:v1.13.16-eksa.2 + version: v1.13.16-eksa.2 cloudStack: clusterAPIController: arch: @@ -1668,26 +1668,26 @@ spec: arch: - amd64 description: Container image for cilium image - imageDigest: sha256:0d071c308d85b33e7c69d14a70ea746d732bc2c557506b0ae2ef388e27b9c443 + imageDigest: sha256:8d5876112cd6de92c2d4cf9de3281d942ba3cc7df08b2109f94271bdfcb17f25 name: cilium os: linux - uri: public.ecr.aws/isovalent/cilium:v1.13.15-eksa.1 + uri: public.ecr.aws/isovalent/cilium:v1.13.16-eksa.2 helmChart: description: Helm chart for cilium-chart - imageDigest: sha256:3712aa78c13fddd4f96b4a451ed1082b52f5089e70949b0e592e26f076b647de + imageDigest: sha256:482c1d609f03993d254e5799bb267cfb5b7bf2950760db3709c307d6fc6a2c4c name: cilium-chart - uri: public.ecr.aws/isovalent/cilium:1.13.15-eksa.1 + uri: public.ecr.aws/isovalent/cilium:1.13.16-eksa.2 manifest: - 
uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.15-eksa.1/cilium.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.16-eksa.2/cilium.yaml operator: arch: - amd64 description: Container image for operator-generic image - imageDigest: sha256:872add676d7bc0cc6b26f43e082a172885919e9d96646538744124b3bd4baa06 + imageDigest: sha256:3a4eaf8cf250e816635ae8e49b3b622b9ab7b83af41a00dc811408c347bceb51 name: operator-generic os: linux - uri: public.ecr.aws/isovalent/operator-generic:v1.13.15-eksa.1 - version: v1.13.15-eksa.1 + uri: public.ecr.aws/isovalent/operator-generic:v1.13.16-eksa.2 + version: v1.13.16-eksa.2 cloudStack: clusterAPIController: arch: @@ -2446,26 +2446,26 @@ spec: arch: - amd64 description: Container image for cilium image - imageDigest: sha256:0d071c308d85b33e7c69d14a70ea746d732bc2c557506b0ae2ef388e27b9c443 + imageDigest: sha256:8d5876112cd6de92c2d4cf9de3281d942ba3cc7df08b2109f94271bdfcb17f25 name: cilium os: linux - uri: public.ecr.aws/isovalent/cilium:v1.13.15-eksa.1 + uri: public.ecr.aws/isovalent/cilium:v1.13.16-eksa.2 helmChart: description: Helm chart for cilium-chart - imageDigest: sha256:3712aa78c13fddd4f96b4a451ed1082b52f5089e70949b0e592e26f076b647de + imageDigest: sha256:482c1d609f03993d254e5799bb267cfb5b7bf2950760db3709c307d6fc6a2c4c name: cilium-chart - uri: public.ecr.aws/isovalent/cilium:1.13.15-eksa.1 + uri: public.ecr.aws/isovalent/cilium:1.13.16-eksa.2 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.15-eksa.1/cilium.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.16-eksa.2/cilium.yaml operator: arch: - amd64 description: Container image for operator-generic image - imageDigest: sha256:872add676d7bc0cc6b26f43e082a172885919e9d96646538744124b3bd4baa06 + imageDigest: sha256:3a4eaf8cf250e816635ae8e49b3b622b9ab7b83af41a00dc811408c347bceb51 name: operator-generic os: linux - uri: public.ecr.aws/isovalent/operator-generic:v1.13.15-eksa.1 - version: v1.13.15-eksa.1 + uri: public.ecr.aws/isovalent/operator-generic:v1.13.16-eksa.2 + version: v1.13.16-eksa.2 cloudStack: clusterAPIController: arch: @@ -3224,26 +3224,26 @@ spec: arch: - amd64 description: Container image for cilium image - imageDigest: sha256:0d071c308d85b33e7c69d14a70ea746d732bc2c557506b0ae2ef388e27b9c443 + imageDigest: sha256:8d5876112cd6de92c2d4cf9de3281d942ba3cc7df08b2109f94271bdfcb17f25 name: cilium os: linux - uri: public.ecr.aws/isovalent/cilium:v1.13.15-eksa.1 + uri: public.ecr.aws/isovalent/cilium:v1.13.16-eksa.2 helmChart: description: Helm chart for cilium-chart - imageDigest: sha256:3712aa78c13fddd4f96b4a451ed1082b52f5089e70949b0e592e26f076b647de + imageDigest: sha256:482c1d609f03993d254e5799bb267cfb5b7bf2950760db3709c307d6fc6a2c4c name: cilium-chart - uri: public.ecr.aws/isovalent/cilium:1.13.15-eksa.1 + uri: public.ecr.aws/isovalent/cilium:1.13.16-eksa.2 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.15-eksa.1/cilium.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.16-eksa.2/cilium.yaml operator: arch: - amd64 description: Container image for operator-generic image - imageDigest: sha256:872add676d7bc0cc6b26f43e082a172885919e9d96646538744124b3bd4baa06 + imageDigest: sha256:3a4eaf8cf250e816635ae8e49b3b622b9ab7b83af41a00dc811408c347bceb51 name: operator-generic os: linux - uri: 
public.ecr.aws/isovalent/operator-generic:v1.13.15-eksa.1 - version: v1.13.15-eksa.1 + uri: public.ecr.aws/isovalent/operator-generic:v1.13.16-eksa.2 + version: v1.13.16-eksa.2 cloudStack: clusterAPIController: arch: @@ -4002,26 +4002,26 @@ spec: arch: - amd64 description: Container image for cilium image - imageDigest: sha256:0d071c308d85b33e7c69d14a70ea746d732bc2c557506b0ae2ef388e27b9c443 + imageDigest: sha256:8d5876112cd6de92c2d4cf9de3281d942ba3cc7df08b2109f94271bdfcb17f25 name: cilium os: linux - uri: public.ecr.aws/isovalent/cilium:v1.13.15-eksa.1 + uri: public.ecr.aws/isovalent/cilium:v1.13.16-eksa.2 helmChart: description: Helm chart for cilium-chart - imageDigest: sha256:3712aa78c13fddd4f96b4a451ed1082b52f5089e70949b0e592e26f076b647de + imageDigest: sha256:482c1d609f03993d254e5799bb267cfb5b7bf2950760db3709c307d6fc6a2c4c name: cilium-chart - uri: public.ecr.aws/isovalent/cilium:1.13.15-eksa.1 + uri: public.ecr.aws/isovalent/cilium:1.13.16-eksa.2 manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.15-eksa.1/cilium.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cilium/manifests/cilium/v1.13.16-eksa.2/cilium.yaml operator: arch: - amd64 description: Container image for operator-generic image - imageDigest: sha256:872add676d7bc0cc6b26f43e082a172885919e9d96646538744124b3bd4baa06 + imageDigest: sha256:3a4eaf8cf250e816635ae8e49b3b622b9ab7b83af41a00dc811408c347bceb51 name: operator-generic os: linux - uri: public.ecr.aws/isovalent/operator-generic:v1.13.15-eksa.1 - version: v1.13.15-eksa.1 + uri: public.ecr.aws/isovalent/operator-generic:v1.13.16-eksa.2 + version: v1.13.16-eksa.2 cloudStack: clusterAPIController: arch: From 8438b97285bf8eee6e5101f469ab515108d42d03 Mon Sep 17 00:00:00 2001 From: Rahul Ganesh <31204974+rahulbabu95@users.noreply.github.com> Date: Fri, 31 May 2024 15:52:10 -0700 Subject: [PATCH 176/193] Remove K8s 1.30 feature flag and default as cluster version (#8234) Signed-off-by: Rahul Ganesh Co-authored-by: Rahul Ganesh --- .../buildspecs/nutanix-test-eks-a-cli.yml | 6 +- .../build/buildspecs/quick-test-eks-a-cli.yml | 4 +- test/e2e/QUICK_TESTS.yaml | 14 +- test/e2e/SKIPPED_TESTS.yaml | 6 + test/e2e/nutanix_test.go | 430 +++++++++++------- test/framework/nutanix.go | 88 ++-- 6 files changed, 325 insertions(+), 223 deletions(-) diff --git a/cmd/integration_test/build/buildspecs/nutanix-test-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/nutanix-test-eks-a-cli.yml index 58ab4254c7c0..2865acf2b0d3 100644 --- a/cmd/integration_test/build/buildspecs/nutanix-test-eks-a-cli.yml +++ b/cmd/integration_test/build/buildspecs/nutanix-test-eks-a-cli.yml @@ -33,21 +33,21 @@ env: T_NUTANIX_POD_CIDR: "nutanix_ci:nutanix_pod_cidr" T_NUTANIX_SERVICE_CIDR: "nutanix_ci:nutanix_service_cidr" T_NUTANIX_ADDITIONAL_TRUST_BUNDLE: "nutanix_ci:nutanix_additional_trust_bundle" - T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_25: "nutanix_ci:nutanix_template_ubuntu_1_25" T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_26: "nutanix_ci:nutanix_template_ubuntu_1_26" T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_27: "nutanix_ci:nutanix_template_ubuntu_1_27" T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_28: "nutanix_ci:nutanix_template_ubuntu_1_28" T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_29: "nutanix_ci:nutanix_template_ubuntu_1_29" - T_NUTANIX_TEMPLATE_NAME_REDHAT_1_25: "nutanix_ci:nutanix_template_rhel_8_1_25" + T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_30: "nutanix_ci:nutanix_template_ubuntu_1_30" T_NUTANIX_TEMPLATE_NAME_REDHAT_1_26: "nutanix_ci:nutanix_template_rhel_8_1_26" 
T_NUTANIX_TEMPLATE_NAME_REDHAT_1_27: "nutanix_ci:nutanix_template_rhel_8_1_27" T_NUTANIX_TEMPLATE_NAME_REDHAT_1_28: "nutanix_ci:nutanix_template_rhel_8_1_28" T_NUTANIX_TEMPLATE_NAME_REDHAT_1_29: "nutanix_ci:nutanix_template_rhel_8_1_29" - T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_25: "nutanix_ci:nutanix_template_rhel_9_1_25" + T_NUTANIX_TEMPLATE_NAME_REDHAT_1_30: "nutanix_ci:nutanix_template_rhel_8_1_30" T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_26: "nutanix_ci:nutanix_template_rhel_9_1_26" T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_27: "nutanix_ci:nutanix_template_rhel_9_1_27" T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_28: "nutanix_ci:nutanix_template_rhel_9_1_28" T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_29: "nutanix_ci:nutanix_template_rhel_9_1_29" + T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_30: "nutanix_ci:nutanix_template_rhel_9_1_30" phases: pre_build: diff --git a/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml index ee22ccc83ede..2f6fe0cfc7ba 100644 --- a/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml +++ b/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml @@ -116,16 +116,18 @@ env: T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_27: "nutanix_ci:nutanix_template_ubuntu_1_27" T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_28: "nutanix_ci:nutanix_template_ubuntu_1_28" T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_29: "nutanix_ci:nutanix_template_ubuntu_1_29" + T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_30: "nutanix_ci:nutanix_template_ubuntu_1_30" T_NUTANIX_TEMPLATE_NAME_REDHAT_1_25: "nutanix_ci:nutanix_template_rhel_8_1_25" T_NUTANIX_TEMPLATE_NAME_REDHAT_1_26: "nutanix_ci:nutanix_template_rhel_8_1_26" T_NUTANIX_TEMPLATE_NAME_REDHAT_1_27: "nutanix_ci:nutanix_template_rhel_8_1_27" T_NUTANIX_TEMPLATE_NAME_REDHAT_1_28: "nutanix_ci:nutanix_template_rhel_8_1_28" T_NUTANIX_TEMPLATE_NAME_REDHAT_1_29: "nutanix_ci:nutanix_template_rhel_8_1_29" - T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_25: "nutanix_ci:nutanix_template_rhel_9_1_25" + T_NUTANIX_TEMPLATE_NAME_REDHAT_1_30: "nutanix_ci:nutanix_template_rhel_8_1_30" T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_26: "nutanix_ci:nutanix_template_rhel_9_1_26" T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_27: "nutanix_ci:nutanix_template_rhel_9_1_27" T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_28: "nutanix_ci:nutanix_template_rhel_9_1_28" T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_29: "nutanix_ci:nutanix_template_rhel_9_1_29" + T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_30: "nutanix_ci:nutanix_template_rhel_9_1_30" # Snow secrets T_SNOW_DEVICES: "snow_ci:snow_devices" T_SNOW_CREDENTIALS_S3_PATH: "snow_ci:snow_credentials_s3_path" diff --git a/test/e2e/QUICK_TESTS.yaml b/test/e2e/QUICK_TESTS.yaml index e59cb8184cbc..d3ff8a0272ed 100644 --- a/test/e2e/QUICK_TESTS.yaml +++ b/test/e2e/QUICK_TESTS.yaml @@ -1,6 +1,6 @@ quick_tests: # Docker -- TestDocker.*128 +- TestDocker.*130 # vSphere - ^TestVSphereKubernetes129To130RedHatUpgrade$ - TestVSphereKubernetes129To130StackedEtcdRedHatUpgrade @@ -15,12 +15,12 @@ quick_tests: - TestCloudStackKubernetes129To130RedhatMultipleFieldsUpgrade - TestCloudStackKubernetes129To130StackedEtcdRedhatMultipleFieldsUpgrade # Nutanix -- TestNutanixKubernetes128to129RedHat9Upgrade -- TestNutanixKubernetes128to129StackedEtcdRedHat9Upgrade -- TestNutanixKubernetes128to129RedHat8Upgrade -- TestNutanixKubernetes128to129StackedEtcdRedHat8Upgrade -- TestNutanixKubernetes128To129UbuntuUpgrade -- TestNutanixKubernetes128To129StackedEtcdUbuntuUpgrade +- TestNutanixKubernetes129to130RedHat9Upgrade +- TestNutanixKubernetes129to130StackedEtcdRedHat9Upgrade +- 
TestNutanixKubernetes129to130RedHat8Upgrade +- TestNutanixKubernetes129to130StackedEtcdRedHat8Upgrade +- TestNutanixKubernetes129To130UbuntuUpgrade +- TestNutanixKubernetes129To130StackedEtcdUbuntuUpgrade # Snow # - TestSnowKubernetes128SimpleFlow # - TestSnowKubernetes128StackedEtcdSimpleFlow diff --git a/test/e2e/SKIPPED_TESTS.yaml b/test/e2e/SKIPPED_TESTS.yaml index 5a60d2180b5c..263e041111ef 100644 --- a/test/e2e/SKIPPED_TESTS.yaml +++ b/test/e2e/SKIPPED_TESTS.yaml @@ -62,6 +62,12 @@ skipped_tests: - TestCloudStackKubernetes130RedhatCuratedPackagesAdotSimpleFlow - TestCloudStackKubernetes130RedhatCuratedPackagesCertManagerSimpleFlow - TestCloudStackKubernetes130RedhatWorkloadClusterCuratedPackagesEmissarySimpleFlow +- TestNutanixKubernetes130UbuntuCuratedPackagesSimpleFlow +- TestNutanixKubernetes130UbuntuCuratedPackagesEmissarySimpleFlow +- TestNutanixKubernetes130UbuntuCuratedPackagesHarborSimpleFlow +- TestNutanixKubernetes130UbuntuCuratedPackagesAdotSimpleFlow +- TestNutanixKubernetes130UbuntuCuratedPackagesPrometheusSimpleFlow +- TestNutanixKubernetes130UbuntuCuratedPackagesClusterAutoscalerSimpleFlow # UpgradeFromLatestTests for new K8s version (expected to work only after the release is out) - TestDockerKubernetes130AirgappedUpgradeFromLatestRegistryMirrorAndCert diff --git a/test/e2e/nutanix_test.go b/test/e2e/nutanix_test.go index ea47986ed777..6dcc5c200e94 100644 --- a/test/e2e/nutanix_test.go +++ b/test/e2e/nutanix_test.go @@ -157,78 +157,6 @@ func TestNutanixKubernetes126UbuntuCuratedPackagesClusterAutoscalerSimpleFlow(t runAutoscalerWithMetricsServerSimpleFlow(test) } -func TestNutanixKubernetes125UbuntuCuratedPackagesSimpleFlow(t *testing.T) { - test := framework.NewClusterE2ETest(t, - framework.NewNutanix(t, framework.WithUbuntu125Nutanix()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), - EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, - EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), - ) - runCuratedPackageInstallSimpleFlow(test) -} - -func TestNutanixKubernetes125UbuntuCuratedPackagesEmissarySimpleFlow(t *testing.T) { - framework.CheckCuratedPackagesCredentials(t) - test := framework.NewClusterE2ETest(t, - framework.NewNutanix(t, framework.WithUbuntu125Nutanix()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), - EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, - EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), - ) - runCuratedPackageEmissaryInstallSimpleFlow(test) -} - -func TestNutanixKubernetes125UbuntuCuratedPackagesHarborSimpleFlow(t *testing.T) { - test := framework.NewClusterE2ETest(t, - framework.NewNutanix(t, framework.WithUbuntu125Nutanix()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), - EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, - EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), - ) - runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test) -} - -func TestNutanixKubernetes125UbuntuCuratedPackagesAdotSimpleFlow(t *testing.T) { - framework.CheckCuratedPackagesCredentials(t) - test := framework.NewClusterE2ETest(t, - framework.NewNutanix(t, framework.WithUbuntu125Nutanix()), - 
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), - EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, - EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), - ) - runCuratedPackagesAdotInstallSimpleFlow(test) -} - -func TestNutanixKubernetes125UbuntuCuratedPackagesPrometheusSimpleFlow(t *testing.T) { - framework.CheckCuratedPackagesCredentials(t) - test := framework.NewClusterE2ETest(t, - framework.NewNutanix(t, framework.WithUbuntu125Nutanix()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), - EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, - EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), - ) - runCuratedPackagesPrometheusInstallSimpleFlow(test) -} - -func TestNutanixKubernetes125UbuntuCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) { - minNodes := 1 - maxNodes := 2 - framework.CheckCuratedPackagesCredentials(t) - test := framework.NewClusterE2ETest(t, - framework.NewNutanix(t, framework.WithUbuntu125Nutanix()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)), - framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube125), - EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, - EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), - ) - runAutoscalerWithMetricsServerSimpleFlow(test) -} - func TestNutanixKubernetes128UbuntuCuratedPackagesSimpleFlow(t *testing.T) { test := framework.NewClusterE2ETest(t, framework.NewNutanix(t, framework.WithUbuntu128Nutanix()), @@ -373,16 +301,79 @@ func TestNutanixKubernetes129UbuntuCuratedPackagesClusterAutoscalerSimpleFlow(t runAutoscalerWithMetricsServerSimpleFlow(test) } -// Simpleflow -func TestNutanixKubernetes125UbuntuSimpleFlowWithName(t *testing.T) { - test := framework.NewClusterE2ETest( - t, - framework.NewNutanix(t, framework.WithUbuntu125Nutanix()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), +func TestNutanixKubernetes130UbuntuCuratedPackagesSimpleFlow(t *testing.T) { + test := framework.NewClusterE2ETest(t, + framework.NewNutanix(t, framework.WithUbuntu130Nutanix()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), + EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, + EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), ) - runSimpleFlow(test) + runCuratedPackageInstallSimpleFlow(test) +} + +func TestNutanixKubernetes130UbuntuCuratedPackagesEmissarySimpleFlow(t *testing.T) { + framework.CheckCuratedPackagesCredentials(t) + test := framework.NewClusterE2ETest(t, + framework.NewNutanix(t, framework.WithUbuntu130Nutanix()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), + EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, + EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), + ) + runCuratedPackageEmissaryInstallSimpleFlow(test) } +func TestNutanixKubernetes130UbuntuCuratedPackagesHarborSimpleFlow(t *testing.T) { + test := framework.NewClusterE2ETest(t, + framework.NewNutanix(t, framework.WithUbuntu130Nutanix()), + 
framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), + EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, + EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), + ) + runCuratedPackageHarborInstallSimpleFlowLocalStorageProvisioner(test) +} + +func TestNutanixKubernetes130UbuntuCuratedPackagesAdotSimpleFlow(t *testing.T) { + framework.CheckCuratedPackagesCredentials(t) + test := framework.NewClusterE2ETest(t, + framework.NewNutanix(t, framework.WithUbuntu130Nutanix()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), + EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, + EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), + ) + runCuratedPackagesAdotInstallSimpleFlow(test) +} + +func TestNutanixKubernetes130UbuntuCuratedPackagesPrometheusSimpleFlow(t *testing.T) { + framework.CheckCuratedPackagesCredentials(t) + test := framework.NewClusterE2ETest(t, + framework.NewNutanix(t, framework.WithUbuntu130Nutanix()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), + EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, + EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), + ) + runCuratedPackagesPrometheusInstallSimpleFlow(test) +} + +func TestNutanixKubernetes130UbuntuCuratedPackagesClusterAutoscalerSimpleFlow(t *testing.T) { + minNodes := 1 + maxNodes := 2 + framework.CheckCuratedPackagesCredentials(t) + test := framework.NewClusterE2ETest(t, + framework.NewNutanix(t, framework.WithUbuntu130Nutanix()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130), api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes)), + framework.WithPackageConfig(t, packageBundleURI(v1alpha1.Kube130), + EksaPackageControllerHelmChartName, EksaPackageControllerHelmURI, + EksaPackageControllerHelmVersion, EksaPackageControllerHelmValues, nil), + ) + runAutoscalerWithMetricsServerSimpleFlow(test) +} + +// Simpleflow func TestNutanixKubernetes126UbuntuSimpleFlowWithName(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -419,11 +410,11 @@ func TestNutanixKubernetes129UbuntuSimpleFlowWithName(t *testing.T) { runSimpleFlow(test) } -func TestNutanixKubernetes125RedHat8SimpleFlowWithName(t *testing.T) { +func TestNutanixKubernetes130UbuntuSimpleFlowWithName(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewNutanix(t, framework.WithRedHat125Nutanix()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.NewNutanix(t, framework.WithUbuntu130Nutanix()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), ) runSimpleFlow(test) } @@ -455,11 +446,20 @@ func TestNutanixKubernetes128RedHat8SimpleFlowWithName(t *testing.T) { runSimpleFlow(test) } -func TestNutanixKubernetes125RedHat9SimpleFlowWithName(t *testing.T) { +func TestNutanixKubernetes129RedHat8SimpleFlowWithName(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewNutanix(t, framework.WithRedHat129Nutanix()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + ) + runSimpleFlow(test) +} + +func TestNutanixKubernetes130RedHat8SimpleFlowWithName(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewNutanix(t, 
framework.WithRedHat9Kubernetes125Nutanix()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.NewNutanix(t, framework.WithRedHat130Nutanix()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), ) runSimpleFlow(test) } @@ -500,13 +500,11 @@ func TestNutanixKubernetes129RedHat9SimpleFlowWithName(t *testing.T) { runSimpleFlow(test) } -func TestNutanixKubernetes125UbuntuSimpleFlowWithUUID(t *testing.T) { +func TestNutanixKubernetes130RedHat9SimpleFlowWithName(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewNutanix(t, framework.WithUbuntu125NutanixUUID(), - framework.WithPrismElementClusterUUID(), - framework.WithNutanixSubnetUUID()), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.NewNutanix(t, framework.WithRedHat9Kubernetes130Nutanix()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), ) runSimpleFlow(test) } @@ -555,6 +553,17 @@ func TestNutanixKubernetes129UbuntuSimpleFlowWithUUID(t *testing.T) { runSimpleFlow(test) } +func TestNutanixKubernetes130UbuntuSimpleFlowWithUUID(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewNutanix(t, framework.WithUbuntu130NutanixUUID(), + framework.WithPrismElementClusterUUID(), + framework.WithNutanixSubnetUUID()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + ) + runSimpleFlow(test) +} + func TestNutanixKubernetes128RedHatSimpleFlowWithUUID(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -577,6 +586,17 @@ func TestNutanixKubernetes129RedHatSimpleFlowWithUUID(t *testing.T) { runSimpleFlow(test) } +func TestNutanixKubernetes130RedHatSimpleFlowWithUUID(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewNutanix(t, framework.WithRedHat130NutanixUUID(), + framework.WithPrismElementClusterUUID(), + framework.WithNutanixSubnetUUID()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + ) + runSimpleFlow(test) +} + func TestNutanixKubernetes128RedHat9SimpleFlowWithUUID(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -599,24 +619,18 @@ func TestNutanixKubernetes129RedHat9SimpleFlowWithUUID(t *testing.T) { runSimpleFlow(test) } -// Upgrade -func TestNutanixKubernetes125To126UbuntuUpgrade(t *testing.T) { - provider := framework.NewNutanix(t, framework.WithUbuntu125Nutanix()) +func TestNutanixKubernetes130RedHat9SimpleFlowWithUUID(t *testing.T) { test := framework.NewClusterE2ETest( t, - provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithClusterFiller(api.WithControlPlaneCount(1)), - framework.WithClusterFiller(api.WithWorkerNodeCount(1)), - ) - runSimpleUpgradeFlow( - test, - v1alpha1.Kube126, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), - provider.WithProviderUpgrade(provider.Ubuntu126Template()), + framework.NewNutanix(t, framework.WithRedHat9Kubernetes130NutanixUUID(), + framework.WithPrismElementClusterUUID(), + framework.WithNutanixSubnetUUID()), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), ) + runSimpleFlow(test) } +// Upgrade func TestNutanixKubernetes126To127UbuntuUpgrade(t *testing.T) { provider := framework.NewNutanix(t, framework.WithUbuntu126Nutanix()) test := framework.NewClusterE2ETest( @@ -665,6 +679,22 @@ func TestNutanixKubernetes128To129StackedEtcdUbuntuUpgrade(t *testing.T) { ) } +func TestNutanixKubernetes129To130StackedEtcdUbuntuUpgrade(t *testing.T) { + 
provider := framework.NewNutanix(t, framework.WithUbuntu129Nutanix()) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithStackedEtcdTopology()), + ) + runSimpleUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Ubuntu130Template()), + ) +} + func TestNutanixKubernetes128To129UbuntuUpgrade(t *testing.T) { provider := framework.NewNutanix(t, framework.WithUbuntu128Nutanix()) test := framework.NewClusterE2ETest( @@ -681,20 +711,19 @@ func TestNutanixKubernetes128To129UbuntuUpgrade(t *testing.T) { ) } -func TestNutanixKubernetes125to126RedHatUpgrade(t *testing.T) { - provider := framework.NewNutanix(t, framework.WithRedHat125Nutanix()) +func TestNutanixKubernetes129To130UbuntuUpgrade(t *testing.T) { + provider := framework.NewNutanix(t, framework.WithUbuntu129Nutanix()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithClusterFiller(api.WithControlPlaneCount(1)), - framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithExternalEtcdTopology(1)), ) runSimpleUpgradeFlow( test, - v1alpha1.Kube126, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), - provider.WithProviderUpgrade(provider.RedHat126Template()), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.Ubuntu130Template()), ) } @@ -768,20 +797,20 @@ func TestNutanixKubernetes128to129StackedEtcdRedHat8Upgrade(t *testing.T) { ) } -func TestNutanixKubernetes125to126RedHat9Upgrade(t *testing.T) { - provider := framework.NewNutanix(t, framework.WithRedHat9Kubernetes125Nutanix()) +func TestNutanixKubernetes129to130RedHatUpgrade(t *testing.T) { + provider := framework.NewNutanix(t, framework.WithRedHat129Nutanix()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), ) runSimpleUpgradeFlow( test, - v1alpha1.Kube126, - framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube126)), - provider.WithProviderUpgrade(provider.RedHat9Kubernetes126Template()), + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.RedHat130Template()), ) } @@ -838,6 +867,23 @@ func TestNutanixKubernetes128to129StackedEtcdRedHat9Upgrade(t *testing.T) { ) } +func TestNutanixKubernetes129to130RedHat9Upgrade(t *testing.T) { + provider := framework.NewNutanix(t, framework.WithRedHat9Kubernetes129Nutanix()) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + ) + runSimpleUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithKubernetesVersion(v1alpha1.Kube130)), + provider.WithProviderUpgrade(provider.RedHat9Kubernetes130Template()), + ) +} + func 
TestNutanixKubernetes128UbuntuWorkerNodeScaleUp1To3(t *testing.T) { provider := framework.NewNutanix(t, framework.WithUbuntu128Nutanix()) test := framework.NewClusterE2ETest( @@ -872,20 +918,19 @@ func TestNutanixKubernetes129UbuntuWorkerNodeScaleUp1To3(t *testing.T) { ) } -// 1 worker node cluster scaled up to 3 -func TestNutanixKubernetes125UbuntuWorkerNodeScaleUp1To3(t *testing.T) { - provider := framework.NewNutanix(t, framework.WithUbuntu125Nutanix()) +func TestNutanixKubernetes130UbuntuWorkerNodeScaleUp1To3(t *testing.T) { + provider := framework.NewNutanix(t, framework.WithUbuntu130Nutanix()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithEtcdCountIfExternal(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), ) runSimpleUpgradeFlow( test, - v1alpha1.Kube125, + v1alpha1.Kube130, framework.WithClusterUpgrade(api.WithWorkerNodeCount(3)), ) } @@ -944,24 +989,6 @@ func TestNutanixKubernetes128UbuntuControlPlaneNodeScaleUp1To3(t *testing.T) { ) } -// 1 node control plane cluster scaled up to 3 -func TestNutanixKubernetes125UbuntuControlPlaneNodeScaleUp1To3(t *testing.T) { - provider := framework.NewNutanix(t, framework.WithUbuntu125Nutanix()) - test := framework.NewClusterE2ETest( - t, - provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), - framework.WithClusterFiller(api.WithControlPlaneCount(1)), - framework.WithClusterFiller(api.WithEtcdCountIfExternal(1)), - framework.WithClusterFiller(api.WithWorkerNodeCount(3)), - ) - runSimpleUpgradeFlow( - test, - v1alpha1.Kube125, - framework.WithClusterFiller(api.WithControlPlaneCount(3)), - ) -} - // 1 node control plane cluster scaled up to 3 func TestNutanixKubernetes126UbuntuControlPlaneNodeScaleUp1To3(t *testing.T) { provider := framework.NewNutanix(t, framework.WithUbuntu126Nutanix()) @@ -1015,21 +1042,37 @@ func TestNutanixKubernetes128UbuntuWorkerNodeScaleDown3To1(t *testing.T) { ) } -// 3 worker node cluster scaled down to 1 -func TestNutanixKubernetes125UbuntuWorkerNodeScaleDown3To1(t *testing.T) { - provider := framework.NewNutanix(t, framework.WithUbuntu125Nutanix()) +func TestNutanixKubernetes129UbuntuControlPlaneNodeScaleUp1To3(t *testing.T) { + provider := framework.NewNutanix(t, framework.WithUbuntu129Nutanix()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithEtcdCountIfExternal(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), + ) + runSimpleUpgradeFlow( + test, + v1alpha1.Kube129, framework.WithClusterFiller(api.WithControlPlaneCount(3)), + ) +} + +func TestNutanixKubernetes130UbuntuControlPlaneNodeScaleUp1To3(t *testing.T) { + provider := framework.NewNutanix(t, framework.WithUbuntu130Nutanix()) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithEtcdCountIfExternal(1)), - framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(3)), ) 
runSimpleUpgradeFlow( test, - v1alpha1.Kube125, - framework.WithClusterUpgrade(api.WithWorkerNodeCount(1)), + v1alpha1.Kube130, + framework.WithClusterFiller(api.WithControlPlaneCount(3)), ) } @@ -1086,21 +1129,37 @@ func TestNutanixKubernetes128UbuntuControlPlaneNodeScaleDown3To1(t *testing.T) { ) } -// 3 node control plane cluster scaled down to 1 -func TestNutanixKubernetes125UbuntuControlPlaneNodeScaleDown3To1(t *testing.T) { - provider := framework.NewNutanix(t, framework.WithUbuntu125Nutanix()) +func TestNutanixKubernetes129UbuntuWorkerNodeScaleDown3To1(t *testing.T) { + provider := framework.NewNutanix(t, framework.WithUbuntu129Nutanix()) test := framework.NewClusterE2ETest( t, provider, - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), framework.WithClusterFiller(api.WithControlPlaneCount(3)), framework.WithClusterFiller(api.WithEtcdCountIfExternal(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), ) runSimpleUpgradeFlow( test, - v1alpha1.Kube125, - framework.WithClusterFiller(api.WithControlPlaneCount(1)), + v1alpha1.Kube129, + framework.WithClusterUpgrade(api.WithWorkerNodeCount(1)), + ) +} + +func TestNutanixKubernetes130UbuntuWorkerNodeScaleDown3To1(t *testing.T) { + provider := framework.NewNutanix(t, framework.WithUbuntu130Nutanix()) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(3)), + framework.WithClusterFiller(api.WithEtcdCountIfExternal(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + ) + runSimpleUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterUpgrade(api.WithWorkerNodeCount(1)), ) } @@ -1140,20 +1199,42 @@ func TestNutanixKubernetes127UbuntuControlPlaneNodeScaleDown3To1(t *testing.T) { ) } -// OIDC Tests -func TestNutanixKubernetes125OIDC(t *testing.T) { +// 3 node control plane cluster scaled down to 1 +func TestNutanixKubernetes129UbuntuControlPlaneNodeScaleDown3To1(t *testing.T) { + provider := framework.NewNutanix(t, framework.WithUbuntu129Nutanix()) test := framework.NewClusterE2ETest( t, - framework.NewNutanix(t, framework.WithUbuntu125Nutanix()), - framework.WithOIDC(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube129)), + framework.WithClusterFiller(api.WithControlPlaneCount(3)), + framework.WithClusterFiller(api.WithEtcdCountIfExternal(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + ) + runSimpleUpgradeFlow( + test, + v1alpha1.Kube129, framework.WithClusterFiller(api.WithControlPlaneCount(1)), + ) +} + +func TestNutanixKubernetes130UbuntuControlPlaneNodeScaleDown3To1(t *testing.T) { + provider := framework.NewNutanix(t, framework.WithUbuntu130Nutanix()) + test := framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(3)), framework.WithClusterFiller(api.WithEtcdCountIfExternal(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), ) - runOIDCFlow(test) + runSimpleUpgradeFlow( + test, + v1alpha1.Kube130, + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + ) } +// OIDC Tests func TestNutanixKubernetes126OIDC(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -1193,20 +1274,20 @@ func 
TestNutanixKubernetes129OIDC(t *testing.T) { runOIDCFlow(test) } -// AWS IAM Authenticator Tests -func TestNutanixKubernetes125AWSIamAuth(t *testing.T) { +func TestNutanixKubernetes130OIDC(t *testing.T) { test := framework.NewClusterE2ETest( t, - framework.NewNutanix(t, framework.WithUbuntu125Nutanix()), - framework.WithAWSIam(), - framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube125)), + framework.NewNutanix(t, framework.WithUbuntu130Nutanix()), + framework.WithOIDC(), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), framework.WithClusterFiller(api.WithControlPlaneCount(1)), framework.WithClusterFiller(api.WithEtcdCountIfExternal(1)), framework.WithClusterFiller(api.WithWorkerNodeCount(1)), ) - runAWSIamAuthFlow(test) + runOIDCFlow(test) } +// AWS IAM Authenticator Tests func TestNutanixKubernetes126AWSIamAuth(t *testing.T) { test := framework.NewClusterE2ETest( t, @@ -1246,13 +1327,26 @@ func TestNutanixKubernetes129AWSIamAuth(t *testing.T) { runAWSIamAuthFlow(test) } -func TestNutanixKubernetes128UbuntuManagementCPUpgradeAPI(t *testing.T) { - provider := framework.NewNutanix(t, framework.WithUbuntu128Nutanix()) +func TestNutanixKubernetes130AWSIamAuth(t *testing.T) { + test := framework.NewClusterE2ETest( + t, + framework.NewNutanix(t, framework.WithUbuntu130Nutanix()), + framework.WithAWSIam(), + framework.WithClusterFiller(api.WithKubernetesVersion(v1alpha1.Kube130)), + framework.WithClusterFiller(api.WithControlPlaneCount(1)), + framework.WithClusterFiller(api.WithEtcdCountIfExternal(1)), + framework.WithClusterFiller(api.WithWorkerNodeCount(1)), + ) + runAWSIamAuthFlow(test) +} + +func TestNutanixKubernetes130UbuntuManagementCPUpgradeAPI(t *testing.T) { + provider := framework.NewNutanix(t, framework.WithUbuntu130Nutanix()) test := framework.NewClusterE2ETest( t, provider, ).WithClusterConfig( api.ClusterToConfigFiller( - api.WithKubernetesVersion(v1alpha1.Kube128), + api.WithKubernetesVersion(v1alpha1.Kube130), api.WithControlPlaneCount(1), api.WithEtcdCountIfExternal(1), api.WithWorkerNodeCount(1), diff --git a/test/framework/nutanix.go b/test/framework/nutanix.go index 4201bd6757c5..4a2f1879f44f 100644 --- a/test/framework/nutanix.go +++ b/test/framework/nutanix.go @@ -33,21 +33,21 @@ const ( nutanixControlPlaneCidrVar = "T_NUTANIX_CONTROL_PLANE_CIDR" nutanixPodCidrVar = "T_NUTANIX_POD_CIDR" nutanixServiceCidrVar = "T_NUTANIX_SERVICE_CIDR" - nutanixTemplateNameUbuntu125Var = "T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_25" nutanixTemplateNameUbuntu126Var = "T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_26" nutanixTemplateNameUbuntu127Var = "T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_27" nutanixTemplateNameUbuntu128Var = "T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_28" nutanixTemplateNameUbuntu129Var = "T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_29" - nutanixTemplateNameRedHat125Var = "T_NUTANIX_TEMPLATE_NAME_REDHAT_1_25" + nutanixTemplateNameUbuntu130Var = "T_NUTANIX_TEMPLATE_NAME_UBUNTU_1_30" nutanixTemplateNameRedHat126Var = "T_NUTANIX_TEMPLATE_NAME_REDHAT_1_26" nutanixTemplateNameRedHat127Var = "T_NUTANIX_TEMPLATE_NAME_REDHAT_1_27" nutanixTemplateNameRedHat128Var = "T_NUTANIX_TEMPLATE_NAME_REDHAT_1_28" nutanixTemplateNameRedHat129Var = "T_NUTANIX_TEMPLATE_NAME_REDHAT_1_29" - nutanixTemplateNameRedHat9125Var = "T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_25" + nutanixTemplateNameRedHat130Var = "T_NUTANIX_TEMPLATE_NAME_REDHAT_1_30" nutanixTemplateNameRedHat9126Var = "T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_26" nutanixTemplateNameRedHat9127Var = "T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_27" 
nutanixTemplateNameRedHat9128Var = "T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_28" nutanixTemplateNameRedHat9129Var = "T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_29" + nutanixTemplateNameRedHat9130Var = "T_NUTANIX_TEMPLATE_NAME_REDHAT_9_1_30" ) var requiredNutanixEnvVars = []string{ @@ -66,21 +66,21 @@ var requiredNutanixEnvVars = []string{ nutanixSubnetName, nutanixPodCidrVar, nutanixServiceCidrVar, - nutanixTemplateNameUbuntu125Var, nutanixTemplateNameUbuntu126Var, nutanixTemplateNameUbuntu127Var, nutanixTemplateNameUbuntu128Var, nutanixTemplateNameUbuntu129Var, - nutanixTemplateNameRedHat125Var, + nutanixTemplateNameUbuntu130Var, nutanixTemplateNameRedHat126Var, nutanixTemplateNameRedHat127Var, nutanixTemplateNameRedHat128Var, nutanixTemplateNameRedHat129Var, - nutanixTemplateNameRedHat9125Var, + nutanixTemplateNameRedHat130Var, nutanixTemplateNameRedHat9126Var, nutanixTemplateNameRedHat9127Var, nutanixTemplateNameRedHat9128Var, nutanixTemplateNameRedHat9129Var, + nutanixTemplateNameRedHat9130Var, nutanixInsecure, } @@ -221,12 +221,6 @@ func withNutanixKubeVersionAndOS(kubeVersion anywherev1.KubernetesVersion, os OS } } -// WithUbuntu125Nutanix returns a NutanixOpt that adds API fillers to use a Ubuntu Nutanix template for k8s 1.25 -// and the "ubuntu" osFamily in all machine configs. -func WithUbuntu125Nutanix() NutanixOpt { - return withNutanixKubeVersionAndOS(anywherev1.Kube125, Ubuntu2004, nil) -} - // WithUbuntu126Nutanix returns a NutanixOpt that adds API fillers to use a Ubuntu Nutanix template for k8s 1.26 // and the "ubuntu" osFamily in all machine configs. func WithUbuntu126Nutanix() NutanixOpt { @@ -251,10 +245,10 @@ func WithUbuntu129Nutanix() NutanixOpt { return withNutanixKubeVersionAndOS(anywherev1.Kube129, Ubuntu2004, nil) } -// WithRedHat125Nutanix returns a NutanixOpt that adds API fillers to use a RedHat 8 Nutanix template for k8s 1.25 -// and the "redhat" osFamily in all machine configs. -func WithRedHat125Nutanix() NutanixOpt { - return withNutanixKubeVersionAndOS(anywherev1.Kube125, RedHat8, nil) +// WithUbuntu130Nutanix returns a NutanixOpt that adds API fillers to use a Ubuntu Nutanix template for k8s 1.30 +// and the "ubuntu" osFamily in all machine configs. +func WithUbuntu130Nutanix() NutanixOpt { + return withNutanixKubeVersionAndOS(anywherev1.Kube130, Ubuntu2004, nil) } // WithRedHat126Nutanix returns a NutanixOpt that adds API fillers to use a RedHat 8 Nutanix template for k8s 1.26 @@ -281,10 +275,10 @@ func WithRedHat129Nutanix() NutanixOpt { return withNutanixKubeVersionAndOS(anywherev1.Kube129, RedHat8, nil) } -// WithRedHat9Kubernetes125Nutanix returns a NutanixOpt that adds API fillers to use a RedHat 9 Nutanix template for k8s 1.25 +// WithRedHat130Nutanix returns a NutanixOpt that adds API fillers to use a RedHat 8 Nutanix template for k8s 1.30 // and the "redhat" osFamily in all machine configs. 
-func WithRedHat9Kubernetes125Nutanix() NutanixOpt { - return withNutanixKubeVersionAndOS(anywherev1.Kube125, RedHat9, nil) +func WithRedHat130Nutanix() NutanixOpt { + return withNutanixKubeVersionAndOS(anywherev1.Kube130, RedHat8, nil) } // WithRedHat9Kubernetes126Nutanix returns a NutanixOpt that adds API fillers to use a RedHat 9 Nutanix template for k8s 1.26 @@ -311,6 +305,12 @@ func WithRedHat9Kubernetes129Nutanix() NutanixOpt { return withNutanixKubeVersionAndOS(anywherev1.Kube129, RedHat9, nil) } +// WithRedHat9Kubernetes130Nutanix returns a NutanixOpt that adds API fillers to use a RedHat 9 Nutanix template for k8s 1.30 +// and the "redhat" osFamily in all machine configs. +func WithRedHat9Kubernetes130Nutanix() NutanixOpt { + return withNutanixKubeVersionAndOS(anywherev1.Kube130, RedHat9, nil) +} + // withNutanixKubeVersionAndOSForUUID returns a NutanixOpt that adds API fillers to use a Nutanix template UUID // corresponding to the provided OS family and Kubernetes version, in addition to configuring all machine configs // to use this OS family. @@ -321,12 +321,6 @@ func withNutanixKubeVersionAndOSForUUID(kubeVersion anywherev1.KubernetesVersion } } -// WithUbuntu125NutanixUUID returns a NutanixOpt that adds API fillers to use a Ubuntu Nutanix template UUID for k8s 1.25 -// and the "ubuntu" osFamily in all machine configs. -func WithUbuntu125NutanixUUID() NutanixOpt { - return withNutanixKubeVersionAndOSForUUID(anywherev1.Kube125, Ubuntu2004, nil) -} - // WithUbuntu126NutanixUUID returns a NutanixOpt that adds API fillers to use a Ubuntu Nutanix template UUID for k8s 1.26 // and the "ubuntu" osFamily in all machine configs. func WithUbuntu126NutanixUUID() NutanixOpt { @@ -351,10 +345,10 @@ func WithUbuntu129NutanixUUID() NutanixOpt { return withNutanixKubeVersionAndOSForUUID(anywherev1.Kube129, Ubuntu2004, nil) } -// WithRedHat125NutanixUUID returns a NutanixOpt that adds API fillers to use a RedHat Nutanix template UUID for k8s 1.25 -// and the "redhat" osFamily in all machine configs. -func WithRedHat125NutanixUUID() NutanixOpt { - return withNutanixKubeVersionAndOSForUUID(anywherev1.Kube125, RedHat8, nil) +// WithUbuntu130NutanixUUID returns a NutanixOpt that adds API fillers to use a Ubuntu Nutanix template UUID for k8s 1.30 +// and the "ubuntu" osFamily in all machine configs. +func WithUbuntu130NutanixUUID() NutanixOpt { + return withNutanixKubeVersionAndOSForUUID(anywherev1.Kube130, Ubuntu2004, nil) } // WithRedHat126NutanixUUID returns a NutanixOpt that adds API fillers to use a RedHat Nutanix template UUID for k8s 1.26 @@ -381,10 +375,10 @@ func WithRedHat129NutanixUUID() NutanixOpt { return withNutanixKubeVersionAndOSForUUID(anywherev1.Kube129, RedHat8, nil) } -// WithRedHat9Kubernetes125NutanixUUID returns a NutanixOpt that adds API fillers to use a RedHat 9 Nutanix template UUID for k8s 1.25 +// WithRedHat130NutanixUUID returns a NutanixOpt that adds API fillers to use a RedHat Nutanix template UUID for k8s 1.30 // and the "redhat" osFamily in all machine configs. 
-func WithRedHat9Kubernetes125NutanixUUID() NutanixOpt { - return withNutanixKubeVersionAndOSForUUID(anywherev1.Kube125, RedHat9, nil) +func WithRedHat130NutanixUUID() NutanixOpt { + return withNutanixKubeVersionAndOSForUUID(anywherev1.Kube130, RedHat8, nil) } // WithRedHat9Kubernetes126NutanixUUID returns a NutanixOpt that adds API fillers to use a RedHat 9 Nutanix template UUID for k8s 1.26 @@ -411,6 +405,12 @@ func WithRedHat9Kubernetes129NutanixUUID() NutanixOpt { return withNutanixKubeVersionAndOSForUUID(anywherev1.Kube129, RedHat9, nil) } +// WithRedHat9Kubernetes130NutanixUUID returns a NutanixOpt that adds API fillers to use a RedHat 9 Nutanix template UUID for k8s 1.30 +// and the "redhat" osFamily in all machine configs. +func WithRedHat9Kubernetes130NutanixUUID() NutanixOpt { + return withNutanixKubeVersionAndOSForUUID(anywherev1.Kube130, RedHat9, nil) +} + func (n *Nutanix) withNutanixUUID(name string, osFamily anywherev1.OSFamily) []api.NutanixFiller { uuid, err := n.client.GetImageUUIDFromName(context.Background(), name) if err != nil { @@ -457,12 +457,6 @@ func (n *Nutanix) templateForKubeVersionAndOS(kubeVersion anywherev1.KubernetesV return api.WithNutanixMachineTemplateImageName(template) } -// Ubuntu125Template returns NutanixFiller by reading the env var and setting machine config's -// image name parameter in the spec. -func (n *Nutanix) Ubuntu125Template() api.NutanixFiller { - return n.templateForKubeVersionAndOS(anywherev1.Kube125, Ubuntu2004, nil) -} - // Ubuntu126Template returns NutanixFiller by reading the env var and setting machine config's // image name parameter in the spec. func (n *Nutanix) Ubuntu126Template() api.NutanixFiller { @@ -487,10 +481,10 @@ func (n *Nutanix) Ubuntu129Template() api.NutanixFiller { return n.templateForKubeVersionAndOS(anywherev1.Kube129, Ubuntu2004, nil) } -// RedHat125Template returns NutanixFiller by reading the env var and setting machine config's +// Ubuntu130Template returns NutanixFiller by reading the env var and setting machine config's // image name parameter in the spec. -func (n *Nutanix) RedHat125Template() api.NutanixFiller { - return n.templateForKubeVersionAndOS(anywherev1.Kube125, RedHat8, nil) +func (n *Nutanix) Ubuntu130Template() api.NutanixFiller { + return n.templateForKubeVersionAndOS(anywherev1.Kube130, Ubuntu2004, nil) } // RedHat126Template returns NutanixFiller by reading the env var and setting machine config's @@ -517,10 +511,10 @@ func (n *Nutanix) RedHat129Template() api.NutanixFiller { return n.templateForKubeVersionAndOS(anywherev1.Kube129, RedHat8, nil) } -// RedHat9Kubernetes125Template returns NutanixFiller by reading the env var and setting machine config's +// RedHat130Template returns NutanixFiller by reading the env var and setting machine config's // image name parameter in the spec. -func (n *Nutanix) RedHat9Kubernetes125Template() api.NutanixFiller { - return n.templateForKubeVersionAndOS(anywherev1.Kube125, RedHat9, nil) +func (n *Nutanix) RedHat130Template() api.NutanixFiller { + return n.templateForKubeVersionAndOS(anywherev1.Kube130, RedHat8, nil) } // RedHat9Kubernetes126Template returns NutanixFiller by reading the env var and setting machine config's @@ -547,6 +541,12 @@ func (n *Nutanix) RedHat9Kubernetes129Template() api.NutanixFiller { return n.templateForKubeVersionAndOS(anywherev1.Kube129, RedHat9, nil) } +// RedHat9Kubernetes130Template returns NutanixFiller by reading the env var and setting machine config's +// image name parameter in the spec. 
+func (n *Nutanix) RedHat9Kubernetes130Template() api.NutanixFiller { + return n.templateForKubeVersionAndOS(anywherev1.Kube130, RedHat9, nil) +} + // ClusterStateValidations returns a list of provider specific ClusterStateValidations. func (n *Nutanix) ClusterStateValidations() []clusterf.StateValidation { return []clusterf.StateValidation{} From 6443b300de79a96fb6ad21241be37b989e09cbca Mon Sep 17 00:00:00 2001 From: Rahul Ganesh <31204974+rahulbabu95@users.noreply.github.com> Date: Fri, 31 May 2024 16:17:10 -0700 Subject: [PATCH 177/193] Remove K8s 1.30 feature flag and default as cluster version (#8228) * Remove K8s 1.30 feature flag and default as cluster version Signed-off-by: Rahul Ganesh * Fix CI --------- Signed-off-by: Rahul Ganesh Co-authored-by: Rahul Ganesh --- pkg/api/v1alpha1/cluster.go | 2 +- pkg/api/v1alpha1/cluster_test.go | 2 +- .../testdata/cluster_in_place_upgrade.yaml | 2 +- pkg/features/features.go | 9 --------- pkg/features/features_test.go | 8 -------- pkg/validations/cluster.go | 11 ----------- pkg/validations/cluster_test.go | 16 ---------------- .../createvalidations/preflightvalidations.go | 9 --------- .../upgradevalidations/preflightvalidations.go | 9 --------- test/framework/cluster.go | 4 +--- 10 files changed, 4 insertions(+), 68 deletions(-) diff --git a/pkg/api/v1alpha1/cluster.go b/pkg/api/v1alpha1/cluster.go index 9303ca3556b3..aa750a011da3 100644 --- a/pkg/api/v1alpha1/cluster.go +++ b/pkg/api/v1alpha1/cluster.go @@ -229,7 +229,7 @@ func GetAndValidateClusterConfig(fileName string) (*Cluster, error) { // GetClusterDefaultKubernetesVersion returns the default kubernetes version for a Cluster. func GetClusterDefaultKubernetesVersion() KubernetesVersion { - return Kube129 + return Kube130 } // ValidateClusterConfigContent validates a Cluster object without modifying it diff --git a/pkg/api/v1alpha1/cluster_test.go b/pkg/api/v1alpha1/cluster_test.go index fc6ef7e2423f..941484bb41cb 100644 --- a/pkg/api/v1alpha1/cluster_test.go +++ b/pkg/api/v1alpha1/cluster_test.go @@ -3980,7 +3980,7 @@ func TestValidateEksaVersion(t *testing.T) { func TestGetClusterDefaultKubernetesVersion(t *testing.T) { g := NewWithT(t) - g.Expect(GetClusterDefaultKubernetesVersion()).To(Equal(Kube129)) + g.Expect(GetClusterDefaultKubernetesVersion()).To(Equal(Kube130)) } func TestClusterWorkerNodeConfigCount(t *testing.T) { diff --git a/pkg/api/v1alpha1/testdata/cluster_in_place_upgrade.yaml b/pkg/api/v1alpha1/testdata/cluster_in_place_upgrade.yaml index dd4f10c4bb77..069c9a667452 100644 --- a/pkg/api/v1alpha1/testdata/cluster_in_place_upgrade.yaml +++ b/pkg/api/v1alpha1/testdata/cluster_in_place_upgrade.yaml @@ -17,7 +17,7 @@ spec: upgradeRolloutStrategy: type: InPlace datacenterRef: {} - kubernetesVersion: "1.29" + kubernetesVersion: "1.30" managementCluster: name: test-cluster workerNodeGroupConfigurations: diff --git a/pkg/features/features.go b/pkg/features/features.go index 94b4b02d9b0a..7cb88ec06d58 100644 --- a/pkg/features/features.go +++ b/pkg/features/features.go @@ -8,7 +8,6 @@ const ( UseControllerForCli = "USE_CONTROLLER_FOR_CLI" VSphereInPlaceEnvVar = "VSPHERE_IN_PLACE_UPGRADE" APIServerExtraArgsEnabledEnvVar = "API_SERVER_EXTRA_ARGS_ENABLED" - K8s130SupportEnvVar = "K8S_1_30_SUPPORT" ) func FeedGates(featureGates []string) { @@ -65,11 +64,3 @@ func APIServerExtraArgsEnabled() Feature { IsActive: globalFeatures.isActiveForEnvVar(APIServerExtraArgsEnabledEnvVar), } } - -// K8s130Support is the feature flag for Kubernetes 1.30 support. 
-func K8s130Support() Feature { - return Feature{ - Name: "Kubernetes version 1.30 support", - IsActive: globalFeatures.isActiveForEnvVar(K8s130SupportEnvVar), - } -} diff --git a/pkg/features/features_test.go b/pkg/features/features_test.go index db46c689e675..739d5f4c146c 100644 --- a/pkg/features/features_test.go +++ b/pkg/features/features_test.go @@ -85,11 +85,3 @@ func TestAPIServerExtraArgsEnabledFeatureFlag(t *testing.T) { g.Expect(os.Setenv(APIServerExtraArgsEnabledEnvVar, "true")).To(Succeed()) g.Expect(IsActive(APIServerExtraArgsEnabled())).To(BeTrue()) } - -func TestWithK8s130FeatureFlag(t *testing.T) { - g := NewWithT(t) - setupContext(t) - - g.Expect(os.Setenv(K8s130SupportEnvVar, "true")).To(Succeed()) - g.Expect(IsActive(K8s130Support())).To(BeTrue()) -} diff --git a/pkg/validations/cluster.go b/pkg/validations/cluster.go index af8bc4761124..e4a75f656155 100644 --- a/pkg/validations/cluster.go +++ b/pkg/validations/cluster.go @@ -10,7 +10,6 @@ import ( "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/config" "github.com/aws/eks-anywhere/pkg/constants" - "github.com/aws/eks-anywhere/pkg/features" "github.com/aws/eks-anywhere/pkg/logger" "github.com/aws/eks-anywhere/pkg/providers" "github.com/aws/eks-anywhere/pkg/semver" @@ -268,13 +267,3 @@ func ValidateManagementComponentsVersionSkew(ctx context.Context, k KubectlClien } return nil } - -// ValidateK8s130Support checks if the 1.30 feature flag is set when using k8s 1.30. -func ValidateK8s130Support(clusterSpec *cluster.Spec) error { - if !features.IsActive(features.K8s130Support()) { - if clusterSpec.Cluster.Spec.KubernetesVersion == v1alpha1.Kube130 { - return fmt.Errorf("kubernetes version %s is not enabled. Please set the env variable %v", v1alpha1.Kube130, features.K8s130SupportEnvVar) - } - } - return nil -} diff --git a/pkg/validations/cluster_test.go b/pkg/validations/cluster_test.go index ab460b254406..553b598f13a7 100644 --- a/pkg/validations/cluster_test.go +++ b/pkg/validations/cluster_test.go @@ -15,7 +15,6 @@ import ( "github.com/aws/eks-anywhere/internal/test" anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/cluster" - "github.com/aws/eks-anywhere/pkg/features" "github.com/aws/eks-anywhere/pkg/providers" providermocks "github.com/aws/eks-anywhere/pkg/providers/mocks" "github.com/aws/eks-anywhere/pkg/types" @@ -743,18 +742,3 @@ func TestValidateManagementComponentsVersionSkew(t *testing.T) { }) } } - -func TestValidateK8s130Support(t *testing.T) { - tt := newTest(t) - tt.clusterSpec.Cluster.Spec.KubernetesVersion = anywherev1.Kube130 - tt.Expect(validations.ValidateK8s130Support(tt.clusterSpec)).To( - MatchError(ContainSubstring("kubernetes version 1.30 is not enabled. 
Please set the env variable K8S_1_30_SUPPORT"))) -} - -func TestValidateK8s130SupportActive(t *testing.T) { - tt := newTest(t) - tt.clusterSpec.Cluster.Spec.KubernetesVersion = anywherev1.Kube130 - features.ClearCache() - os.Setenv(features.K8s130SupportEnvVar, "true") - tt.Expect(validations.ValidateK8s130Support(tt.clusterSpec)).To(Succeed()) -} diff --git a/pkg/validations/createvalidations/preflightvalidations.go b/pkg/validations/createvalidations/preflightvalidations.go index 00bd5c76e6fd..76fa21e31a6c 100644 --- a/pkg/validations/createvalidations/preflightvalidations.go +++ b/pkg/validations/createvalidations/preflightvalidations.go @@ -7,7 +7,6 @@ import ( anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/config" "github.com/aws/eks-anywhere/pkg/constants" - "github.com/aws/eks-anywhere/pkg/features" "github.com/aws/eks-anywhere/pkg/types" "github.com/aws/eks-anywhere/pkg/validations" ) @@ -50,14 +49,6 @@ func (v *CreateValidations) PreflightValidations(ctx context.Context) []validati Err: validations.ValidateEksaVersion(ctx, v.Opts.CliVersion, v.Opts.Spec), } }, - func() *validations.ValidationResult { - return &validations.ValidationResult{ - Name: "validate kubernetes version 1.30 support", - Remediation: fmt.Sprintf("ensure %v env variable is set", features.K8s130SupportEnvVar), - Err: validations.ValidateK8s130Support(v.Opts.Spec), - Silent: true, - } - }, } if v.Opts.Spec.Cluster.IsManaged() { diff --git a/pkg/validations/upgradevalidations/preflightvalidations.go b/pkg/validations/upgradevalidations/preflightvalidations.go index ff0612f82430..650bd9f941e2 100644 --- a/pkg/validations/upgradevalidations/preflightvalidations.go +++ b/pkg/validations/upgradevalidations/preflightvalidations.go @@ -9,7 +9,6 @@ import ( anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/config" "github.com/aws/eks-anywhere/pkg/constants" - "github.com/aws/eks-anywhere/pkg/features" "github.com/aws/eks-anywhere/pkg/providers" "github.com/aws/eks-anywhere/pkg/types" "github.com/aws/eks-anywhere/pkg/validation" @@ -123,14 +122,6 @@ func (u *UpgradeValidations) PreflightValidations(ctx context.Context) []validat Err: validations.ValidatePauseAnnotation(ctx, k, targetCluster, targetCluster.Name), } }, - func() *validations.ValidationResult { - return &validations.ValidationResult{ - Name: "validate kubernetes version 1.30 support", - Remediation: fmt.Sprintf("ensure %v env variable is set", features.K8s130SupportEnvVar), - Err: validations.ValidateK8s130Support(u.Opts.Spec), - Silent: true, - } - }, } if u.Opts.Spec.Cluster.IsManaged() { diff --git a/test/framework/cluster.go b/test/framework/cluster.go index f6c72af2afa7..4f4ebbfcf66e 100644 --- a/test/framework/cluster.go +++ b/test/framework/cluster.go @@ -35,7 +35,6 @@ import ( "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/constants" "github.com/aws/eks-anywhere/pkg/executables" - "github.com/aws/eks-anywhere/pkg/features" "github.com/aws/eks-anywhere/pkg/filewriter" "github.com/aws/eks-anywhere/pkg/git" "github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder" @@ -2153,12 +2152,11 @@ func dumpFile(description, path string, t T) { func (e *ClusterE2ETest) setFeatureFlagForUnreleasedKubernetesVersion(version v1alpha1.KubernetesVersion) { // Update this variable to equal the feature flagged k8s version when applicable. 
// For example, if k8s 1.26 is under a feature flag, we would set this to v1alpha1.Kube126 - unreleasedK8sVersion := v1alpha1.Kube130 + var unreleasedK8sVersion v1alpha1.KubernetesVersion if version == unreleasedK8sVersion { // Set feature flag for the unreleased k8s version when applicable e.T.Logf("Setting k8s version support feature flag...") - os.Setenv(features.K8s130SupportEnvVar, "true") } } From 8e85e1e796ee36c550f957e1b1271204fd2bb41a Mon Sep 17 00:00:00 2001 From: Rahul Ganesh <31204974+rahulbabu95@users.noreply.github.com> Date: Fri, 31 May 2024 16:33:11 -0700 Subject: [PATCH 178/193] Skip 130 multi endpoint test for cloudstack (#8236) Signed-off-by: Rahul Ganesh Co-authored-by: Rahul Ganesh --- test/e2e/SKIPPED_TESTS.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/test/e2e/SKIPPED_TESTS.yaml b/test/e2e/SKIPPED_TESTS.yaml index 263e041111ef..28b47ffd9bb0 100644 --- a/test/e2e/SKIPPED_TESTS.yaml +++ b/test/e2e/SKIPPED_TESTS.yaml @@ -24,6 +24,7 @@ skipped_tests: - TestCloudStackKubernetes127MultiEndpointSimpleFlow - TestCloudStackKubernetes128MultiEndpointSimpleFlow - TestCloudStackKubernetes129MultiEndpointSimpleFlow +- TestCloudStackKubernetes130MultiEndpointSimpleFlow # Side effects - TestCloudStackKubernetes129WithOIDCManagementClusterUpgradeFromLatestSideEffects From 89e57a832e094acc2f60ee7d436a8a2f2c22646a Mon Sep 17 00:00:00 2001 From: Saurabh Parekh Date: Mon, 3 Jun 2024 19:01:11 -0700 Subject: [PATCH 179/193] Update docs for worker node group config (#8241) --- .../getting-started/baremetal/bare-spec.md | 28 +++++++++---------- .../getting-started/cloudstack/cloud-spec.md | 16 +++++------ .../getting-started/nutanix/nutanix-spec.md | 16 +++++------ .../en/docs/getting-started/snow/snow-spec.md | 16 +++++------ .../getting-started/vsphere/vsphere-spec.md | 16 +++++------ 5 files changed, 46 insertions(+), 46 deletions(-) diff --git a/docs/content/en/docs/getting-started/baremetal/bare-spec.md b/docs/content/en/docs/getting-started/baremetal/bare-spec.md index 52ee32da1a95..5b96f402637f 100644 --- a/docs/content/en/docs/getting-started/baremetal/bare-spec.md +++ b/docs/content/en/docs/getting-started/baremetal/bare-spec.md @@ -184,63 +184,63 @@ You can omit `workerNodeGroupConfigurations` when creating Bare Metal clusters. >**_NOTE:_** Empty `workerNodeGroupConfigurations` is not supported when Kubernetes version <= 1.21. -### workerNodeGroupConfigurations.count (optional) +### workerNodeGroupConfigurations[*].count (optional) Number of worker nodes. Optional if autoscalingConfiguration is used, in which case count will default to `autoscalingConfiguration.minCount`. Refers to [troubleshooting machine health check remediation not allowed]({{< relref "../../troubleshooting/troubleshooting/#machine-health-check-shows-remediation-is-not-allowed" >}}) and choose a sufficient number to allow machine health check remediation. -### workerNodeGroupConfigurations.machineGroupRef (required) +### workerNodeGroupConfigurations[*].machineGroupRef (required) Refers to the Kubernetes object with Tinkerbell-specific configuration for your nodes. See `TinkerbellMachineConfig Fields` below. -### workerNodeGroupConfigurations.name (required) +### workerNodeGroupConfigurations[*].name (required) Name of the worker node group (default: md-0) -### workerNodeGroupConfigurations.autoscalingConfiguration (optional) +### workerNodeGroupConfigurations[*].autoscalingConfiguration (optional) Configuration parameters for Cluster Autoscaler. 
>**_NOTE:_** Autoscaling configuration is not supported when using the `InPlace` upgrade rollout strategy. -### workerNodeGroupConfigurations.autoscalingConfiguration.minCount (optional) +### workerNodeGroupConfigurations[*].autoscalingConfiguration.minCount (optional) Minimum number of nodes for this node group's autoscaling configuration. -### workerNodeGroupConfigurations.autoscalingConfiguration.maxCount (optional) +### workerNodeGroupConfigurations[*].autoscalingConfiguration.maxCount (optional) Maximum number of nodes for this node group's autoscaling configuration. -### workerNodeGroupConfigurations.taints (optional) +### workerNodeGroupConfigurations[*].taints (optional) A list of taints to apply to the nodes in the worker node group. Modifying the taints associated with a worker node group configuration will cause new nodes to be rolled-out, replacing the existing nodes associated with the configuration. At least one node group must not have `NoSchedule` or `NoExecute` taints applied to it. -### workerNodeGroupConfigurations.labels (optional) +### workerNodeGroupConfigurations[*].labels (optional) A list of labels to apply to the nodes in the worker node group. This is in addition to the labels that EKS Anywhere will add by default. Modifying the labels associated with a worker node group configuration will cause new nodes to be rolled out, replacing the existing nodes associated with the configuration. -### workerNodeGroupConfigurations.kubernetesVersion (optional) +### workerNodeGroupConfigurations[*].kubernetesVersion (optional) The Kubernetes version you want to use for this worker node group. [Supported values]({{< relref "../../concepts/support-versions/#kubernetes-versions" >}}): `1.28`, `1.27`, `1.26`, `1.25`, `1.24` Must be less than or equal to the cluster `kubernetesVersion` defined at the root level of the cluster spec. The worker node kubernetesVersion must be no more than two minor Kubernetes versions lower than the cluster control plane's Kubernetes version. Removing `workerNodeGroupConfiguration.kubernetesVersion` will trigger an upgrade of the node group to the `kubernetesVersion` defined at the root level of the cluster spec. -#### workerNodeGroupConfigurations.upgradeRolloutStrategy (optional) +#### workerNodeGroupConfigurations[*].upgradeRolloutStrategy (optional) Configuration parameters for upgrade strategy. -#### workerNodeGroupConfigurations.upgradeRolloutStrategy.type (optional) +#### workerNodeGroupConfigurations[*].upgradeRolloutStrategy.type (optional) Default: `RollingUpdate` Type of rollout strategy. Supported values: `RollingUpdate`,`InPlace`. >**_NOTE:_** The upgrade rollout strategy type must be the same for all control plane and worker nodes. -#### workerNodeGroupConfigurations.upgradeRolloutStrategy.rollingUpdate (optional) +#### workerNodeGroupConfigurations[*].upgradeRolloutStrategy.rollingUpdate (optional) Configuration parameters for customizing rolling upgrade behavior. >**_NOTE:_** The rolling update parameters can only be configured if `upgradeRolloutStrategy.type` is `RollingUpdate`. -#### workerNodeGroupConfigurations.upgradeRolloutStrategy.rollingUpdate.maxSurge (optional) +#### workerNodeGroupConfigurations[*].upgradeRolloutStrategy.rollingUpdate.maxSurge (optional) Default: 1 This can not be 0 if maxUnavailable is 0. 
@@ -249,7 +249,7 @@ The maximum number of machines that can be scheduled above the desired number of Example: When this is set to n, the new worker node group can be scaled up immediately by n when the rolling upgrade starts. Total number of machines in the cluster (old + new) never exceeds (desired number of machines + n). Once scale down happens and old machines are brought down, the new worker node group can be scaled up further ensuring that the total number of machines running at any time does not exceed the desired number of machines + n. -#### workerNodeGroupConfigurations.upgradeRolloutStrategy.rollingUpdate.maxUnavailable (optional) +#### workerNodeGroupConfigurations[*].upgradeRolloutStrategy.rollingUpdate.maxUnavailable (optional) Default: 0 This can not be 0 if MaxSurge is 0. diff --git a/docs/content/en/docs/getting-started/cloudstack/cloud-spec.md b/docs/content/en/docs/getting-started/cloudstack/cloud-spec.md index ac9c064c2fae..b21ed6912eed 100644 --- a/docs/content/en/docs/getting-started/cloudstack/cloud-spec.md +++ b/docs/content/en/docs/getting-started/cloudstack/cloud-spec.md @@ -235,31 +235,31 @@ If this is a standalone cluster or if it were serving as the management cluster This takes in a list of node groups that you can define for your workers. You may define one or more worker node groups. -### workerNodeGroupConfigurations.count (required) +### workerNodeGroupConfigurations[*].count (required) Number of worker nodes. Optional if autoscalingConfiguration is used, in which case count will default to `autoscalingConfiguration.minCount`. Refers to [troubleshooting machine health check remediation not allowed]({{< relref "../../troubleshooting/troubleshooting/#machine-health-check-shows-remediation-is-not-allowed" >}}) and choose a sufficient number to allow machine health check remediation. -### workerNodeGroupConfigurations.machineGroupRef (required) +### workerNodeGroupConfigurations[*].machineGroupRef (required) Refers to the Kubernetes object with CloudStack specific configuration for your nodes. See `CloudStackMachineConfig Fields` below. -### workerNodeGroupConfigurations.name (required) +### workerNodeGroupConfigurations[*].name (required) Name of the worker node group (default: md-0) -### workerNodeGroupConfigurations.autoscalingConfiguration.minCount (optional) +### workerNodeGroupConfigurations[*].autoscalingConfiguration.minCount (optional) Minimum number of nodes for this node group's autoscaling configuration. -### workerNodeGroupConfigurations.autoscalingConfiguration.maxCount (optional) +### workerNodeGroupConfigurations[*].autoscalingConfiguration.maxCount (optional) Maximum number of nodes for this node group's autoscaling configuration. -### workerNodeGroupConfigurations.taints (optional) +### workerNodeGroupConfigurations[*].taints (optional) A list of taints to apply to the nodes in the worker node group. Modifying the taints associated with a worker node group configuration will cause new nodes to be rolled-out, replacing the existing nodes associated with the configuration. At least one node group must not have `NoSchedule` or `NoExecute` taints applied to it. -### workerNodeGroupConfigurations.labels (optional) +### workerNodeGroupConfigurations[*].labels (optional) A list of labels to apply to the nodes in the worker node group. This is in addition to the labels that EKS Anywhere will add by default. 
A special label value is supported by the CAPC provider: @@ -273,7 +273,7 @@ The `ds.meta_data.failuredomain` value will be replaced with a failuredomain nam Modifying the labels associated with a worker node group configuration will cause new nodes to be rolled out, replacing the existing nodes associated with the configuration. -### workerNodeGroupConfigurations.kubernetesVersion (optional) +### workerNodeGroupConfigurations[*].kubernetesVersion (optional) The Kubernetes version you want to use for this worker node group. Supported values: 1.28, 1.27, 1.26, 1.25, 1.24 ## CloudStackDatacenterConfig diff --git a/docs/content/en/docs/getting-started/nutanix/nutanix-spec.md b/docs/content/en/docs/getting-started/nutanix/nutanix-spec.md index 16378782e32c..fe844bd5510e 100644 --- a/docs/content/en/docs/getting-started/nutanix/nutanix-spec.md +++ b/docs/content/en/docs/getting-started/nutanix/nutanix-spec.md @@ -190,24 +190,24 @@ creation process are [here]({{< relref "./nutanix-prereq/#prepare-a-nutanix-envi ### workerNodeGroupConfigurations (required) This takes in a list of node groups that you can define for your workers. You may define one or more worker node groups. -### workerNodeGroupConfigurations.count (required) +### workerNodeGroupConfigurations[*].count (required) Number of worker nodes. Optional if `autoscalingConfiguration` is used, in which case count will default to `autoscalingConfiguration.minCount`. Refers to [troubleshooting machine health check remediation not allowed]({{< relref "../../troubleshooting/troubleshooting/#machine-health-check-shows-remediation-is-not-allowed" >}}) and choose a sufficient number to allow machine health check remediation. -### workerNodeGroupConfigurations.machineGroupRef (required) +### workerNodeGroupConfigurations[*].machineGroupRef (required) Refers to the Kubernetes object with Nutanix specific configuration for your nodes. See `NutanixMachineConfig` fields below. -### workerNodeGroupConfigurations.name (required) +### workerNodeGroupConfigurations[*].name (required) Name of the worker node group (default: `md-0`) -### workerNodeGroupConfigurations.autoscalingConfiguration.minCount (optional) -Minimum number of nodes for this node group’s autoscaling configuration. +### workerNodeGroupConfigurations[*].autoscalingConfiguration.minCount (optional) +Minimum number of nodes for this node group's autoscaling configuration. -### workerNodeGroupConfigurations.autoscalingConfiguration.maxCount (optional) -Maximum number of nodes for this node group’s autoscaling configuration. +### workerNodeGroupConfigurations[*].autoscalingConfiguration.maxCount (optional) +Maximum number of nodes for this node group's autoscaling configuration. -### workerNodeGroupConfigurations.kubernetesVersion (optional) +### workerNodeGroupConfigurations[*].kubernetesVersion (optional) The Kubernetes version you want to use for this worker node group. Supported values: 1.28, 1.27, 1.26, 1.25, 1.24 ### externalEtcdConfiguration.count (optional) diff --git a/docs/content/en/docs/getting-started/snow/snow-spec.md b/docs/content/en/docs/getting-started/snow/snow-spec.md index 91952e68c6eb..27d3ffa9b67a 100644 --- a/docs/content/en/docs/getting-started/snow/snow-spec.md +++ b/docs/content/en/docs/getting-started/snow/snow-spec.md @@ -147,38 +147,38 @@ the existing nodes. This takes in a list of node groups that you can define for your workers. You may define one or more worker node groups. 
-### workerNodeGroupConfigurations.count (required) +### workerNodeGroupConfigurations[*].count (required) Number of worker nodes. Optional if autoscalingConfiguration is used, in which case count will default to `autoscalingConfiguration.minCount`. Refers to [troubleshooting machine health check remediation not allowed]({{< relref "../../troubleshooting/troubleshooting/#machine-health-check-shows-remediation-is-not-allowed" >}}) and choose a sufficient number to allow machine health check remediation. -### workerNodeGroupConfigurations.machineGroupRef (required) +### workerNodeGroupConfigurations[*].machineGroupRef (required) Refers to the Kubernetes object with Snow specific configuration for your nodes. See `SnowMachineConfig Fields` below. -### workerNodeGroupConfigurations.name (required) +### workerNodeGroupConfigurations[*].name (required) Name of the worker node group (default: md-0) -### workerNodeGroupConfigurations.autoscalingConfiguration.minCount (optional) +### workerNodeGroupConfigurations[*].autoscalingConfiguration.minCount (optional) Minimum number of nodes for this node group's autoscaling configuration. -### workerNodeGroupConfigurations.autoscalingConfiguration.maxCount (optional) +### workerNodeGroupConfigurations[*].autoscalingConfiguration.maxCount (optional) Maximum number of nodes for this node group's autoscaling configuration. -### workerNodeGroupConfigurations.taints (optional) +### workerNodeGroupConfigurations[*].taints (optional) A list of taints to apply to the nodes in the worker node group. Modifying the taints associated with a worker node group configuration will cause new nodes to be rolled-out, replacing the existing nodes associated with the configuration. At least one node group must not have `NoSchedule` or `NoExecute` taints applied to it. -### workerNodeGroupConfigurations.labels (optional) +### workerNodeGroupConfigurations[*].labels (optional) A list of labels to apply to the nodes in the worker node group. This is in addition to the labels that EKS Anywhere will add by default. Modifying the labels associated with a worker node group configuration will cause new nodes to be rolled out, replacing the existing nodes associated with the configuration. -### workerNodeGroupConfigurations.kubernetesVersion (optional) +### workerNodeGroupConfigurations[*].kubernetesVersion (optional) The Kubernetes version you want to use for this worker node group. Supported values: 1.28, 1.27, 1.26, 1.25, 1.24 ### externalEtcdConfiguration.count (optional) diff --git a/docs/content/en/docs/getting-started/vsphere/vsphere-spec.md b/docs/content/en/docs/getting-started/vsphere/vsphere-spec.md index a0cfa4a257bd..c91212588dda 100644 --- a/docs/content/en/docs/getting-started/vsphere/vsphere-spec.md +++ b/docs/content/en/docs/getting-started/vsphere/vsphere-spec.md @@ -159,38 +159,38 @@ the existing nodes. This takes in a list of node groups that you can define for your workers. You may define one or more worker node groups. -### workerNodeGroupConfigurations.count (required) +### workerNodeGroupConfigurations[*].count (required) Number of worker nodes. Optional if the [cluster autoscaler curated package]({{< relref "../../packages/cluster-autoscaler/addclauto" >}}) is installed and autoscalingConfiguration is used, in which case count will default to `autoscalingConfiguration.minCount`. 
Refers to [troubleshooting machine health check remediation not allowed]({{< relref "../../troubleshooting/troubleshooting/#machine-health-check-shows-remediation-is-not-allowed" >}}) and choose a sufficient number to allow machine health check remediation. -### workerNodeGroupConfigurations.machineGroupRef (required) +### workerNodeGroupConfigurations[*].machineGroupRef (required) Refers to the Kubernetes object with vsphere specific configuration for your nodes. See [VSphereMachineConfig Fields](#vspheremachineconfig-fields) below. -### workerNodeGroupConfigurations.name (required) +### workerNodeGroupConfigurations[*].name (required) Name of the worker node group (default: md-0) -### workerNodeGroupConfigurations.autoscalingConfiguration.minCount (optional) +### workerNodeGroupConfigurations[*].autoscalingConfiguration.minCount (optional) Minimum number of nodes for this node group's autoscaling configuration. -### workerNodeGroupConfigurations.autoscalingConfiguration.maxCount (optional) +### workerNodeGroupConfigurations[*].autoscalingConfiguration.maxCount (optional) Maximum number of nodes for this node group's autoscaling configuration. -### workerNodeGroupConfigurations.taints (optional) +### workerNodeGroupConfigurations[*].taints (optional) A list of taints to apply to the nodes in the worker node group. Modifying the taints associated with a worker node group configuration will cause new nodes to be rolled-out, replacing the existing nodes associated with the configuration. At least one node group must **NOT** have `NoSchedule` or `NoExecute` taints applied to it. -### workerNodeGroupConfigurations.labels (optional) +### workerNodeGroupConfigurations[*].labels (optional) A list of labels to apply to the nodes in the worker node group. This is in addition to the labels that EKS Anywhere will add by default. Modifying the labels associated with a worker node group configuration will cause new nodes to be rolled out, replacing the existing nodes associated with the configuration. -### workerNodeGroupConfigurations.kubernetesVersion (optional) +### workerNodeGroupConfigurations[*].kubernetesVersion (optional) The Kubernetes version you want to use for this worker node group. [Supported values]({{< relref "../../concepts/support-versions/#kubernetes-versions" >}}): `1.28`, `1.27`, `1.26`, `1.25`, `1.24` Must be less than or equal to the cluster `kubernetesVersion` defined at the root level of the cluster spec. The worker node kubernetesVersion must be no more than two minor Kubernetes versions lower than the cluster control plane's Kubernetes version. Removing `workerNodeGroupConfiguration.kubernetesVersion` will trigger an upgrade of the node group to the `kubernetesVersion` defined at the root level of the cluster spec. 
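The next patch threads an optional KubeletConfiguration through the CloudStack, Nutanix, and vSphere CAPI templates by marshaling the spec's unstructured object into the template values. As a self-contained sketch of that flow: only the unstructured-to-YAML step mirrors the `buildTemplateMapCP`/`buildTemplateMapMD` changes below, and the sample `maxPods` value is invented.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"sigs.k8s.io/yaml"
)

func main() {
	// The cluster spec carries kubelet settings as a free-form object.
	kc := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"kind":    "KubeletConfiguration",
			"maxPods": 20,
		},
	}

	// As in the patch below, the object is marshaled to YAML and stored
	// in the template values; the CAPI templates then write it to
	// /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml and
	// point kubeadm's init/join patches directory at that location.
	values := map[string]interface{}{}
	kcString, err := yaml.Marshal(kc.Object)
	if err != nil {
		fmt.Println("error marshaling:", err)
		return
	}
	values["kubeletConfiguration"] = string(kcString)
	fmt.Print(values["kubeletConfiguration"])
}
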
From 44e2d2d018f47c0c8d74f570ded1c1f704cabb90 Mon Sep 17 00:00:00 2001 From: Mitali Paygude Date: Tue, 4 Jun 2024 01:29:37 -0700 Subject: [PATCH 180/193] Kubelet Configuration for cp and wn Nutanix (#8216) --- .../cloudstack/config/template-cp.yaml | 15 ++++ .../cloudstack/config/template-md.yaml | 13 +++- pkg/providers/cloudstack/template.go | 23 ++++++ pkg/providers/cloudstack/template_test.go | 30 ++++++++ pkg/providers/nutanix/config/cp-template.yaml | 15 ++++ pkg/providers/nutanix/config/md-template.yaml | 13 +++- pkg/providers/nutanix/template.go | 22 ++++++ pkg/providers/nutanix/template_test.go | 38 ++++++++++ .../testdata/eksa-cluster-invalid-kc.yaml | 73 +++++++++++++++++++ pkg/providers/vsphere/config/template-cp.yaml | 15 ++++ pkg/providers/vsphere/config/template-md.yaml | 13 +++- pkg/providers/vsphere/template.go | 23 ++++++ pkg/providers/vsphere/template_test.go | 30 ++++++++ 13 files changed, 320 insertions(+), 3 deletions(-) create mode 100644 pkg/providers/nutanix/testdata/eksa-cluster-invalid-kc.yaml diff --git a/pkg/providers/cloudstack/config/template-cp.yaml b/pkg/providers/cloudstack/config/template-cp.yaml index ed64fa9f0acd..ee9115402b30 100644 --- a/pkg/providers/cloudstack/config/template-cp.yaml +++ b/pkg/providers/cloudstack/config/template-cp.yaml @@ -156,6 +156,13 @@ spec: {{ .schedulerExtraArgs.ToYaml | indent 10 }} {{- end }} files: +{{- if .kubeletConfiguration }} + - content: | +{{ .kubeletConfiguration | indent 8}} + owner: root:root + permissions: "0644" + path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml +{{- end }} {{- if .encryptionProviderConfig }} - content: | {{ .encryptionProviderConfig | indent 8}} @@ -294,6 +301,10 @@ spec: path: /var/lib/kubeadm/aws-iam-authenticator/pki/key.pem {{- end}} initConfiguration: +{{- if .kubeletConfiguration }} + patches: + directory: /etc/kubernetes/patches +{{- end }} nodeRegistration: criSocket: /var/run/containerd/containerd.sock kubeletExtraArgs: @@ -316,6 +327,10 @@ spec: {{- end }} {{- end }} joinConfiguration: +{{- if .kubeletConfiguration }} + patches: + directory: /etc/kubernetes/patches +{{- end }} nodeRegistration: criSocket: /var/run/containerd/containerd.sock kubeletExtraArgs: diff --git a/pkg/providers/cloudstack/config/template-md.yaml b/pkg/providers/cloudstack/config/template-md.yaml index 81df17e3cc60..4229bbe63874 100644 --- a/pkg/providers/cloudstack/config/template-md.yaml +++ b/pkg/providers/cloudstack/config/template-md.yaml @@ -7,6 +7,10 @@ spec: template: spec: joinConfiguration: +{{- if .kubeletConfiguration }} + patches: + directory: /etc/kubernetes/patches +{{- end }} nodeRegistration: criSocket: /var/run/containerd/containerd.sock {{- if .workerNodeGroupTaints }} @@ -29,9 +33,16 @@ spec: {{ .kubeletExtraArgs.ToYaml | indent 12 }} {{- end }} name: "{{`{{ ds.meta_data.hostname }}`}}" -{{- if or .proxyConfig .registryMirrorMap }} +{{- if or (or .proxyConfig .registryMirrorMap) .kubeletConfiguration }} files: {{- end }} +{{- if .kubeletConfiguration }} + - content: | +{{ .kubeletConfiguration | indent 10 }} + owner: root:root + permissions: "0644" + path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml +{{- end }} {{- if .proxyConfig }} - content: | [Service] diff --git a/pkg/providers/cloudstack/template.go b/pkg/providers/cloudstack/template.go index ed4149947047..eb6cb5fdf081 100644 --- a/pkg/providers/cloudstack/template.go +++ b/pkg/providers/cloudstack/template.go @@ -4,6 +4,8 @@ import ( "fmt" "net" + "sigs.k8s.io/yaml" + 
"github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/clusterapi" @@ -257,6 +259,17 @@ func buildTemplateMapCP(clusterSpec *cluster.Spec) (map[string]interface{}, erro values["encryptionProviderConfig"] = conf } + if clusterSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration != nil { + cpKubeletConfig := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration.Object + + kcString, err := yaml.Marshal(cpKubeletConfig) + if err != nil { + return nil, fmt.Errorf("error marshaling %v", err) + } + + values["kubeletConfiguration"] = string(kcString) + } + return values, nil } @@ -389,6 +402,16 @@ func buildTemplateMapMD(clusterSpec *cluster.Spec, workerNodeGroupConfiguration fillProxyConfigurations(values, clusterSpec, endpoint) } + if workerNodeGroupConfiguration.KubeletConfiguration != nil { + wnKubeletConfig := workerNodeGroupConfiguration.KubeletConfiguration.Object + kcString, err := yaml.Marshal(wnKubeletConfig) + if err != nil { + return nil, fmt.Errorf("error marshaling %v", err) + } + + values["kubeletConfiguration"] = string(kcString) + } + return values, nil } diff --git a/pkg/providers/cloudstack/template_test.go b/pkg/providers/cloudstack/template_test.go index 76c4043d3715..a027ae93a68e 100644 --- a/pkg/providers/cloudstack/template_test.go +++ b/pkg/providers/cloudstack/template_test.go @@ -6,6 +6,7 @@ import ( "time" . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "github.com/aws/eks-anywhere/internal/test" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" @@ -169,3 +170,32 @@ func TestTemplateBuilder_CertSANs(t *testing.T) { test.AssertContentToFile(t, string(data), tc.Output) } } + +func TestVsphereTemplateBuilderGenerateCAPISpecControlPlaneValidKubeletConfigWN(t *testing.T) { + g := NewWithT(t) + spec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) + spec.Cluster.Spec.WorkerNodeGroupConfigurations[0].KubeletConfiguration = &unstructured.Unstructured{ + Object: map[string]interface{}{ + "maxPods": 20, + }, + } + builder := cloudstack.NewTemplateBuilder(time.Now) + _, err := builder.GenerateCAPISpecWorkers(spec, nil, nil) + g.Expect(err).ToNot(HaveOccurred()) +} + +func TestVsphereTemplateBuilderGenerateCAPISpecControlPlaneValidKubeletConfigCP(t *testing.T) { + g := NewWithT(t) + spec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) + spec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration = &unstructured.Unstructured{ + Object: map[string]interface{}{ + "maxPods": 20, + }, + } + spec.Cluster.Spec.ExternalEtcdConfiguration = nil + builder := cloudstack.NewTemplateBuilder(time.Now) + _, err := builder.GenerateCAPISpecControlPlane(spec, func(values map[string]interface{}) { + values["controlPlaneTemplateName"] = clusterapi.ControlPlaneMachineTemplateName(spec.Cluster) + }) + g.Expect(err).ToNot(HaveOccurred()) +} diff --git a/pkg/providers/nutanix/config/cp-template.yaml b/pkg/providers/nutanix/config/cp-template.yaml index a660cc953668..9d147c482598 100644 --- a/pkg/providers/nutanix/config/cp-template.yaml +++ b/pkg/providers/nutanix/config/cp-template.yaml @@ -147,6 +147,13 @@ spec: imageTag: {{.etcdImageTag}} {{- end }} files: +{{- if .kubeletConfiguration }} + - content: | +{{ .kubeletConfiguration | indent 8 }} + owner: root:root + permissions: "0644" + path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml +{{- end }} {{- if .encryptionProviderConfig }} - 
content: | {{ .encryptionProviderConfig | indent 8}} @@ -294,6 +301,10 @@ spec: owner: root:root path: /etc/kubernetes/audit-policy.yaml initConfiguration: +{{- if .kubeletConfiguration }} + patches: + directory: /etc/kubernetes/patches +{{- end }} nodeRegistration: kubeletExtraArgs: cloud-provider: external @@ -316,6 +327,10 @@ spec: {{- end }} {{- end }} joinConfiguration: +{{- if .kubeletConfiguration }} + patches: + directory: /etc/kubernetes/patches +{{- end }} nodeRegistration: criSocket: /var/run/containerd/containerd.sock kubeletExtraArgs: diff --git a/pkg/providers/nutanix/config/md-template.yaml b/pkg/providers/nutanix/config/md-template.yaml index d62afebeb444..ac63a9ffff75 100644 --- a/pkg/providers/nutanix/config/md-template.yaml +++ b/pkg/providers/nutanix/config/md-template.yaml @@ -111,6 +111,10 @@ spec: {{- end }} - hostnamectl set-hostname "{{`{{ ds.meta_data.hostname }}`}}" joinConfiguration: +{{- if .kubeletConfiguration }} + patches: + directory: /etc/kubernetes/patches +{{- end }} nodeRegistration: kubeletExtraArgs: cloud-provider: external @@ -139,9 +143,16 @@ spec: sudo: ALL=(ALL) NOPASSWD:ALL sshAuthorizedKeys: - "{{.workerSshAuthorizedKey}}" -{{- if or .proxyConfig .registryMirrorMap }} +{{- if or (or .proxyConfig .registryMirrorMap) .kubeletConfiguration }} files: {{- end }} +{{- if .kubeletConfiguration }} + - content: | +{{ .kubeletConfiguration | indent 10 }} + owner: root:root + permissions: "0644" + path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml +{{- end }} {{- if .proxyConfig }} - content: | [Service] diff --git a/pkg/providers/nutanix/template.go b/pkg/providers/nutanix/template.go index 1b0f0034513a..f1cb5e2493c7 100644 --- a/pkg/providers/nutanix/template.go +++ b/pkg/providers/nutanix/template.go @@ -5,6 +5,8 @@ import ( "encoding/json" "fmt" + "sigs.k8s.io/yaml" + "github.com/nutanix-cloud-native/prism-go-client/environment/credentials" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" @@ -313,6 +315,17 @@ func buildTemplateMapCP( values["encryptionProviderConfig"] = conf } + if clusterSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration != nil { + cpKubeletConfig := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration.Object + + kcString, err := yaml.Marshal(cpKubeletConfig) + if err != nil { + return nil, fmt.Errorf("error marshaling %v", err) + } + + values["kubeletConfiguration"] = string(kcString) + } + return values, nil } @@ -389,6 +402,15 @@ func buildTemplateMapMD(clusterSpec *cluster.Spec, workerNodeGroupMachineSpec v1 values["additionalCategories"] = workerNodeGroupMachineSpec.AdditionalCategories } + if workerNodeGroupConfiguration.KubeletConfiguration != nil { + wnKubeletConfig := workerNodeGroupConfiguration.KubeletConfiguration.Object + kcString, err := yaml.Marshal(wnKubeletConfig) + if err != nil { + return nil, fmt.Errorf("error marshaling %v", err) + } + + values["kubeletConfiguration"] = string(kcString) + } return values, nil } diff --git a/pkg/providers/nutanix/template_test.go b/pkg/providers/nutanix/template_test.go index 9cdaf37f61a6..d931c0bf6040 100644 --- a/pkg/providers/nutanix/template_test.go +++ b/pkg/providers/nutanix/template_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "sigs.k8s.io/yaml" "github.com/aws/eks-anywhere/internal/test" @@ -87,6 +88,43 @@ func TestNewNutanixTemplateBuilder(t *testing.T) { assert.Equal(t, expectedSecret, secretSpec) } +func 
TestNewNutanixTemplateBuilderKubeletConfiguration(t *testing.T) { + dcConf, machineConf, workerConfs := minimalNutanixConfigSpec(t) + + t.Setenv(constants.EksaNutanixUsernameKey, "admin") + t.Setenv(constants.EksaNutanixPasswordKey, "password") + creds := GetCredsFromEnv() + builder := NewNutanixTemplateBuilder(&dcConf.Spec, &machineConf.Spec, &machineConf.Spec, workerConfs, creds, time.Now) + assert.NotNil(t, builder) + + buildSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster.yaml") + buildSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration = &unstructured.Unstructured{ + Object: map[string]interface{}{ + "maxPods": 20, + }, + } + + buildSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].KubeletConfiguration = &unstructured.Unstructured{ + Object: map[string]interface{}{ + "maxPods": 20, + }, + } + + spec, err := builder.GenerateCAPISpecControlPlane(buildSpec) + assert.NoError(t, err) + assert.NotNil(t, spec) + + workloadTemplateNames := map[string]string{ + "eksa-unit-test": "eksa-unit-test", + } + kubeadmconfigTemplateNames := map[string]string{ + "eksa-unit-test": "eksa-unit-test", + } + workerSpec, err := builder.GenerateCAPISpecWorkers(buildSpec, workloadTemplateNames, kubeadmconfigTemplateNames) + assert.NoError(t, err) + assert.NotNil(t, workerSpec) +} + func TestNewNutanixTemplateBuilderGenerateCAPISpecControlPlaneFailure(t *testing.T) { dcConf, machineConf, workerConfs := minimalNutanixConfigSpec(t) diff --git a/pkg/providers/nutanix/testdata/eksa-cluster-invalid-kc.yaml b/pkg/providers/nutanix/testdata/eksa-cluster-invalid-kc.yaml new file mode 100644 index 000000000000..18b4ec1eabbf --- /dev/null +++ b/pkg/providers/nutanix/testdata/eksa-cluster-invalid-kc.yaml @@ -0,0 +1,73 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: eksa-unit-test + namespace: default +spec: + kubernetesVersion: "1.19" + controlPlaneConfiguration: + name: eksa-unit-test + count: 3 + endpoint: + host: test-ip + machineGroupRef: + name: eksa-unit-test + kind: NutanixMachineConfig + kubeletConfiguration: + kind: KubeletConfiguration + maxPods: 20 + workerNodeGroupConfigurations: + - count: 4 + name: eksa-unit-test + machineGroupRef: + name: eksa-unit-test + kind: NutanixMachineConfig + datacenterRef: + kind: NutanixDatacenterConfig + name: eksa-unit-test + clusterNetwork: + cni: "cilium" + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 + +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + kind: Secret + name: "nutanix-credentials" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixMachineConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + vcpusPerSocket: 1 + vcpuSockets: 4 + memorySize: 8Gi + image: + type: "name" + name: "prism-image-1-19" + cluster: + type: "name" + name: "prism-cluster" + subnet: + type: "name" + name: "prism-subnet" + systemDiskSize: 40Gi + osFamily: "ubuntu" + users: + - name: "mySshUsername" + sshAuthorizedKeys: + - "mySshAuthorizedKey" diff --git a/pkg/providers/vsphere/config/template-cp.yaml b/pkg/providers/vsphere/config/template-cp.yaml index d7a4af50c5f2..3715e523b022 100644 --- a/pkg/providers/vsphere/config/template-cp.yaml +++ b/pkg/providers/vsphere/config/template-cp.yaml @@ -246,6 +246,13 @@ spec: certificatesDir: /var/lib/kubeadm/pki {{- end }} files: +{{- if .kubeletConfiguration }} + - 
content: | +{{ .kubeletConfiguration | indent 8}} + owner: root:root + permissions: "0644" + path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml +{{- end }} {{- if .encryptionProviderConfig }} - content: | {{ .encryptionProviderConfig | indent 8}} @@ -393,6 +400,10 @@ spec: path: /var/lib/kubeadm/aws-iam-authenticator/pki/key.pem {{- end}} initConfiguration: +{{- if .kubeletConfiguration }} + patches: + directory: /etc/kubernetes/patches +{{- end }} nodeRegistration: criSocket: /var/run/containerd/containerd.sock kubeletExtraArgs: @@ -415,6 +426,10 @@ spec: {{- end }} {{- end }} joinConfiguration: +{{- if .kubeletConfiguration }} + patches: + directory: /etc/kubernetes/patches +{{- end }} {{- if (eq .format "bottlerocket") }} pause: imageRepository: {{.pauseRepository}} diff --git a/pkg/providers/vsphere/config/template-md.yaml b/pkg/providers/vsphere/config/template-md.yaml index 6637ba98eeab..164b6c2de1a8 100644 --- a/pkg/providers/vsphere/config/template-md.yaml +++ b/pkg/providers/vsphere/config/template-md.yaml @@ -7,6 +7,10 @@ spec: template: spec: joinConfiguration: +{{- if .kubeletConfiguration }} + patches: + directory: /etc/kubernetes/patches +{{- end }} {{- if (eq .format "bottlerocket") }} pause: imageRepository: {{.pauseRepository}} @@ -76,9 +80,16 @@ spec: {{ .kubeletExtraArgs.ToYaml | indent 12 }} {{- end }} name: '{{"{{"}} ds.meta_data.hostname {{"}}"}}' -{{- if and (ne .format "bottlerocket") (or .proxyConfig .registryMirrorMap) }} +{{- if or (and (ne .format "bottlerocket") (or .proxyConfig .registryMirrorMap)) .kubeletConfiguration }} files: {{- end }} +{{- if .kubeletConfiguration }} + - content: | +{{ .kubeletConfiguration | indent 10 }} + owner: root:root + permissions: "0644" + path: /etc/kubernetes/patches/kubeletconfiguration0+strategic.yaml +{{- end }} {{- if and .proxyConfig (ne .format "bottlerocket") }} - content: | [Service] diff --git a/pkg/providers/vsphere/template.go b/pkg/providers/vsphere/template.go index 4be5dd51d9c2..cbd6faac1b3f 100644 --- a/pkg/providers/vsphere/template.go +++ b/pkg/providers/vsphere/template.go @@ -3,6 +3,8 @@ package vsphere import ( "fmt" + "sigs.k8s.io/yaml" + anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/clusterapi" @@ -354,6 +356,17 @@ func buildTemplateMapCP( values["encryptionProviderConfig"] = conf } + if clusterSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration != nil { + cpKubeletConfig := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration.Object + + kcString, err := yaml.Marshal(cpKubeletConfig) + if err != nil { + return nil, fmt.Errorf("error marshaling %v", err) + } + + values["kubeletConfiguration"] = string(kcString) + } + if clusterSpec.Cluster.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy != nil { values["upgradeRolloutStrategy"] = true if clusterSpec.Cluster.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy.Type == anywherev1.InPlaceStrategyType { @@ -488,5 +501,15 @@ func buildTemplateMapMD( values["bottlerocketSettings"] = brSettings } + if workerNodeGroupConfiguration.KubeletConfiguration != nil { + wnKubeletConfig := workerNodeGroupConfiguration.KubeletConfiguration.Object + kcString, err := yaml.Marshal(wnKubeletConfig) + if err != nil { + return nil, fmt.Errorf("error marshaling %v", err) + } + + values["kubeletConfiguration"] = string(kcString) + } + return values, nil } diff --git a/pkg/providers/vsphere/template_test.go 
b/pkg/providers/vsphere/template_test.go index 56af1c73f4e4..4ca420d9945a 100644 --- a/pkg/providers/vsphere/template_test.go +++ b/pkg/providers/vsphere/template_test.go @@ -5,8 +5,10 @@ import ( "time" . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "github.com/aws/eks-anywhere/internal/test" + "github.com/aws/eks-anywhere/pkg/clusterapi" "github.com/aws/eks-anywhere/pkg/config" "github.com/aws/eks-anywhere/pkg/providers/vsphere" ) @@ -55,6 +57,34 @@ func TestVsphereTemplateBuilderGenerateCAPISpecControlPlaneInvalidEtcdSSHKey(t * ) } +func TestVsphereTemplateBuilderGenerateCAPISpecControlPlaneInvalidKubeletConfigWN(t *testing.T) { + g := NewWithT(t) + spec := test.NewFullClusterSpec(t, "testdata/cluster_main.yaml") + spec.Cluster.Spec.WorkerNodeGroupConfigurations[0].KubeletConfiguration = &unstructured.Unstructured{ + Object: map[string]interface{}{ + "maxPods": 20, + }, + } + builder := vsphere.NewVsphereTemplateBuilder(time.Now) + _, err := builder.GenerateCAPISpecWorkers(spec, nil, nil) + g.Expect(err).ToNot(HaveOccurred()) +} + +func TestVsphereTemplateBuilderGenerateCAPISpecControlPlaneInvalidKubeletConfigCP(t *testing.T) { + g := NewWithT(t) + spec := test.NewFullClusterSpec(t, "testdata/cluster_main.yaml") + spec.Cluster.Spec.ControlPlaneConfiguration.KubeletConfiguration = &unstructured.Unstructured{ + Object: map[string]interface{}{ + "maxPods": 20, + }, + } + builder := vsphere.NewVsphereTemplateBuilder(time.Now) + _, err := builder.GenerateCAPISpecControlPlane(spec, func(values map[string]interface{}) { + values["controlPlaneTemplateName"] = clusterapi.ControlPlaneMachineTemplateName(spec.Cluster) + }) + g.Expect(err).ToNot(HaveOccurred()) +} + func TestTemplateBuilder_CertSANs(t *testing.T) { t.Setenv(config.EksavSphereUsernameKey, expectedVSphereUsername) t.Setenv(config.EksavSpherePasswordKey, expectedVSpherePassword) From a209c554e73037c0dfa1d6a614dcca7726fa35e2 Mon Sep 17 00:00:00 2001 From: Abhay Krishna Date: Tue, 4 Jun 2024 10:48:10 -0700 Subject: [PATCH 181/193] Add Tinkerbell RTOS image env var to quick E2E tests (#8246) --- cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml b/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml index 2f6fe0cfc7ba..2fd7871a1662 100644 --- a/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml +++ b/cmd/integration_test/build/buildspecs/quick-test-eks-a-cli.yml @@ -147,6 +147,7 @@ env: T_TINKERBELL_IMAGE_UBUNTU_2204_1_27: "tinkerbell_ci:image_ubuntu_2204_1_27" T_TINKERBELL_IMAGE_UBUNTU_2204_1_28: "tinkerbell_ci:image_ubuntu_2204_1_28" T_TINKERBELL_IMAGE_UBUNTU_2204_1_29: "tinkerbell_ci:image_ubuntu_2204_1_29" + T_TINKERBELL_IMAGE_UBUNTU_2204_1_29_RTOS: "tinkerbell_ci:image_ubuntu_2204_1_29_rtos" T_TINKERBELL_IMAGE_UBUNTU_2204_1_30: "tinkerbell_ci:image_ubuntu_2204_1_30" T_TINKERBELL_IMAGE_REDHAT_1_24: "tinkerbell_ci:image_redhat_1_24" T_TINKERBELL_IMAGE_REDHAT_1_25: "tinkerbell_ci:image_redhat_1_25" From 337d0af135f38a0c33065208825b6bbe01346e58 Mon Sep 17 00:00:00 2001 From: Rahul Ganesh <31204974+rahulbabu95@users.noreply.github.com> Date: Tue, 4 Jun 2024 12:17:55 -0700 Subject: [PATCH 182/193] Update generate cluster config command for Tinkerbell (#8226) Manually merging overriding code cov as these are generate commands and do not have tests. 
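One effect of the diff below is that dropping `omitempty` from `osImageURL` (and seeding `TinkerbellIP` with an empty string) makes these required fields appear as explicit placeholders in generated cluster configs instead of being silently omitted. A minimal sketch of why, using stand-in types rather than the real spec:

package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

// Stand-ins for the real spec types, for illustration only.
type withOmitEmpty struct {
	OSImageURL string `json:"osImageURL,omitempty"`
}

type withoutOmitEmpty struct {
	OSImageURL string `json:"osImageURL"`
}

func main() {
	// Errors ignored for brevity; both marshals succeed on these types.
	a, _ := yaml.Marshal(withOmitEmpty{})    // prints "{}" — field hidden when empty
	b, _ := yaml.Marshal(withoutOmitEmpty{}) // prints `osImageURL: ""` — visible placeholder
	fmt.Print(string(a), string(b))
}
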
* Update generate cluster config command for Tinkerbell Signed-off-by: Rahul Ganesh --- pkg/api/v1alpha1/tinkerbelldatacenterconfig.go | 4 +++- pkg/api/v1alpha1/tinkerbellmachineconfig.go | 1 + pkg/api/v1alpha1/tinkerbellmachineconfig_types.go | 2 +- pkg/providers/tinkerbell/assert_test.go | 12 ++++++++++++ 4 files changed, 17 insertions(+), 2 deletions(-) diff --git a/pkg/api/v1alpha1/tinkerbelldatacenterconfig.go b/pkg/api/v1alpha1/tinkerbelldatacenterconfig.go index 1a2cc8fcf062..817eaca2dc8a 100644 --- a/pkg/api/v1alpha1/tinkerbelldatacenterconfig.go +++ b/pkg/api/v1alpha1/tinkerbelldatacenterconfig.go @@ -22,7 +22,9 @@ func NewTinkerbellDatacenterConfigGenerate(clusterName string) *TinkerbellDatace ObjectMeta: ObjectMeta{ Name: clusterName, }, - Spec: TinkerbellDatacenterConfigSpec{}, + Spec: TinkerbellDatacenterConfigSpec{ + TinkerbellIP: "", + }, } } diff --git a/pkg/api/v1alpha1/tinkerbellmachineconfig.go b/pkg/api/v1alpha1/tinkerbellmachineconfig.go index 6caf77e7ea0b..c952aa7e9df2 100644 --- a/pkg/api/v1alpha1/tinkerbellmachineconfig.go +++ b/pkg/api/v1alpha1/tinkerbellmachineconfig.go @@ -27,6 +27,7 @@ func NewTinkerbellMachineConfigGenerate(name string, opts ...TinkerbellMachineCo Spec: TinkerbellMachineConfigSpec{ HardwareSelector: HardwareSelector{}, OSFamily: Ubuntu, + OSImageURL: "", Users: []UserConfiguration{ { Name: "ec2-user", diff --git a/pkg/api/v1alpha1/tinkerbellmachineconfig_types.go b/pkg/api/v1alpha1/tinkerbellmachineconfig_types.go index 8de153fbeef8..68304aba1568 100644 --- a/pkg/api/v1alpha1/tinkerbellmachineconfig_types.go +++ b/pkg/api/v1alpha1/tinkerbellmachineconfig_types.go @@ -18,7 +18,7 @@ type TinkerbellMachineConfigSpec struct { // OSImageURL is a URL to the OS image used during provisioning. It must include // the Kubernetes version(s). 
For example, a URL used for Kubernetes 1.27 could // be http://localhost:8080/ubuntu-2204-1.27.tgz - OSImageURL string `json:"osImageURL,omitempty"` + OSImageURL string `json:"osImageURL"` Users []UserConfiguration `json:"users,omitempty"` HostOSConfiguration *HostOSConfiguration `json:"hostOSConfiguration,omitempty"` } diff --git a/pkg/providers/tinkerbell/assert_test.go b/pkg/providers/tinkerbell/assert_test.go index c306360a90e0..900033f0c47b 100644 --- a/pkg/providers/tinkerbell/assert_test.go +++ b/pkg/providers/tinkerbell/assert_test.go @@ -143,6 +143,18 @@ func TestAssertMachineConfigK8sVersionBRWorker_Error(t *testing.T) { g.Expect(err).ToNot(gomega.Succeed()) } +func TestAssertMachineConfigK8sVersionBRModularWorker_Error(t *testing.T) { + g := gomega.NewWithT(t) + builder := NewDefaultValidClusterSpecBuilder() + clusterSpec := builder.Build() + kube129 := eksav1alpha1.Kube129 + clusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil + clusterSpec.Spec.Cluster.Spec.WorkerNodeGroupConfigurations[0].KubernetesVersion = &kube129 + clusterSpec.MachineConfigs[builder.WorkerNodeGroupMachineName].Spec.OSFamily = "bottlerocket" + err := tinkerbell.AssertOsFamilyValid(clusterSpec) + g.Expect(err).ToNot(gomega.Succeed()) +} + func TestAssertMachineConfigK8sVersionBR_Success(t *testing.T) { g := gomega.NewWithT(t) builder := NewDefaultValidClusterSpecBuilder() From a5232319ffd45cfbe8c72a31e133d46ae72b345f Mon Sep 17 00:00:00 2001 From: Tanvir Tatla Date: Tue, 4 Jun 2024 14:24:06 -0700 Subject: [PATCH 183/193] Upgrade registry certificate in packages (#7881) * upgrade packages workflow * Change PackageInstaller to PackageManager --- Makefile | 2 +- cmd/eksctl-anywhere/cmd/createcluster.go | 6 +- cmd/eksctl-anywhere/cmd/upgradecluster.go | 4 +- .../packagecontrollerclient.go | 7 ++ .../packagecontrollerclient_test.go | 49 +++++++++++++ pkg/curatedpackages/packageinstaller.go | 17 +++++ pkg/dependencies/factory.go | 44 ++++++++---- pkg/dependencies/factory_test.go | 40 ++++++++++- pkg/task/task.go | 2 +- pkg/workflows/interfaces/interfaces.go | 4 +- pkg/workflows/interfaces/mocks/clients.go | 42 +++++++---- pkg/workflows/management/create.go | 72 +++++++++---------- .../management/create_curated_packages.go | 2 +- pkg/workflows/management/create_test.go | 4 +- .../management/post_cluster_upgrade.go | 2 +- pkg/workflows/management/upgrade.go | 4 ++ .../management/upgrade_curated_packages.go | 31 ++++++++ pkg/workflows/management/upgrade_test.go | 49 +++++++++++++ pkg/workflows/workload/create.go | 4 +- pkg/workflows/workload/create_test.go | 4 +- pkg/workflows/workload/upgrade.go | 4 +- pkg/workflows/workload/upgrade_test.go | 4 +- 22 files changed, 311 insertions(+), 86 deletions(-) create mode 100644 pkg/workflows/management/upgrade_curated_packages.go diff --git a/Makefile b/Makefile index a02bec980b18..79d7c19620fb 100644 --- a/Makefile +++ b/Makefile @@ -568,7 +568,7 @@ mocks: ## Generate mocks ${MOCKGEN} -destination=pkg/bootstrapper/mocks/bootstrapper.go -package=mocks "github.com/aws/eks-anywhere/pkg/bootstrapper" ClusterClient ${MOCKGEN} -destination=pkg/git/providers/github/mocks/github.go -package=mocks "github.com/aws/eks-anywhere/pkg/git/providers/github" GithubClient ${MOCKGEN} -destination=pkg/git/mocks/git.go -package=mocks "github.com/aws/eks-anywhere/pkg/git" Client,ProviderClient - ${MOCKGEN} -destination=pkg/workflows/interfaces/mocks/clients.go -package=mocks "github.com/aws/eks-anywhere/pkg/workflows/interfaces" 
Bootstrapper,ClusterManager,GitOpsManager,Validator,CAPIManager,EksdInstaller,EksdUpgrader,PackageInstaller,ClusterUpgrader,ClusterCreator,ClientFactory,EksaInstaller,ClusterDeleter,ClusterMover + ${MOCKGEN} -destination=pkg/workflows/interfaces/mocks/clients.go -package=mocks "github.com/aws/eks-anywhere/pkg/workflows/interfaces" Bootstrapper,ClusterManager,GitOpsManager,Validator,CAPIManager,EksdInstaller,EksdUpgrader,PackageManager,ClusterUpgrader,ClusterCreator,ClientFactory,EksaInstaller,ClusterDeleter,ClusterMover ${MOCKGEN} -destination=pkg/git/gogithub/mocks/client.go -package=mocks "github.com/aws/eks-anywhere/pkg/git/gogithub" Client ${MOCKGEN} -destination=pkg/git/gitclient/mocks/client.go -package=mocks "github.com/aws/eks-anywhere/pkg/git/gitclient" GoGit ${MOCKGEN} -destination=pkg/validations/mocks/docker.go -package=mocks "github.com/aws/eks-anywhere/pkg/validations" DockerExecutable diff --git a/cmd/eksctl-anywhere/cmd/createcluster.go b/cmd/eksctl-anywhere/cmd/createcluster.go index 6452dafc6892..dc5b576d74b1 100644 --- a/cmd/eksctl-anywhere/cmd/createcluster.go +++ b/cmd/eksctl-anywhere/cmd/createcluster.go @@ -185,7 +185,7 @@ func (cc *createClusterOptions) createCluster(cmd *cobra.Command, _ []string) er WithGitOpsFlux(clusterSpec.Cluster, clusterSpec.FluxConfig, cliConfig). WithWriter(). WithEksdInstaller(). - WithPackageInstaller(clusterSpec, cc.installPackages, cc.managementKubeconfig). + WithPackageManager(clusterSpec, cc.installPackages, cc.managementKubeconfig). WithValidatorClients(). WithCreateClusterDefaulter(createCLIConfig). WithClusterApplier(). @@ -255,7 +255,7 @@ func (cc *createClusterOptions) createCluster(cmd *cobra.Command, _ []string) er deps.GitOpsFlux, deps.Writer, deps.EksdInstaller, - deps.PackageInstaller, + deps.PackageManager, deps.ClusterCreator, deps.UnAuthKubectlClient, ) @@ -272,7 +272,7 @@ func (cc *createClusterOptions) createCluster(cmd *cobra.Command, _ []string) er deps.GitOpsFlux, deps.Writer, deps.EksdInstaller, - deps.PackageInstaller, + deps.PackageManager, deps.ClusterCreator, deps.EksaInstaller, deps.ClusterMover, diff --git a/cmd/eksctl-anywhere/cmd/upgradecluster.go b/cmd/eksctl-anywhere/cmd/upgradecluster.go index cb199030269f..27866be23897 100644 --- a/cmd/eksctl-anywhere/cmd/upgradecluster.go +++ b/cmd/eksctl-anywhere/cmd/upgradecluster.go @@ -159,6 +159,7 @@ func (uc *upgradeClusterOptions) upgradeCluster(cmd *cobra.Command, args []strin WithEksdInstaller(). WithKubectl(). WithValidatorClients(). + WithPackageManagerWithoutWait(clusterSpec, "", uc.managementKubeconfig). 
WithUpgradeClusterDefaulter(upgradeCLIConfig) if uc.timeoutOptions.noTimeouts { @@ -212,6 +213,7 @@ func (uc *upgradeClusterOptions) upgradeCluster(cmd *cobra.Command, args []strin deps.EksdUpgrader, deps.EksdInstaller, deps.ClusterApplier, + deps.PackageManager, ) err = upgrade.Run(ctx, clusterSpec, managementCluster, upgradeValidations) @@ -225,7 +227,7 @@ func (uc *upgradeClusterOptions) upgradeCluster(cmd *cobra.Command, args []strin deps.Writer, deps.ClusterApplier, deps.EksdInstaller, - deps.PackageInstaller, + deps.PackageManager, ) err = upgradeWorkloadCluster.Run(ctx, workloadCluster, clusterSpec, upgradeValidations) } diff --git a/pkg/curatedpackages/packagecontrollerclient.go b/pkg/curatedpackages/packagecontrollerclient.go index 6d8e886ce7b6..493b6ed107eb 100644 --- a/pkg/curatedpackages/packagecontrollerclient.go +++ b/pkg/curatedpackages/packagecontrollerclient.go @@ -645,3 +645,10 @@ func WithRegistryAccessTester(registryTester RegistryAccessTester) func(client * config.registryAccessTester = registryTester } } + +// WithSkipWait sets skipWaitForPackageBundle. +func WithSkipWait() func(client *PackageControllerClient) { + return func(config *PackageControllerClient) { + config.skipWaitForPackageBundle = true + } +} diff --git a/pkg/curatedpackages/packagecontrollerclient_test.go b/pkg/curatedpackages/packagecontrollerclient_test.go index d31d3a4d6f35..dcdc9e44585d 100644 --- a/pkg/curatedpackages/packagecontrollerclient_test.go +++ b/pkg/curatedpackages/packagecontrollerclient_test.go @@ -524,6 +524,55 @@ func TestEnableWithEmptyProxy(t *testing.T) { } } +func TestEnableWithSkipWait(t *testing.T) { + for _, tt := range newPackageControllerTests(t) { + tt.command = curatedpackages.NewPackageControllerClient( + tt.chartManager, tt.kubectl, "billy", tt.kubeConfig, tt.chart, + tt.registryMirror, + curatedpackages.WithEksaSecretAccessKey(tt.eksaAccessKey), + curatedpackages.WithEksaRegion(tt.eksaRegion), + curatedpackages.WithEksaAccessKeyId(tt.eksaAccessID), + curatedpackages.WithSkipWait(), + curatedpackages.WithManagementClusterName(tt.clusterName), + curatedpackages.WithValuesFileWriter(tt.writer), + ) + clusterName := fmt.Sprintf("clusterName=%s", "billy") + valueFilePath := filepath.Join("billy", filewriter.DefaultTmpFolder, valueFileName) + ociURI := fmt.Sprintf("%s%s", "oci://", tt.registryMirror.ReplaceRegistry(tt.chart.Image())) + sourceRegistry, defaultRegistry, defaultImageRegistry := tt.command.GetCuratedPackagesRegistries(context.Background()) + sourceRegistry = fmt.Sprintf("sourceRegistry=%s", sourceRegistry) + defaultRegistry = fmt.Sprintf("defaultRegistry=%s", defaultRegistry) + defaultImageRegistry = fmt.Sprintf("defaultImageRegistry=%s", defaultImageRegistry) + if tt.registryMirror != nil { + t.Setenv("REGISTRY_USERNAME", "username") + t.Setenv("REGISTRY_PASSWORD", "password") + } else { + if tt.eksaRegion == "" { + tt.eksaRegion = "us-west-2" + } + defaultImageRegistry = strings.ReplaceAll(defaultImageRegistry, "us-west-2", tt.eksaRegion) + } + values := []string{sourceRegistry, defaultRegistry, defaultImageRegistry, clusterName} + if (tt.eksaAccessID == "" || tt.eksaAccessKey == "") && tt.registryMirror == nil { + values = append(values, "cronjob.suspend=true") + } + tt.chartManager.EXPECT().InstallChart(tt.ctx, tt.chart.Name, ociURI, tt.chart.Tag(), tt.kubeConfig, constants.EksaPackagesName, valueFilePath, false, values).Return(nil) + tt.kubectl.EXPECT(). + GetObject(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). 
+ DoAndReturn(getPBCSuccess(t)). + AnyTimes() + tt.kubectl.EXPECT(). + HasResource(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(_, _, _, _, _ interface{}) (bool, error) { return true, nil }). + AnyTimes() + + err := tt.command.Enable(tt.ctx) + if err != nil { + t.Errorf("Install Controller Should succeed when installation passes") + } + } +} + func TestEnableFail(t *testing.T) { for _, tt := range newPackageControllerTests(t) { clusterName := fmt.Sprintf("clusterName=%s", "billy") diff --git a/pkg/curatedpackages/packageinstaller.go b/pkg/curatedpackages/packageinstaller.go index ba473be21c43..02f93cd48571 100644 --- a/pkg/curatedpackages/packageinstaller.go +++ b/pkg/curatedpackages/packageinstaller.go @@ -67,6 +67,23 @@ func (pi *Installer) InstallCuratedPackages(ctx context.Context) { } } +// UpgradeCuratedPackages upgrades curated packages as part of the cluster upgrade. +func (pi *Installer) UpgradeCuratedPackages(ctx context.Context) { + if IsPackageControllerDisabled(pi.spec.Cluster) { + logger.Info("Package controller disabled") + return + } + PrintLicense() + if err := pi.installPackagesController(ctx); err != nil { + logger.MarkWarning("Failed to upgrade the optional EKS-A Curated Package Controller.", "warning", err) + return + } + + if err := pi.installPackages(ctx); err != nil { + logger.MarkWarning("Failed upgrading curated packages on the cluster.", "error", err) + } +} + func (pi *Installer) installPackagesController(ctx context.Context) error { logger.Info("Enabling curated packages on the cluster") err := pi.packageController.Enable(ctx) diff --git a/pkg/dependencies/factory.go b/pkg/dependencies/factory.go index 312090557e99..60f855589f86 100644 --- a/pkg/dependencies/factory.go +++ b/pkg/dependencies/factory.go @@ -94,7 +94,7 @@ type Dependencies struct { closers []types.Closer CliConfig *cliconfig.CliConfig CreateCliConfig *cliconfig.CreateClusterCLIConfig - PackageInstaller interfaces.PackageInstaller + PackageManager interfaces.PackageManager BundleRegistry curatedpackages.BundleRegistry PackageControllerClient *curatedpackages.PackageControllerClient PackageClient curatedpackages.PackageHandler @@ -1322,16 +1322,17 @@ func (f *Factory) WithGitOpsFlux(clusterConfig *v1alpha1.Cluster, fluxConfig *v1 return f } -func (f *Factory) WithPackageInstaller(spec *cluster.Spec, packagesLocation, kubeConfig string) *Factory { +// WithPackageManager builds a package manager. 
+func (f *Factory) WithPackageManager(spec *cluster.Spec, packagesLocation, kubeConfig string) *Factory { f.WithKubectl().WithPackageControllerClient(spec, kubeConfig).WithPackageClient() - f.buildSteps = append(f.buildSteps, func(ctx context.Context) error { - if f.dependencies.PackageInstaller != nil { + f.buildSteps = append(f.buildSteps, func(_ context.Context) error { + if f.dependencies.PackageManager != nil { return nil } managementClusterName := getManagementClusterName(spec) mgmtKubeConfig := kubeconfig.ResolveFilename(kubeConfig, managementClusterName) - f.dependencies.PackageInstaller = curatedpackages.NewInstaller( + f.dependencies.PackageManager = curatedpackages.NewInstaller( f.dependencies.Kubectl, f.dependencies.PackageClient, f.dependencies.PackageControllerClient, @@ -1344,10 +1345,18 @@ func (f *Factory) WithPackageInstaller(spec *cluster.Spec, packagesLocation, kub return f } -func (f *Factory) WithPackageControllerClient(spec *cluster.Spec, kubeConfig string) *Factory { +// WithPackageManagerWithoutWait builds a package manager that doesn't wait for active bundles. +func (f *Factory) WithPackageManagerWithoutWait(spec *cluster.Spec, packagesLocation, kubeConfig string) *Factory { + f.WithPackageControllerClient(spec, kubeConfig, curatedpackages.WithSkipWait()). + WithPackageManager(spec, packagesLocation, kubeConfig) + return f +} + +// WithPackageControllerClient builds a client for package controller. +func (f *Factory) WithPackageControllerClient(spec *cluster.Spec, kubeConfig string, opts ...curatedpackages.PackageControllerClientOpt) *Factory { f.WithHelm(helm.WithInsecure()).WithKubectl() - f.buildSteps = append(f.buildSteps, func(ctx context.Context) error { + f.buildSteps = append(f.buildSteps, func(_ context.Context) error { if f.dependencies.PackageControllerClient != nil || spec == nil { return nil } @@ -1374,13 +1383,8 @@ func (f *Factory) WithPackageControllerClient(spec *cluster.Spec, kubeConfig str if bundle == nil { return fmt.Errorf("could not find VersionsBundle") } - f.dependencies.PackageControllerClient = curatedpackages.NewPackageControllerClient( - f.dependencies.Helm, - f.dependencies.Kubectl, - spec.Cluster.Name, - mgmtKubeConfig, - &bundle.PackageController.HelmChart, - f.registryMirror, + + options := []curatedpackages.PackageControllerClientOpt{ curatedpackages.WithEksaAccessKeyId(eksaAccessKeyID), curatedpackages.WithEksaSecretAccessKey(eksaSecretKey), curatedpackages.WithEksaRegion(eksaRegion), @@ -1391,6 +1395,18 @@ func (f *Factory) WithPackageControllerClient(spec *cluster.Spec, kubeConfig str curatedpackages.WithManagementClusterName(managementClusterName), curatedpackages.WithValuesFileWriter(writer), curatedpackages.WithClusterSpec(spec), + } + + options = append(options, opts...) + + f.dependencies.PackageControllerClient = curatedpackages.NewPackageControllerClient( + f.dependencies.Helm, + f.dependencies.Kubectl, + spec.Cluster.Name, + mgmtKubeConfig, + &bundle.PackageController.HelmChart, + f.registryMirror, + options..., ) return nil }) diff --git a/pkg/dependencies/factory_test.go b/pkg/dependencies/factory_test.go index d8d0e1c9bb81..7e43b4246630 100644 --- a/pkg/dependencies/factory_test.go +++ b/pkg/dependencies/factory_test.go @@ -433,10 +433,46 @@ func TestFactoryBuildWithPackageInstaller(t *testing.T) { WithLocalExecutables(). WithHelm(helm.WithInsecure()). WithKubectl(). - WithPackageInstaller(spec, "/test/packages.yaml", "kubeconfig.kubeconfig"). 
+ WithPackageManager(spec, "/test/packages.yaml", "kubeconfig.kubeconfig"). Build(context.Background()) tt.Expect(err).To(BeNil()) - tt.Expect(deps.PackageInstaller).NotTo(BeNil()) + tt.Expect(deps.PackageManager).NotTo(BeNil()) +} + +func TestFactoryBuildWithPackageInstallerWithoutWait(t *testing.T) { + spec := &cluster.Spec{ + Config: &cluster.Config{ + Cluster: &anywherev1.Cluster{ + ObjectMeta: v1.ObjectMeta{ + Name: "test-cluster", + }, + Spec: anywherev1.ClusterSpec{ + KubernetesVersion: "1.19", + }, + }, + }, + VersionsBundles: map[anywherev1.KubernetesVersion]*cluster.VersionsBundle{ + "1.19": { + VersionsBundle: &v1alpha1.VersionsBundle{ + PackageController: v1alpha1.PackageBundle{ + HelmChart: v1alpha1.Image{ + URI: "test_registry/test/eks-anywhere-packages:v1", + Name: "test_chart", + }, + }, + }, + }, + }, + } + tt := newTest(t, vsphere) + deps, err := dependencies.NewFactory(). + WithLocalExecutables(). + WithHelm(helm.WithInsecure()). + WithKubectl(). + WithPackageManagerWithoutWait(spec, "/test/packages.yaml", "kubeconfig.kubeconfig"). + Build(context.Background()) + tt.Expect(err).To(BeNil()) + tt.Expect(deps.PackageManager).NotTo(BeNil()) } func TestFactoryBuildWithCuratedPackagesCustomRegistry(t *testing.T) { diff --git a/pkg/task/task.go b/pkg/task/task.go index 24ac57991786..118e6176f910 100644 --- a/pkg/task/task.go +++ b/pkg/task/task.go @@ -36,7 +36,7 @@ type CommandContext struct { Writer filewriter.FileWriter EksdInstaller interfaces.EksdInstaller EksaInstaller interfaces.EksaInstaller - PackageInstaller interfaces.PackageInstaller + PackageManager interfaces.PackageManager EksdUpgrader interfaces.EksdUpgrader ClusterUpgrader interfaces.ClusterUpgrader ClusterCreator interfaces.ClusterCreator diff --git a/pkg/workflows/interfaces/interfaces.go b/pkg/workflows/interfaces/interfaces.go index 37b2d0e01c9a..182765cc4720 100644 --- a/pkg/workflows/interfaces/interfaces.go +++ b/pkg/workflows/interfaces/interfaces.go @@ -76,8 +76,10 @@ type EksdUpgrader interface { Upgrade(ctx context.Context, cluster *types.Cluster, currentSpec, newSpec *cluster.Spec) error } -type PackageInstaller interface { +// PackageManager handles installation and upgrades of curated packages. +type PackageManager interface { InstallCuratedPackages(ctx context.Context) + UpgradeCuratedPackages(ctx context.Context) } // ClusterUpgrader upgrades the cluster and waits until it's ready. diff --git a/pkg/workflows/interfaces/mocks/clients.go b/pkg/workflows/interfaces/mocks/clients.go index 1e534ea03926..bdb7d47c53cf 100644 --- a/pkg/workflows/interfaces/mocks/clients.go +++ b/pkg/workflows/interfaces/mocks/clients.go @@ -1,5 +1,5 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/aws/eks-anywhere/pkg/workflows/interfaces (interfaces: Bootstrapper,ClusterManager,GitOpsManager,Validator,CAPIManager,EksdInstaller,EksdUpgrader,PackageInstaller,ClusterUpgrader,ClusterCreator,ClientFactory,EksaInstaller,ClusterDeleter,ClusterMover) +// Source: github.com/aws/eks-anywhere/pkg/workflows/interfaces (interfaces: Bootstrapper,ClusterManager,GitOpsManager,Validator,CAPIManager,EksdInstaller,EksdUpgrader,PackageManager,ClusterUpgrader,ClusterCreator,ClientFactory,EksaInstaller,ClusterDeleter,ClusterMover) // Package mocks is a generated GoMock package. 
package mocks @@ -685,39 +685,51 @@ func (mr *MockEksdUpgraderMockRecorder) Upgrade(arg0, arg1, arg2, arg3 interface return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Upgrade", reflect.TypeOf((*MockEksdUpgrader)(nil).Upgrade), arg0, arg1, arg2, arg3) } -// MockPackageInstaller is a mock of PackageInstaller interface. -type MockPackageInstaller struct { +// MockPackageManager is a mock of PackageManager interface. +type MockPackageManager struct { ctrl *gomock.Controller - recorder *MockPackageInstallerMockRecorder + recorder *MockPackageManagerMockRecorder } -// MockPackageInstallerMockRecorder is the mock recorder for MockPackageInstaller. -type MockPackageInstallerMockRecorder struct { - mock *MockPackageInstaller +// MockPackageManagerMockRecorder is the mock recorder for MockPackageManager. +type MockPackageManagerMockRecorder struct { + mock *MockPackageManager } -// NewMockPackageInstaller creates a new mock instance. -func NewMockPackageInstaller(ctrl *gomock.Controller) *MockPackageInstaller { - mock := &MockPackageInstaller{ctrl: ctrl} - mock.recorder = &MockPackageInstallerMockRecorder{mock} +// NewMockPackageManager creates a new mock instance. +func NewMockPackageManager(ctrl *gomock.Controller) *MockPackageManager { + mock := &MockPackageManager{ctrl: ctrl} + mock.recorder = &MockPackageManagerMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockPackageInstaller) EXPECT() *MockPackageInstallerMockRecorder { +func (m *MockPackageManager) EXPECT() *MockPackageManagerMockRecorder { return m.recorder } // InstallCuratedPackages mocks base method. -func (m *MockPackageInstaller) InstallCuratedPackages(arg0 context.Context) { +func (m *MockPackageManager) InstallCuratedPackages(arg0 context.Context) { m.ctrl.T.Helper() m.ctrl.Call(m, "InstallCuratedPackages", arg0) } // InstallCuratedPackages indicates an expected call of InstallCuratedPackages. -func (mr *MockPackageInstallerMockRecorder) InstallCuratedPackages(arg0 interface{}) *gomock.Call { +func (mr *MockPackageManagerMockRecorder) InstallCuratedPackages(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstallCuratedPackages", reflect.TypeOf((*MockPackageInstaller)(nil).InstallCuratedPackages), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstallCuratedPackages", reflect.TypeOf((*MockPackageManager)(nil).InstallCuratedPackages), arg0) +} + +// UpgradeCuratedPackages mocks base method. +func (m *MockPackageManager) UpgradeCuratedPackages(arg0 context.Context) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "UpgradeCuratedPackages", arg0) +} + +// UpgradeCuratedPackages indicates an expected call of UpgradeCuratedPackages. +func (mr *MockPackageManagerMockRecorder) UpgradeCuratedPackages(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpgradeCuratedPackages", reflect.TypeOf((*MockPackageManager)(nil).UpgradeCuratedPackages), arg0) } // MockClusterUpgrader is a mock of ClusterUpgrader interface. diff --git a/pkg/workflows/management/create.go b/pkg/workflows/management/create.go index 23b1425d9f81..8679d9a8c351 100644 --- a/pkg/workflows/management/create.go +++ b/pkg/workflows/management/create.go @@ -12,17 +12,17 @@ import ( // Create is a schema for create cluster. 
 type Create struct {
-	bootstrapper     interfaces.Bootstrapper
-	clientFactory    interfaces.ClientFactory
-	provider         providers.Provider
-	clusterManager   interfaces.ClusterManager
-	gitOpsManager    interfaces.GitOpsManager
-	writer           filewriter.FileWriter
-	eksdInstaller    interfaces.EksdInstaller
-	packageInstaller interfaces.PackageInstaller
-	clusterCreator   interfaces.ClusterCreator
-	eksaInstaller    interfaces.EksaInstaller
-	clusterMover     interfaces.ClusterMover
+	bootstrapper   interfaces.Bootstrapper
+	clientFactory  interfaces.ClientFactory
+	provider       providers.Provider
+	clusterManager interfaces.ClusterManager
+	gitOpsManager  interfaces.GitOpsManager
+	writer         filewriter.FileWriter
+	eksdInstaller  interfaces.EksdInstaller
+	packageManager interfaces.PackageManager
+	clusterCreator interfaces.ClusterCreator
+	eksaInstaller  interfaces.EksaInstaller
+	clusterMover   interfaces.ClusterMover
 }
 
 // NewCreate builds a new create construct.
@@ -30,42 +30,42 @@ func NewCreate(bootstrapper interfaces.Bootstrapper,
 	clientFactory interfaces.ClientFactory,
 	provider providers.Provider,
 	clusterManager interfaces.ClusterManager,
 	gitOpsManager interfaces.GitOpsManager,
 	writer filewriter.FileWriter,
 	eksdInstaller interfaces.EksdInstaller,
-	packageInstaller interfaces.PackageInstaller,
+	packageManager interfaces.PackageManager,
 	clusterCreator interfaces.ClusterCreator,
 	eksaInstaller interfaces.EksaInstaller,
 	mover interfaces.ClusterMover,
 ) *Create {
 	return &Create{
-		bootstrapper:     bootstrapper,
-		clientFactory:    clientFactory,
-		provider:         provider,
-		clusterManager:   clusterManager,
-		gitOpsManager:    gitOpsManager,
-		writer:           writer,
-		eksdInstaller:    eksdInstaller,
-		packageInstaller: packageInstaller,
-		clusterCreator:   clusterCreator,
-		eksaInstaller:    eksaInstaller,
-		clusterMover:     mover,
+		bootstrapper:   bootstrapper,
+		clientFactory:  clientFactory,
+		provider:       provider,
+		clusterManager: clusterManager,
+		gitOpsManager:  gitOpsManager,
+		writer:         writer,
+		eksdInstaller:  eksdInstaller,
+		packageManager: packageManager,
+		clusterCreator: clusterCreator,
+		eksaInstaller:  eksaInstaller,
+		clusterMover:   mover,
 	}
 }
 
 // Run runs all the create management cluster tasks.
 func (c *Create) Run(ctx context.Context, clusterSpec *cluster.Spec, validator interfaces.Validator) error {
 	commandContext := &task.CommandContext{
-		Bootstrapper:     c.bootstrapper,
-		ClientFactory:    c.clientFactory,
-		Provider:         c.provider,
-		ClusterManager:   c.clusterManager,
-		GitOpsManager:    c.gitOpsManager,
-		ClusterSpec:      clusterSpec,
-		Writer:           c.writer,
-		Validations:      validator,
-		EksdInstaller:    c.eksdInstaller,
-		PackageInstaller: c.packageInstaller,
-		ClusterCreator:   c.clusterCreator,
-		EksaInstaller:    c.eksaInstaller,
-		ClusterMover:     c.clusterMover,
+		Bootstrapper:   c.bootstrapper,
+		ClientFactory:  c.clientFactory,
+		Provider:       c.provider,
+		ClusterManager: c.clusterManager,
+		GitOpsManager:  c.gitOpsManager,
+		ClusterSpec:    clusterSpec,
+		Writer:         c.writer,
+		Validations:    validator,
+		EksdInstaller:  c.eksdInstaller,
+		PackageManager: c.packageManager,
+		ClusterCreator: c.clusterCreator,
+		EksaInstaller:  c.eksaInstaller,
+		ClusterMover:   c.clusterMover,
 	}
 
 	return task.NewTaskRunner(&setupAndValidateCreate{}, c.writer).RunTask(ctx, commandContext)
diff --git a/pkg/workflows/management/create_curated_packages.go b/pkg/workflows/management/create_curated_packages.go
index a3c9d407a23b..14ab0465f202 100644
--- a/pkg/workflows/management/create_curated_packages.go
+++ b/pkg/workflows/management/create_curated_packages.go
@@ -9,7 +9,7 @@ import (
 type installCuratedPackagesTask struct{}
 
 func (s *installCuratedPackagesTask) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
-	commandContext.PackageInstaller.InstallCuratedPackages(ctx)
+	commandContext.PackageManager.InstallCuratedPackages(ctx)
 	return nil
 }
 
diff --git a/pkg/workflows/management/create_test.go b/pkg/workflows/management/create_test.go
index 3ec27e74b5a4..010319aaacb3 100644
--- a/pkg/workflows/management/create_test.go
+++ b/pkg/workflows/management/create_test.go
@@ -27,7 +27,7 @@ import (
 
 type createTestSetup struct {
 	t                *testing.T
-	packageInstaller *mocks.MockPackageInstaller
+	packageInstaller *mocks.MockPackageManager
 	clusterManager   *mocks.MockClusterManager
 	bootstrapper     *mocks.MockBootstrapper
 	gitOpsManager    *mocks.MockGitOpsManager
@@ -62,7 +62,7 @@ func newCreateTest(t *testing.T) *createTestSetup {
 	eksdInstaller := mocks.NewMockEksdInstaller(mockCtrl)
 	eksaInstaller := mocks.NewMockEksaInstaller(mockCtrl)
 
-	packageInstaller := mocks.NewMockPackageInstaller(mockCtrl)
+	packageInstaller := mocks.NewMockPackageManager(mockCtrl)
 
 	datacenterConfig := &v1alpha1.VSphereDatacenterConfig{}
 	machineConfigs := []providers.MachineConfig{&v1alpha1.VSphereMachineConfig{}}
diff --git a/pkg/workflows/management/post_cluster_upgrade.go b/pkg/workflows/management/post_cluster_upgrade.go
index 78c7975fa446..ca3f8f6ab128 100644
--- a/pkg/workflows/management/post_cluster_upgrade.go
+++ b/pkg/workflows/management/post_cluster_upgrade.go
@@ -28,7 +28,7 @@ func (s *postClusterUpgrade) Run(ctx context.Context, commandContext *task.Comma
 		logger.Info(fmt.Sprintf("management cluster CAPI backup file not found: %v", err))
 	}
 
-	return nil
+	return &upgradeCuratedPackagesTask{}
 }
 
 func (s *postClusterUpgrade) Name() string {
diff --git a/pkg/workflows/management/upgrade.go b/pkg/workflows/management/upgrade.go
index e8de7b767fa2..001336c0feb6 100644
--- a/pkg/workflows/management/upgrade.go
+++ b/pkg/workflows/management/upgrade.go
@@ -24,6 +24,7 @@ type Upgrade struct {
 	eksdUpgrader      interfaces.EksdUpgrader
 	upgradeChangeDiff *types.ChangeDiff
 	clusterUpgrader   interfaces.ClusterUpgrader
+	packageManager    interfaces.PackageManager
 }
 
 // NewUpgrade builds a new upgrade construct.
@@ -35,6 +36,7 @@ func NewUpgrade(clientFactory interfaces.ClientFactory, provider providers.Provi
 	eksdUpgrader interfaces.EksdUpgrader,
 	eksdInstaller interfaces.EksdInstaller,
 	clusterUpgrade interfaces.ClusterUpgrader,
+	packageManager interfaces.PackageManager,
 ) *Upgrade {
 	upgradeChangeDiff := types.NewChangeDiff()
 	return &Upgrade{
@@ -48,6 +50,7 @@ func NewUpgrade(clientFactory interfaces.ClientFactory, provider providers.Provi
 		eksdInstaller:     eksdInstaller,
 		upgradeChangeDiff: upgradeChangeDiff,
 		clusterUpgrader:   clusterUpgrade,
+		packageManager:    packageManager,
 	}
 }
 
@@ -67,6 +70,7 @@ func (c *Upgrade) Run(ctx context.Context, clusterSpec *cluster.Spec, management
 		EksdUpgrader:      c.eksdUpgrader,
 		UpgradeChangeDiff: c.upgradeChangeDiff,
 		ClusterUpgrader:   c.clusterUpgrader,
+		PackageManager:    c.packageManager,
 	}
 	if features.IsActive(features.CheckpointEnabled()) {
 		return task.NewTaskRunner(&setupAndValidateUpgrade{}, c.writer, task.WithCheckpointFile()).RunTask(ctx, commandContext)
diff --git a/pkg/workflows/management/upgrade_curated_packages.go b/pkg/workflows/management/upgrade_curated_packages.go
new file mode 100644
index 000000000000..13a8c7978142
--- /dev/null
+++ b/pkg/workflows/management/upgrade_curated_packages.go
@@ -0,0 +1,31 @@
+package management
+
+import (
+	"context"
+
+	"github.com/aws/eks-anywhere/pkg/task"
+)
+
+type upgradeCuratedPackagesTask struct{}
+
+func (s *upgradeCuratedPackagesTask) Run(ctx context.Context, commandContext *task.CommandContext) task.Task {
+	if commandContext.CurrentClusterSpec.Cluster.Spec.RegistryMirrorConfiguration.Equal(commandContext.ClusterSpec.Cluster.Spec.RegistryMirrorConfiguration) {
+		return nil
+	}
+
+	commandContext.PackageManager.UpgradeCuratedPackages(ctx)
+
+	return nil
+}
+
+func (s *upgradeCuratedPackagesTask) Name() string {
+	return "upgrade-curated-packages"
+}
+
+func (s *upgradeCuratedPackagesTask) Restore(_ context.Context, _ *task.CommandContext, _ *task.CompletedTask) (task.Task, error) {
+	return nil, nil
+}
+
+func (s *upgradeCuratedPackagesTask) Checkpoint() *task.CompletedTask {
+	return nil
+}
diff --git a/pkg/workflows/management/upgrade_test.go b/pkg/workflows/management/upgrade_test.go
index df6a979eff02..dc42c4a9a324 100644
--- a/pkg/workflows/management/upgrade_test.go
+++ b/pkg/workflows/management/upgrade_test.go
@@ -10,12 +10,14 @@ import (
 
 	"github.com/golang/mock/gomock"
 	. "github.com/onsi/gomega"
+	appsv1 "k8s.io/api/apps/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"github.com/aws/eks-anywhere/internal/test"
 	"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
 	"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
 	"github.com/aws/eks-anywhere/pkg/cluster"
+	"github.com/aws/eks-anywhere/pkg/constants"
 	"github.com/aws/eks-anywhere/pkg/features"
 	writermocks "github.com/aws/eks-anywhere/pkg/filewriter/mocks"
 	"github.com/aws/eks-anywhere/pkg/providers"
@@ -48,6 +50,7 @@ type upgradeManagementTestSetup struct {
 	managementCluster           *types.Cluster
 	managementStatePath         string
 	management                  *management.Upgrade
+	packages                    *mocks.MockPackageManager
 }
 
 func newUpgradeManagementTest(t *testing.T) *upgradeManagementTestSetup {
@@ -65,6 +68,7 @@ func newUpgradeManagementTest(t *testing.T) *upgradeManagementTestSetup {
 	capiUpgrader := mocks.NewMockCAPIManager(mockCtrl)
 	machineConfigs := []providers.MachineConfig{&v1alpha1.VSphereMachineConfig{}}
 	clusterUpgrader := mocks.NewMockClusterUpgrader(mockCtrl)
+	packageUpgrader := mocks.NewMockPackageManager(mockCtrl)
 	management := management.NewUpgrade(
 		clientFactory,
 		provider,
@@ -75,6 +79,7 @@ func newUpgradeManagementTest(t *testing.T) *upgradeManagementTestSetup {
 		eksdUpgrader,
 		eksdInstaller,
 		clusterUpgrader,
+		packageUpgrader,
 	)
 
 	for _, e := range featureEnvVars {
@@ -111,6 +116,7 @@ func newUpgradeManagementTest(t *testing.T) *upgradeManagementTestSetup {
 		datacenterConfig:            datacenterConfig,
 		machineConfigs:              machineConfigs,
 		management:                  management,
+		packages:                    packageUpgrader,
 		ctx:                         context.Background(),
 		currentManagementComponents: cluster.ManagementComponentsFromBundles(currentClusterSpec.Bundles),
 		newManagementComponents:     cluster.ManagementComponentsFromBundles(newClusterSpec.Bundles),
@@ -308,6 +314,10 @@ func (c *upgradeManagementTestSetup) expectPreflightValidationsToPass() {
 	c.validator.EXPECT().PreflightValidations(c.ctx).Return(nil)
 }
 
+func (c *upgradeManagementTestSetup) expectPackagesUpgrade() {
+	c.packages.EXPECT().UpgradeCuratedPackages(c.ctx)
+}
+
 func TestUpgradeManagementRunUpdateSetupFailed(t *testing.T) {
 	os.Unsetenv(features.CheckpointEnabledEnvVar)
 	features.ClearCache()
@@ -730,6 +740,45 @@ func TestUpgradeManagementRunResumeClusterResourcesReconcileFailed(t *testing.T)
 	}
 }
 
+func TestUpgradeManagementRunUpgradeCuratedPackagesSuccess(t *testing.T) {
+	os.Unsetenv(features.CheckpointEnabledEnvVar)
+	features.ClearCache()
+	tt := newUpgradeManagementClusterTest(t)
+	tt.newClusterSpec.Cluster.Spec.RegistryMirrorConfiguration = &v1alpha1.RegistryMirrorConfiguration{}
+	packagesManager := &appsv1.Deployment{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "eks-anywhere-packages",
+			Namespace: constants.EksaPackagesName,
+		},
+	}
+	tt.client = test.NewFakeKubeClient(tt.currentClusterSpec.Cluster, tt.currentClusterSpec.EKSARelease, tt.currentClusterSpec.Bundles, packagesManager)
+	tt.expectSetup()
+	tt.expectPreflightValidationsToPass()
+	tt.expectUpdateSecrets(nil)
+	tt.expectEnsureManagementEtcdCAPIComponentsExist(nil)
+	tt.expectUpgradeCoreComponents()
+	tt.expectPauseGitOpsReconcile(nil)
+	tt.expectBackupManagementFromCluster(nil)
+	tt.expectPauseCAPIWorkloadClusters(nil)
+	tt.expectDatacenterConfig()
+	tt.expectMachineConfigs()
+	tt.expectInstallEksdManifest(nil)
+	tt.expectApplyBundles(nil)
+	tt.expectApplyReleases(nil)
+	tt.expectUpgradeManagementCluster()
+	tt.expectResumeCAPIWorkloadClustersAPI(nil)
+	tt.expectUpdateGitEksaSpec(nil)
+	tt.expectForceReconcileGitRepo(nil)
+	tt.expectResumeGitOpsReconcile(nil)
+	tt.expectWriteManagementClusterConfig(nil)
+	tt.expectPackagesUpgrade()
+
+	err := tt.run()
+	if err != nil {
+		t.Fatalf("UpgradeManagement.Run() err = %v, want err = nil", err)
+	}
+}
+
 func TestUpgradeManagementRunSuccess(t *testing.T) {
 	os.Unsetenv(features.CheckpointEnabledEnvVar)
 	features.ClearCache()
diff --git a/pkg/workflows/workload/create.go b/pkg/workflows/workload/create.go
index 219a173a3d12..e7aa5146b746 100644
--- a/pkg/workflows/workload/create.go
+++ b/pkg/workflows/workload/create.go
@@ -19,7 +19,7 @@ type Create struct {
 	writer           filewriter.FileWriter
 	eksdInstaller    interfaces.EksdInstaller
 	clusterCreator   interfaces.ClusterCreator
-	packageInstaller interfaces.PackageInstaller
+	packageInstaller interfaces.PackageManager
 }
 
 // NewCreate builds a new create construct.
@@ -27,7 +27,7 @@ func NewCreate(provider providers.Provider,
 	clusterManager interfaces.ClusterManager, gitOpsManager interfaces.GitOpsManager,
 	writer filewriter.FileWriter, eksdInstaller interfaces.EksdInstaller,
-	packageInstaller interfaces.PackageInstaller,
+	packageInstaller interfaces.PackageManager,
 	clusterCreator interfaces.ClusterCreator,
 	clientFactory interfaces.ClientFactory,
 ) *Create {
diff --git a/pkg/workflows/workload/create_test.go b/pkg/workflows/workload/create_test.go
index d5f667f3a8a1..5005ca2b5d32 100644
--- a/pkg/workflows/workload/create_test.go
+++ b/pkg/workflows/workload/create_test.go
@@ -35,7 +35,7 @@ type createTestSetup struct {
 	writer           *writermocks.MockFileWriter
 	validator        *mocks.MockValidator
 	eksd             *mocks.MockEksdInstaller
-	packageInstaller *mocks.MockPackageInstaller
+	packageInstaller *mocks.MockPackageManager
 	clusterCreator   *mocks.MockClusterCreator
 	datacenterConfig providers.DatacenterConfig
 	machineConfigs   []providers.MachineConfig
@@ -56,7 +56,7 @@ func newCreateTest(t *testing.T) *createTestSetup {
 	provider := providermocks.NewMockProvider(mockCtrl)
 	writer := writermocks.NewMockFileWriter(mockCtrl)
 	eksd := mocks.NewMockEksdInstaller(mockCtrl)
-	packageInstaller := mocks.NewMockPackageInstaller(mockCtrl)
+	packageInstaller := mocks.NewMockPackageManager(mockCtrl)
 	eksdInstaller := mocks.NewMockEksdInstaller(mockCtrl)
 
 	datacenterConfig := &v1alpha1.VSphereDatacenterConfig{}
diff --git a/pkg/workflows/workload/upgrade.go b/pkg/workflows/workload/upgrade.go
index 722e4f52fef1..db955c59b99c 100644
--- a/pkg/workflows/workload/upgrade.go
+++ b/pkg/workflows/workload/upgrade.go
@@ -20,7 +20,7 @@ type Upgrade struct {
 	writer           filewriter.FileWriter
 	eksdInstaller    interfaces.EksdInstaller
 	clusterUpgrader  interfaces.ClusterUpgrader
-	packageInstaller interfaces.PackageInstaller
+	packageInstaller interfaces.PackageManager
 }
 
 // NewUpgrade builds a new upgrade construct.
@@ -30,7 +30,7 @@ func NewUpgrade(clientFactory interfaces.ClientFactory,
 	writer filewriter.FileWriter,
 	clusterUpgrader interfaces.ClusterUpgrader,
 	eksdInstaller interfaces.EksdInstaller,
-	packageInstaller interfaces.PackageInstaller,
+	packageInstaller interfaces.PackageManager,
 ) *Upgrade {
 	return &Upgrade{
 		clientFactory:    clientFactory,
diff --git a/pkg/workflows/workload/upgrade_test.go b/pkg/workflows/workload/upgrade_test.go
index 1f9e3e8f3cb3..05fb1e2ad2cd 100644
--- a/pkg/workflows/workload/upgrade_test.go
+++ b/pkg/workflows/workload/upgrade_test.go
@@ -34,7 +34,7 @@ type upgradeTestSetup struct {
 	writer           *writermocks.MockFileWriter
 	validator        *mocks.MockValidator
 	eksd             *mocks.MockEksdInstaller
-	packageInstaller *mocks.MockPackageInstaller
+	packageInstaller *mocks.MockPackageManager
 	clusterUpgrader  *mocks.MockClusterUpgrader
 	datacenterConfig providers.DatacenterConfig
 	machineConfigs   []providers.MachineConfig
@@ -55,7 +55,7 @@ func newUpgradeTest(t *testing.T) *upgradeTestSetup {
 	provider := providermocks.NewMockProvider(mockCtrl)
 	writer := writermocks.NewMockFileWriter(mockCtrl)
 	eksd := mocks.NewMockEksdInstaller(mockCtrl)
-	packageInstaller := mocks.NewMockPackageInstaller(mockCtrl)
+	packageInstaller := mocks.NewMockPackageManager(mockCtrl)
 	eksdInstaller := mocks.NewMockEksdInstaller(mockCtrl)
 
 	datacenterConfig := &v1alpha1.VSphereDatacenterConfig{}

From 8c04b4b4c070468198c4309642effdce0a6db8d8 Mon Sep 17 00:00:00 2001
From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com>
Date: Wed, 5 Jun 2024 10:40:06 -0700
Subject: [PATCH 184/193] [PR BOT] Generate release testdata files (#8244)

---
 .../testdata/main-bundle-release.yaml         | 308 +++++++++---------
 1 file changed, 154 insertions(+), 154 deletions(-)

diff --git a/release/cli/pkg/operations/testdata/main-bundle-release.yaml b/release/cli/pkg/operations/testdata/main-bundle-release.yaml
index 514f310a0993..4093e662b0fb 100644
--- a/release/cli/pkg/operations/testdata/main-bundle-release.yaml
+++ b/release/cli/pkg/operations/testdata/main-bundle-release.yaml
@@ -28,7 +28,7 @@ spec:
       imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
       name: kube-rbac-proxy
       os: linux
-      uri: 
public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.2/metadata.yaml version: v1.7.2+abcdef1 @@ -209,7 +209,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.2/metadata.yaml version: v1.7.2+abcdef1 @@ -226,7 +226,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -252,7 +252,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.16/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.17/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz crictl: arch: - amd64 @@ -289,10 +289,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.25.16-eks-d-1-25-39-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.25.16-eks-d-1-25-40-eks-a-v0.0.0-dev-build.1 kubeVersion: v1.25.16 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-25/kubernetes-1-25-eks-39.yaml - name: kubernetes-1-25-eks-39 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-25/kubernetes-1-25-eks-40.yaml + name: kubernetes-1-25-eks-40 ova: bottlerocket: {} raw: @@ -348,7 +348,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/metadata.yaml version: v1.0.13+abcdef1 @@ -372,7 +372,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/metadata.yaml version: v1.0.21+abcdef1 @@ -447,11 +447,11 @@ spec: imageDigest: 
sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-nutanix os: linux - uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.5-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/infrastructure-components.yaml kubeVip: arch: - amd64 @@ -462,8 +462,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/metadata.yaml - version: v1.3.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/metadata.yaml + version: v1.3.5+abcdef1 packageController: credentialProviderPackage: arch: @@ -507,7 +507,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap-snow os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-25-39-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-25-40-eks-a-v0.0.0-dev-build.1 components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml kubeVip: @@ -540,11 +540,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-tinkerbell os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/cluster-api-provider-tinkerbell:v0.5.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/cluster-api-provider-tinkerbell:v0.5.3-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/infrastructure-components.yaml envoy: arch: - amd64 @@ -564,7 +564,7 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/metadata.yaml + uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/metadata.yaml tinkerbellStack: actions: cexec: @@ -731,7 +731,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tinkerbell-chart uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.5-eks-a-v0.0.0-dev-build.1 - version: v0.5.2+abcdef1 + version: v0.5.3+abcdef1 upgrader: upgrader: arch: @@ -741,7 +741,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: upgrader os: linux - uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-25-39-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-25-40-eks-a-v0.0.0-dev-build.1 vSphere: clusterAPIController: arch: @@ -764,7 +764,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -806,7 +806,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.2/metadata.yaml version: v1.7.2+abcdef1 @@ -835,7 +835,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-26-35-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-26-37-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -930,7 +930,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -963,7 +963,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.2/metadata.yaml version: v1.7.2+abcdef1 @@ -987,7 +987,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.2/metadata.yaml version: v1.7.2+abcdef1 @@ -1004,7 +1004,7 @@ spec: 
imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -1030,7 +1030,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.16/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.17/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz crictl: arch: - amd64 @@ -1067,10 +1067,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.26.15-eks-d-1-26-35-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.26.15-eks-d-1-26-37-eks-a-v0.0.0-dev-build.1 kubeVersion: v1.26.15 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-26/kubernetes-1-26-eks-35.yaml - name: kubernetes-1-26-eks-35 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-26/kubernetes-1-26-eks-37.yaml + name: kubernetes-1-26-eks-37 ova: bottlerocket: {} raw: @@ -1126,7 +1126,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/metadata.yaml version: v1.0.13+abcdef1 @@ -1150,7 +1150,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/metadata.yaml version: v1.0.21+abcdef1 @@ -1225,11 +1225,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-nutanix os: linux - uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.5-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/infrastructure-components.yaml + uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/infrastructure-components.yaml kubeVip: arch: - amd64 @@ -1240,8 +1240,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/metadata.yaml - version: v1.3.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/metadata.yaml + version: v1.3.5+abcdef1 packageController: credentialProviderPackage: arch: @@ -1285,7 +1285,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap-snow os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-26-35-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-26-37-eks-a-v0.0.0-dev-build.1 components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml kubeVip: @@ -1318,11 +1318,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-tinkerbell os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/cluster-api-provider-tinkerbell:v0.5.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/cluster-api-provider-tinkerbell:v0.5.3-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/infrastructure-components.yaml envoy: arch: - amd64 @@ -1342,7 +1342,7 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/metadata.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/metadata.yaml tinkerbellStack: actions: cexec: @@ -1509,7 +1509,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tinkerbell-chart uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.5-eks-a-v0.0.0-dev-build.1 - version: v0.5.2+abcdef1 + version: v0.5.3+abcdef1 upgrader: upgrader: arch: @@ -1519,7 +1519,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: upgrader os: linux - uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-26-35-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-26-37-eks-a-v0.0.0-dev-build.1 vSphere: clusterAPIController: arch: @@ -1542,7 +1542,7 @@ 
spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -1584,7 +1584,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.2/metadata.yaml version: v1.7.2+abcdef1 @@ -1613,7 +1613,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-27-29-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-27-31-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -1708,7 +1708,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -1741,7 +1741,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.2/metadata.yaml version: v1.7.2+abcdef1 @@ -1765,7 +1765,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.2/metadata.yaml version: v1.7.2+abcdef1 @@ -1782,7 +1782,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -1808,7 +1808,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.16/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.17/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz crictl: arch: - amd64 @@ 
-1845,10 +1845,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.27.13-eks-d-1-27-29-eks-a-v0.0.0-dev-build.1 - kubeVersion: v1.27.13 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-27/kubernetes-1-27-eks-29.yaml - name: kubernetes-1-27-eks-29 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.27.14-eks-d-1-27-31-eks-a-v0.0.0-dev-build.1 + kubeVersion: v1.27.14 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-27/kubernetes-1-27-eks-31.yaml + name: kubernetes-1-27-eks-31 ova: bottlerocket: {} raw: @@ -1904,7 +1904,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/metadata.yaml version: v1.0.13+abcdef1 @@ -1928,7 +1928,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/metadata.yaml version: v1.0.21+abcdef1 @@ -2003,11 +2003,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-nutanix os: linux - uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.5-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/infrastructure-components.yaml kubeVip: arch: - amd64 @@ -2018,8 +2018,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/metadata.yaml - version: v1.3.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/metadata.yaml + version: v1.3.5+abcdef1 packageController: credentialProviderPackage: arch: @@ -2063,7 +2063,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap-snow os: linux - uri: 
public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-27-29-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-27-31-eks-a-v0.0.0-dev-build.1 components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml kubeVip: @@ -2096,11 +2096,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-tinkerbell os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/cluster-api-provider-tinkerbell:v0.5.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/cluster-api-provider-tinkerbell:v0.5.3-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/infrastructure-components.yaml envoy: arch: - amd64 @@ -2120,7 +2120,7 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/metadata.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/metadata.yaml tinkerbellStack: actions: cexec: @@ -2287,7 +2287,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tinkerbell-chart uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.5-eks-a-v0.0.0-dev-build.1 - version: v0.5.2+abcdef1 + version: v0.5.3+abcdef1 upgrader: upgrader: arch: @@ -2297,7 +2297,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: upgrader os: linux - uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-27-29-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-27-31-eks-a-v0.0.0-dev-build.1 vSphere: clusterAPIController: arch: @@ -2320,7 +2320,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -2362,7 +2362,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.2/metadata.yaml version: v1.7.2+abcdef1 @@ -2391,7 
+2391,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-28-22-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-28-24-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -2486,7 +2486,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -2519,7 +2519,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.2/metadata.yaml version: v1.7.2+abcdef1 @@ -2543,7 +2543,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.2/metadata.yaml version: v1.7.2+abcdef1 @@ -2560,7 +2560,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -2586,7 +2586,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.16/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.17/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz crictl: arch: - amd64 @@ -2623,10 +2623,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.28.9-eks-d-1-28-22-eks-a-v0.0.0-dev-build.1 - kubeVersion: v1.28.9 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-28/kubernetes-1-28-eks-22.yaml - name: kubernetes-1-28-eks-22 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.28.10-eks-d-1-28-24-eks-a-v0.0.0-dev-build.1 + kubeVersion: v1.28.10 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-28/kubernetes-1-28-eks-24.yaml + name: kubernetes-1-28-eks-24 ova: bottlerocket: {} raw: @@ -2682,7 +2682,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: 
public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/metadata.yaml version: v1.0.13+abcdef1 @@ -2706,7 +2706,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/metadata.yaml version: v1.0.21+abcdef1 @@ -2781,11 +2781,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-nutanix os: linux - uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.5-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/infrastructure-components.yaml kubeVip: arch: - amd64 @@ -2796,8 +2796,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/metadata.yaml - version: v1.3.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/metadata.yaml + version: v1.3.5+abcdef1 packageController: credentialProviderPackage: arch: @@ -2841,7 +2841,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap-snow os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-28-22-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-28-24-eks-a-v0.0.0-dev-build.1 components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml kubeVip: @@ -2874,11 +2874,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-tinkerbell os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/cluster-api-provider-tinkerbell:v0.5.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/cluster-api-provider-tinkerbell:v0.5.3-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/infrastructure-components.yaml envoy: arch: - amd64 @@ -2898,7 +2898,7 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/metadata.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/metadata.yaml tinkerbellStack: actions: cexec: @@ -3065,7 +3065,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tinkerbell-chart uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.5-eks-a-v0.0.0-dev-build.1 - version: v0.5.2+abcdef1 + version: v0.5.3+abcdef1 upgrader: upgrader: arch: @@ -3075,7 +3075,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: upgrader os: linux - uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-28-22-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-28-24-eks-a-v0.0.0-dev-build.1 vSphere: clusterAPIController: arch: @@ -3098,7 +3098,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -3140,7 +3140,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.2/metadata.yaml version: v1.7.2+abcdef1 @@ -3169,7 +3169,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-29-11-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-29-13-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -3264,7 +3264,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -3297,7 +3297,7 @@ spec: imageDigest: 
sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.2/metadata.yaml version: v1.7.2+abcdef1 @@ -3321,7 +3321,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.2/metadata.yaml version: v1.7.2+abcdef1 @@ -3338,7 +3338,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -3364,7 +3364,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.16/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.17/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz crictl: arch: - amd64 @@ -3401,10 +3401,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.29.4-eks-d-1-29-11-eks-a-v0.0.0-dev-build.1 - kubeVersion: v1.29.4 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-29/kubernetes-1-29-eks-11.yaml - name: kubernetes-1-29-eks-11 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.29.5-eks-d-1-29-13-eks-a-v0.0.0-dev-build.1 + kubeVersion: v1.29.5 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-29/kubernetes-1-29-eks-13.yaml + name: kubernetes-1-29-eks-13 ova: bottlerocket: {} raw: @@ -3460,7 +3460,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/metadata.yaml version: v1.0.13+abcdef1 @@ -3484,7 +3484,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/metadata.yaml version: v1.0.21+abcdef1 @@ -3559,11 +3559,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-nutanix os: linux - uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.5-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/infrastructure-components.yaml kubeVip: arch: - amd64 @@ -3574,8 +3574,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/metadata.yaml - version: v1.3.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/metadata.yaml + version: v1.3.5+abcdef1 packageController: credentialProviderPackage: arch: @@ -3619,7 +3619,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap-snow os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-29-11-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-29-13-eks-a-v0.0.0-dev-build.1 components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml kubeVip: @@ -3652,11 +3652,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-tinkerbell os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/cluster-api-provider-tinkerbell:v0.5.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/cluster-api-provider-tinkerbell:v0.5.3-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/infrastructure-components.yaml envoy: arch: - amd64 @@ -3676,7 +3676,7 @@ spec: os: linux uri: 
public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/metadata.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/metadata.yaml tinkerbellStack: actions: cexec: @@ -3843,7 +3843,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tinkerbell-chart uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.5-eks-a-v0.0.0-dev-build.1 - version: v0.5.2+abcdef1 + version: v0.5.3+abcdef1 upgrader: upgrader: arch: @@ -3853,7 +3853,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: upgrader os: linux - uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-29-11-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-29-13-eks-a-v0.0.0-dev-build.1 vSphere: clusterAPIController: arch: @@ -3876,7 +3876,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -3918,7 +3918,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.7.2/metadata.yaml version: v1.7.2+abcdef1 @@ -3947,7 +3947,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-30-4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-30-6-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -4042,7 +4042,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 @@ -4075,7 +4075,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/cluster-api/v1.7.2/metadata.yaml version: v1.7.2+abcdef1 @@ -4099,7 +4099,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: 
public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api/manifests/control-plane-kubeadm/v1.7.2/metadata.yaml version: v1.7.2+abcdef1 @@ -4116,7 +4116,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 manager: arch: - amd64 @@ -4142,7 +4142,7 @@ spec: os: linux sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.16/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/containerd/v1.7.17/containerd-v0.0.0-dev-build.0-linux-amd64.tar.gz crictl: arch: - amd64 @@ -4179,10 +4179,10 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.30.0-eks-d-1-30-4-eks-a-v0.0.0-dev-build.1 - kubeVersion: v1.30.0 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-30/kubernetes-1-30-eks-4.yaml - name: kubernetes-1-30-eks-4 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.30.1-eks-d-1-30-6-eks-a-v0.0.0-dev-build.1 + kubeVersion: v1.30.1 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-30/kubernetes-1-30-eks-6.yaml + name: kubernetes-1-30-eks-6 ova: bottlerocket: {} raw: @@ -4238,7 +4238,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.13/metadata.yaml version: v1.0.13+abcdef1 @@ -4262,7 +4262,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 metadata: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.21/metadata.yaml version: v1.0.21+abcdef1 @@ -4337,11 +4337,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-nutanix os: linux - uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/nutanix-cloud-native/cluster-api-provider-nutanix:v1.3.5-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/cluster-template.yaml + uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/infrastructure-components.yaml kubeVip: arch: - amd64 @@ -4352,8 +4352,8 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.3/metadata.yaml - version: v1.3.3+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-nutanix/manifests/infrastructure-nutanix/v1.3.5/metadata.yaml + version: v1.3.5+abcdef1 packageController: credentialProviderPackage: arch: @@ -4397,7 +4397,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap-snow os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-30-4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap-snow:v1-30-6-eks-a-v0.0.0-dev-build.1 components: uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-aws-snow/manifests/infrastructure-snow/v0.1.27/infrastructure-components.yaml kubeVip: @@ -4430,11 +4430,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-tinkerbell os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/cluster-api-provider-tinkerbell:v0.5.2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/cluster-api-provider-tinkerbell:v0.5.3-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/infrastructure-components.yaml envoy: arch: - amd64 @@ -4454,7 +4454,7 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/metadata.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/metadata.yaml tinkerbellStack: actions: cexec: @@ -4621,7 +4621,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tinkerbell-chart uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.5-eks-a-v0.0.0-dev-build.1 - version: v0.5.2+abcdef1 + version: v0.5.3+abcdef1 upgrader: upgrader: arch: @@ -4631,7 +4631,7 @@ spec: imageDigest: 
sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: upgrader os: linux - uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-30-4-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/aws/upgrader:v1-30-6-eks-a-v0.0.0-dev-build.1 vSphere: clusterAPIController: arch: @@ -4654,7 +4654,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kube-rbac-proxy os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.17.1-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.18.0-eks-a-v0.0.0-dev-build.1 kubeVip: arch: - amd64 From 5df610ac93885f54f6aeb9039f29e6b74acf086d Mon Sep 17 00:00:00 2001 From: EKS Distro PR Bot <75336432+eks-distro-pr-bot@users.noreply.github.com> Date: Wed, 5 Jun 2024 16:52:06 -0700 Subject: [PATCH 185/193] [PR BOT] Generate release testdata files (#8253) --- .../testdata/main-bundle-release.yaml | 96 +++++++++---------- 1 file changed, 48 insertions(+), 48 deletions(-) diff --git a/release/cli/pkg/operations/testdata/main-bundle-release.yaml b/release/cli/pkg/operations/testdata/main-bundle-release.yaml index 4093e662b0fb..f413688bca09 100644 --- a/release/cli/pkg/operations/testdata/main-bundle-release.yaml +++ b/release/cli/pkg/operations/testdata/main-bundle-release.yaml @@ -423,11 +423,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: haproxy os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/haproxy:v0.22.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/haproxy:v0.23.0-eks-a-v0.0.0-dev-build.1 kindnetd: manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/kind/manifests/kindnetd/v0.22.0/kindnetd.yaml - version: v0.22.0+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/kind/manifests/kindnetd/v0.23.0/kindnetd.yaml + version: v0.23.0+abcdef1 kubeVersion: "1.25" nutanix: cloudProvider: @@ -540,11 +540,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-tinkerbell os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/cluster-api-provider-tinkerbell:v0.5.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/cluster-api-provider-tinkerbell:v0.5.2-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/infrastructure-components.yaml envoy: arch: - amd64 @@ -564,7 +564,7 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/metadata.yaml + uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/metadata.yaml tinkerbellStack: actions: cexec: @@ -731,7 +731,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tinkerbell-chart uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.5-eks-a-v0.0.0-dev-build.1 - version: v0.5.3+abcdef1 + version: v0.5.2+abcdef1 upgrader: upgrader: arch: @@ -1201,11 +1201,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: haproxy os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/haproxy:v0.22.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/haproxy:v0.23.0-eks-a-v0.0.0-dev-build.1 kindnetd: manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/kind/manifests/kindnetd/v0.22.0/kindnetd.yaml - version: v0.22.0+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/kind/manifests/kindnetd/v0.23.0/kindnetd.yaml + version: v0.23.0+abcdef1 kubeVersion: "1.26" nutanix: cloudProvider: @@ -1318,11 +1318,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-tinkerbell os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/cluster-api-provider-tinkerbell:v0.5.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/cluster-api-provider-tinkerbell:v0.5.2-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/infrastructure-components.yaml envoy: arch: - amd64 @@ -1342,7 +1342,7 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/metadata.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/metadata.yaml tinkerbellStack: actions: cexec: @@ -1509,7 +1509,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tinkerbell-chart uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.5-eks-a-v0.0.0-dev-build.1 - version: v0.5.3+abcdef1 + version: v0.5.2+abcdef1 upgrader: upgrader: arch: @@ -1979,11 +1979,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: haproxy os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/haproxy:v0.22.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/haproxy:v0.23.0-eks-a-v0.0.0-dev-build.1 kindnetd: manifest: - uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/kind/manifests/kindnetd/v0.22.0/kindnetd.yaml - version: v0.22.0+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/kind/manifests/kindnetd/v0.23.0/kindnetd.yaml + version: v0.23.0+abcdef1 kubeVersion: "1.27" nutanix: cloudProvider: @@ -2096,11 +2096,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-tinkerbell os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/cluster-api-provider-tinkerbell:v0.5.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/cluster-api-provider-tinkerbell:v0.5.2-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/infrastructure-components.yaml envoy: arch: - amd64 @@ -2120,7 +2120,7 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/metadata.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/metadata.yaml tinkerbellStack: actions: cexec: @@ -2287,7 +2287,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tinkerbell-chart uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.5-eks-a-v0.0.0-dev-build.1 - version: v0.5.3+abcdef1 + version: v0.5.2+abcdef1 upgrader: upgrader: arch: @@ -2757,11 +2757,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: haproxy os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/haproxy:v0.22.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/haproxy:v0.23.0-eks-a-v0.0.0-dev-build.1 kindnetd: manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/kind/manifests/kindnetd/v0.22.0/kindnetd.yaml - version: v0.22.0+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/kind/manifests/kindnetd/v0.23.0/kindnetd.yaml + version: v0.23.0+abcdef1 kubeVersion: "1.28" nutanix: cloudProvider: @@ -2874,11 +2874,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-tinkerbell os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/cluster-api-provider-tinkerbell:v0.5.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/cluster-api-provider-tinkerbell:v0.5.2-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/cluster-template.yaml + uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/infrastructure-components.yaml envoy: arch: - amd64 @@ -2898,7 +2898,7 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/metadata.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/metadata.yaml tinkerbellStack: actions: cexec: @@ -3065,7 +3065,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tinkerbell-chart uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.5-eks-a-v0.0.0-dev-build.1 - version: v0.5.3+abcdef1 + version: v0.5.2+abcdef1 upgrader: upgrader: arch: @@ -3535,11 +3535,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: haproxy os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/haproxy:v0.22.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/haproxy:v0.23.0-eks-a-v0.0.0-dev-build.1 kindnetd: manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/kind/manifests/kindnetd/v0.22.0/kindnetd.yaml - version: v0.22.0+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/kind/manifests/kindnetd/v0.23.0/kindnetd.yaml + version: v0.23.0+abcdef1 kubeVersion: "1.29" nutanix: cloudProvider: @@ -3652,11 +3652,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-tinkerbell os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/cluster-api-provider-tinkerbell:v0.5.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/cluster-api-provider-tinkerbell:v0.5.2-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/infrastructure-components.yaml envoy: arch: - amd64 @@ -3676,7 +3676,7 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/metadata.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/metadata.yaml 
tinkerbellStack: actions: cexec: @@ -3843,7 +3843,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tinkerbell-chart uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.5-eks-a-v0.0.0-dev-build.1 - version: v0.5.3+abcdef1 + version: v0.5.2+abcdef1 upgrader: upgrader: arch: @@ -4313,11 +4313,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: haproxy os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/haproxy:v0.22.0-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/haproxy:v0.23.0-eks-a-v0.0.0-dev-build.1 kindnetd: manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/kind/manifests/kindnetd/v0.22.0/kindnetd.yaml - version: v0.22.0+abcdef1 + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/kind/manifests/kindnetd/v0.23.0/kindnetd.yaml + version: v0.23.0+abcdef1 kubeVersion: "1.30" nutanix: cloudProvider: @@ -4430,11 +4430,11 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: cluster-api-provider-tinkerbell os: linux - uri: public.ecr.aws/release-container-registry/tinkerbell/cluster-api-provider-tinkerbell:v0.5.3-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/tinkerbell/cluster-api-provider-tinkerbell:v0.5.2-eks-a-v0.0.0-dev-build.1 clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/cluster-template.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/cluster-template.yaml components: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/infrastructure-components.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/infrastructure-components.yaml envoy: arch: - amd64 @@ -4454,7 +4454,7 @@ spec: os: linux uri: public.ecr.aws/release-container-registry/kube-vip/kube-vip:v0.8.0-eks-a-v0.0.0-dev-build.1 metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.3/metadata.yaml + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cluster-api-provider-tinkerbell/manifests/infrastructure-tinkerbell/v0.5.2/metadata.yaml tinkerbellStack: actions: cexec: @@ -4621,7 +4621,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: tinkerbell-chart uri: public.ecr.aws/release-container-registry/tinkerbell/tinkerbell-chart:0.2.5-eks-a-v0.0.0-dev-build.1 - version: v0.5.3+abcdef1 + version: v0.5.2+abcdef1 upgrader: upgrader: arch: From 5940b0e74c5596721d009e3ed4ac84866dc0c707 Mon Sep 17 00:00:00 2001 From: Abhay Krishna Date: Thu, 6 Jun 2024 00:51:56 -0700 Subject: [PATCH 186/193] Update Docker test name for presubmit E2E (#8257) --- Makefile | 5 ++--- scripts/e2e_test_docker.sh | 2 +- test/e2e/README.md | 4 ++-- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index 79d7c19620fb..054ee497cb29 100644 --- a/Makefile +++ b/Makefile @@ -46,15 +46,14 @@ endif ifeq (,$(findstring $(BRANCH_NAME),main)) ## use the branch-specific bundle manifest if the branch is not 'main' 
-BUNDLE_MANIFEST_URL?=https://dev-release-assets.eks-anywhere.model-rocket.aws.dev/${BRANCH_NAME}/bundle-release.yaml
 RELEASE_MANIFEST_URL?=$(RELEASE_MANIFEST_HOST)/${BRANCH_NAME}/eks-a-release.yaml
 LATEST=$(BRANCH_NAME)
 else
 ## use the standard bundle manifest if the branch is 'main'
-BUNDLE_MANIFEST_URL?=https://dev-release-assets.eks-anywhere.model-rocket.aws.dev/bundle-release.yaml
 RELEASE_MANIFEST_URL?=$(RELEASE_MANIFEST_HOST)/eks-a-release.yaml
 LATEST=latest
 endif
+BUNDLE_MANIFEST_URL?=$(shell curl $(RELEASE_MANIFEST_URL) | yq ".spec.releases[-1].bundleManifestUrl")
 
 # DEV_GIT_VERSION should be something like v0.19.0-dev+latest, depending on the base branch
 # and if this is a local build or a CI build.
@@ -160,7 +159,7 @@ EKS_A_CROSS_PLATFORMS := $(foreach platform,$(EKS_A_PLATFORMS),eks-a-cross-platform-$(platform))
 E2E_CROSS_PLATFORMS := $(foreach platform,$(EKS_A_PLATFORMS),e2e-cross-platform-$(platform))
 EKS_A_RELEASE_CROSS_PLATFORMS := $(foreach platform,$(EKS_A_PLATFORMS),eks-a-release-cross-platform-$(platform))
 
-DOCKER_E2E_TEST := TestDockerKubernetes125SimpleFlow
+DOCKER_E2E_TEST := TestDockerKubernetes130SimpleFlow
 LOCAL_E2E_TESTS ?= $(DOCKER_E2E_TEST)
 
 EMBED_CONFIG_FOLDER = pkg/files/config
diff --git a/scripts/e2e_test_docker.sh b/scripts/e2e_test_docker.sh
index 61fd597e863b..278a15872126 100755
--- a/scripts/e2e_test_docker.sh
+++ b/scripts/e2e_test_docker.sh
@@ -36,7 +36,7 @@ fi
 REPO_ROOT=$(git rev-parse --show-toplevel)
 BIN_FOLDER=$REPO_ROOT/bin
 
-TEST_REGEX="${1:-TestDockerKubernetes125SimpleFlow}"
+TEST_REGEX="${1:-TestDockerKubernetes130SimpleFlow}"
 BRANCH_NAME="${2:-main}"
 
 
diff --git a/test/e2e/README.md b/test/e2e/README.md
index fa0325a124c6..553475361c03 100644
--- a/test/e2e/README.md
+++ b/test/e2e/README.md
@@ -11,8 +11,8 @@ or
 #
 # The makefile will include the .env file and export all the vars to the environment for you
 #
-# By default the local-e2e target will run TestDockerKubernetes125SimpleFlow. You can either
-# override LOCAL_E2E_TESTS in your .env file or pass it on the cli every time (i.e LOCAL_E2E_TESTS=TestDockerKubernetes125SimpleFlow)
+# By default the local-e2e target will run TestDockerKubernetes130SimpleFlow. You can either
+# override LOCAL_E2E_TESTS in your .env file or pass it on the cli every time (i.e LOCAL_E2E_TESTS=TestDockerKubernetes130SimpleFlow)
 make local-e2e
 ```
 or

From 8ab221a8bb18e7e76fd610896d227cb54cc4bd2a Mon Sep 17 00:00:00 2001
From: Saurabh Parekh
Date: Thu, 6 Jun 2024 13:19:58 -0700
Subject: [PATCH 187/193] Cleanup duplicate code in curated packages and rename
 function (#8258)

Override govulncheck
---
 test/e2e/autoscaler.go      |  4 ++--
 test/e2e/certmanager.go     | 14 ++------------
 test/e2e/curatedpackages.go |  6 +++---
 test/e2e/emissary.go        |  6 +++---
 4 files changed, 10 insertions(+), 20 deletions(-)

diff --git a/test/e2e/autoscaler.go b/test/e2e/autoscaler.go
index cf62a65ad926..f521c17431f0 100644
--- a/test/e2e/autoscaler.go
+++ b/test/e2e/autoscaler.go
@@ -13,7 +13,7 @@ func runAutoscalerWithMetricsServerSimpleFlow(test *framework.ClusterE2ETest) {
 		metricServerName := "metrics-server"
 		targetNamespace := "eksa-packages"
 		test.InstallAutoScalerWithMetricServer(targetNamespace)
-		test.CombinedAutoScalerMetricServerTest(autoscalerName, metricServerName, targetNamespace, withMgmtCluster(test))
+		test.CombinedAutoScalerMetricServerTest(autoscalerName, metricServerName, targetNamespace, withCluster(test))
 	})
 }
 
@@ -25,7 +25,7 @@ func runAutoscalerWithMetricsServerTinkerbellSimpleFlow(test *framework.ClusterE
 	metricServerName := "metrics-server"
 	targetNamespace := "eksa-packages"
 	test.InstallAutoScalerWithMetricServer(targetNamespace)
-	test.CombinedAutoScalerMetricServerTest(autoscalerName, metricServerName, targetNamespace, withMgmtCluster(test))
+	test.CombinedAutoScalerMetricServerTest(autoscalerName, metricServerName, targetNamespace, withCluster(test))
 	test.DeleteCluster()
 	test.ValidateHardwareDecommissioned()
 }
diff --git a/test/e2e/certmanager.go b/test/e2e/certmanager.go
index 4fd44856c7c4..0efe4c92fb08 100644
--- a/test/e2e/certmanager.go
+++ b/test/e2e/certmanager.go
@@ -4,12 +4,9 @@
 package e2e
 
 import (
-	"fmt"
-	"path/filepath"
 	"time"
 
 	"github.com/aws/eks-anywhere/pkg/kubeconfig"
-	"github.com/aws/eks-anywhere/pkg/types"
 	"github.com/aws/eks-anywhere/test/framework"
 )
 
@@ -30,18 +27,11 @@ func runCertManagerRemoteClusterInstallSimpleFlow(test *framework.MulticlusterE2
 		packagePrefix := "test"
 		packageFile := e.BuildPackageConfigFile(packageName, packagePrefix, EksaPackagesNamespace)
 		test.ManagementCluster.InstallCuratedPackageFile(packageFile, kubeconfig.FromClusterName(test.ManagementCluster.ClusterName))
-		e.VerifyCertManagerPackageInstalled(packagePrefix, EksaPackagesNamespace, cmPackageName, withMgmtClusterSetup(test.ManagementCluster))
-		e.CleanupCerts(withMgmtClusterSetup(test.ManagementCluster))
+		e.VerifyCertManagerPackageInstalled(packagePrefix, EksaPackagesNamespace, cmPackageName, withCluster(test.ManagementCluster))
+		e.CleanupCerts(withCluster(test.ManagementCluster))
 		e.DeleteClusterWithKubectl()
 		e.ValidateClusterDelete()
 	})
 	time.Sleep(5 * time.Minute)
 	test.DeleteManagementCluster()
 }
-
-func withMgmtClusterSetup(cluster *framework.ClusterE2ETest) *types.Cluster {
-	return &types.Cluster{
-		Name:           cluster.ClusterName,
-		KubeconfigFile: filepath.Join(cluster.ClusterName, fmt.Sprintf("%s-eks-a-cluster.kubeconfig", cluster.ClusterName)),
-	}
-}
diff --git a/test/e2e/curatedpackages.go b/test/e2e/curatedpackages.go
index 4ba25829bcdd..547b30e82e82 100644
--- a/test/e2e/curatedpackages.go
+++ b/test/e2e/curatedpackages.go
@@ -33,7 +33,7 @@ func runCuratedPackageInstall(test *framework.ClusterE2ETest) {
 	packagePrefix := "test"
 	packageFile := test.BuildPackageConfigFile(packageName, packagePrefix, EksaPackagesNamespace)
 	test.InstallCuratedPackageFile(packageFile, kubeconfig.FromClusterName(test.ClusterName))
-	test.VerifyHelloPackageInstalled(packagePrefix+"-"+packageName, withMgmtCluster(test))
+	test.VerifyHelloPackageInstalled(packagePrefix+"-"+packageName, withCluster(test))
 }
 
 func runCuratedPackageInstallSimpleFlow(test *framework.ClusterE2ETest) {
@@ -65,7 +65,7 @@ func runCuratedPackageRemoteClusterInstallSimpleFlow(test *framework.Multicluste
 		packagePrefix := "test"
 		packageFile := e.BuildPackageConfigFile(packageName, packagePrefix, EksaPackagesNamespace)
 		test.ManagementCluster.InstallCuratedPackageFile(packageFile, kubeconfig.FromClusterName(test.ManagementCluster.ClusterName))
-		e.VerifyHelloPackageInstalled(packagePrefix+"-"+packageName, withMgmtCluster(test.ManagementCluster))
+		e.VerifyHelloPackageInstalled(packagePrefix+"-"+packageName, withCluster(test.ManagementCluster))
 		e.DeleteClusterWithKubectl()
 		e.ValidateClusterDelete()
 	})
@@ -219,7 +219,7 @@ func packageBundleURI(version v1alpha1.KubernetesVersion) string {
 	return fmt.Sprintf("%s:%s", EksaPackageBundleURI, tag)
 }
 
-func withMgmtCluster(cluster *framework.ClusterE2ETest) *types.Cluster {
+func withCluster(cluster *framework.ClusterE2ETest) *types.Cluster {
 	return &types.Cluster{
 		Name:           cluster.ClusterName,
 		KubeconfigFile: filepath.Join(cluster.ClusterName, fmt.Sprintf("%s-eks-a-cluster.kubeconfig", cluster.ClusterName)),
 	}
diff --git a/test/e2e/emissary.go b/test/e2e/emissary.go
index ef6c140f9fbf..73a6288da0a4 100644
--- a/test/e2e/emissary.go
+++ b/test/e2e/emissary.go
@@ -20,9 +20,9 @@ func runCuratedPackageEmissaryInstall(test *framework.ClusterE2ETest) {
 	test.SetPackageBundleActive()
 	packageFile := test.BuildPackageConfigFile(emissaryPackageName, emissaryPackagePrefix, EksaPackagesNamespace)
 	test.InstallCuratedPackageFile(packageFile, kubeconfig.FromClusterName(test.ClusterName))
-	test.VerifyEmissaryPackageInstalled(emissaryPackagePrefix+"-"+emissaryPackageName, withMgmtCluster(test))
+	test.VerifyEmissaryPackageInstalled(emissaryPackagePrefix+"-"+emissaryPackageName, withCluster(test))
 	if test.Provider.Name() == constants.DockerProviderName {
-		test.TestEmissaryPackageRouting(emissaryPackagePrefix+"-"+emissaryPackageName, "hello", withMgmtCluster(test))
+		test.TestEmissaryPackageRouting(emissaryPackagePrefix+"-"+emissaryPackageName, "hello", withCluster(test))
 	}
 }
 
@@ -41,7 +41,7 @@ func runCuratedPackageEmissaryRemoteClusterInstallSimpleFlow(test *framework.Mul
 		test.ManagementCluster.SetPackageBundleActive()
 		packageFile := e.BuildPackageConfigFile(emissaryPackageName, emissaryPackagePrefix, EksaPackagesNamespace)
 		test.ManagementCluster.InstallCuratedPackageFile(packageFile, kubeconfig.FromClusterName(test.ManagementCluster.ClusterName))
-		e.VerifyEmissaryPackageInstalled(emissaryPackagePrefix+"-"+emissaryPackageName, withMgmtCluster(test.ManagementCluster))
+		e.VerifyEmissaryPackageInstalled(emissaryPackagePrefix+"-"+emissaryPackageName, withCluster(test.ManagementCluster))
 		e.DeleteClusterWithKubectl()
 		e.ValidateClusterDelete()
 	})

From e3d0f13c88dbc7c193fd3f9c77fdba0c25242bd0 Mon Sep 17 00:00:00 2001
From: Ilya Alekseyev
Date: Fri, 7 Jun 2024 00:27:07 +0200
Subject: [PATCH 188/193] Add control plane failure domains feature for
 Nutanix provider (#8192)

* Add failure domains support for Nutanix Provider

- change Nutanix Datacenter CRD
- change templates
- generate manifests
- add unittest

* Fix lint error

* Regenerate deepcopy files

* Fix PR comments

- add validation
- fix template
- add unittest for validation

* Add validations and unit tests

* Allow updating failure domains for existing clusters
---
 ...mazonaws.com_nutanixdatacenterconfigs.yaml |  64 ++
 config/manifest/eksa-components.yaml          |  64 ++
 .../v1alpha1/nutanixdatacenterconfig_test.go  |  16 +
 .../v1alpha1/nutanixdatacenterconfig_types.go |  58 ++
 ...tacenterconfig-invalid-failuredomains.yaml |  30 +
 ...datacenterconfig-valid-failuredomains.yaml |  30 +
 pkg/api/v1alpha1/zz_generated.deepcopy.go     |  30 +
 pkg/providers/nutanix/config/cp-template.yaml |  26 +
 pkg/providers/nutanix/provider.go             |   1 -
 pkg/providers/nutanix/provider_test.go        |  18 +-
 pkg/providers/nutanix/template.go             |  31 +
 pkg/providers/nutanix/template_test.go        |  28 +
 .../cluster_nutanix_failure_domains.yaml      |  87 +++
 ...datacenterConfig_with_failure_domains.yaml |  27 +
 ..._with_failure_domains_invalid_cluster.yaml |  27 +
 ...fig_with_failure_domains_invalid_name.yaml |  27 +
 ...g_with_failure_domains_invalid_subnet.yaml |  27 +
 .../expected_results_failure_domains.yaml     | 631 ++++++++++++++++++
 pkg/providers/nutanix/validator.go            |  32 +
 pkg/providers/nutanix/validator_test.go       | 138 ++++
 20 files changed, 1389 insertions(+), 3 deletions(-)
 create mode 100644 pkg/api/v1alpha1/testdata/nutanix/datacenterconfig-invalid-failuredomains.yaml
 create mode 100644 pkg/api/v1alpha1/testdata/nutanix/datacenterconfig-valid-failuredomains.yaml
 create mode 100644 pkg/providers/nutanix/testdata/cluster_nutanix_failure_domains.yaml
 create mode 100644 pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains.yaml
 create mode 100644 pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_cluster.yaml
 create mode 100644 pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_name.yaml
 create mode 100644 pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_subnet.yaml
 create mode 100644 pkg/providers/nutanix/testdata/expected_results_failure_domains.yaml

diff --git a/config/crd/bases/anywhere.eks.amazonaws.com_nutanixdatacenterconfigs.yaml b/config/crd/bases/anywhere.eks.amazonaws.com_nutanixdatacenterconfigs.yaml
index 49d4c9af3a11..ddcf876d2e87 100644
--- a/config/crd/bases/anywhere.eks.amazonaws.com_nutanixdatacenterconfigs.yaml
+++ b/config/crd/bases/anywhere.eks.amazonaws.com_nutanixdatacenterconfigs.yaml
@@ -56,6 +56,70 @@ spec:
             endpoint:
               description: Endpoint is the Endpoint of Nutanix Prism Central
               type: string
+            failureDomains:
+              description: FailureDomains is the optional list of failure domains
+                for the Nutanix Datacenter.
+              items:
+                description: NutanixDatacenterFailureDomain defines the failure
+                  domain for the Nutanix Datacenter.
+                properties:
+                  cluster:
+                    description: Cluster is the Prism Element cluster name or uuid
+                      that is connected to the Prism Central.
+                    properties:
+                      name:
+                        description: name is the resource name in the PC
+                        type: string
+                      type:
+                        description: Type is the identifier type to use for this
+                          resource.
+                        enum:
+                        - uuid
+                        - name
+                        type: string
+                      uuid:
+                        description: uuid is the UUID of the resource in the PC.
+                        type: string
+                    required:
+                    - type
+                    type: object
+                  name:
+                    description: Name is the unique name of the failure domain.
+                      Name must be between 1 and 64 characters long. It must consist
+                      of only lower case alphanumeric characters and hyphens (-).
+                      It must start and end with an alphanumeric character.
+                    maxLength: 64
+                    minLength: 1
+                    pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
+                    type: string
+                  subnets:
+                    description: Subnets holds the list of subnets identifiers cluster's
+                      network subnets.
+                    items:
+                      description: NutanixResourceIdentifier holds the identity
+                        of a Nutanix Prism resource (cluster, image, subnet, etc.)
+                      properties:
+                        name:
+                          description: name is the resource name in the PC
+                          type: string
+                        type:
+                          description: Type is the identifier type to use for this
+                            resource.
+                          enum:
+                          - uuid
+                          - name
+                          type: string
+                        uuid:
+                          description: uuid is the UUID of the resource in the PC.
+                          type: string
+                      required:
+                      - type
+                      type: object
+                    type: array
+                required:
+                - name
+                type: object
+              type: array
             insecure:
               description: Insecure is the optional flag to skip TLS verification.
                 Nutanix Prism Central installation by default ships with a self-signed
diff --git a/config/manifest/eksa-components.yaml b/config/manifest/eksa-components.yaml
index 179d992644b2..7077c75c929d 100644
--- a/config/manifest/eksa-components.yaml
+++ b/config/manifest/eksa-components.yaml
@@ -5507,6 +5507,70 @@ spec:
             endpoint:
               description: Endpoint is the Endpoint of Nutanix Prism Central
               type: string
+            failureDomains:
+              description: FailureDomains is the optional list of failure domains
+                for the Nutanix Datacenter.
+              items:
+                description: NutanixDatacenterFailureDomain defines the failure
+                  domain for the Nutanix Datacenter.
+                properties:
+                  cluster:
+                    description: Cluster is the Prism Element cluster name or uuid
+                      that is connected to the Prism Central.
+                    properties:
+                      name:
+                        description: name is the resource name in the PC
+                        type: string
+                      type:
+                        description: Type is the identifier type to use for this
+                          resource.
+                        enum:
+                        - uuid
+                        - name
+                        type: string
+                      uuid:
+                        description: uuid is the UUID of the resource in the PC.
+                        type: string
+                    required:
+                    - type
+                    type: object
+                  name:
+                    description: Name is the unique name of the failure domain.
+                      Name must be between 1 and 64 characters long. It must consist
+                      of only lower case alphanumeric characters and hyphens (-).
+                      It must start and end with an alphanumeric character.
+                    maxLength: 64
+                    minLength: 1
+                    pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
+                    type: string
+                  subnets:
+                    description: Subnets holds the list of subnets identifiers cluster's
+                      network subnets.
+                    items:
+                      description: NutanixResourceIdentifier holds the identity
+                        of a Nutanix Prism resource (cluster, image, subnet, etc.)
+                      properties:
+                        name:
+                          description: name is the resource name in the PC
+                          type: string
+                        type:
+                          description: Type is the identifier type to use for this
+                            resource.
+                          enum:
+                          - uuid
+                          - name
+                          type: string
+                        uuid:
+                          description: uuid is the UUID of the resource in the PC.
+                          type: string
+                      required:
+                      - type
+                      type: object
+                    type: array
+                required:
+                - name
+                type: object
+              type: array
             insecure:
               description: Insecure is the optional flag to skip TLS verification.
                Nutanix Prism Central installation by default ships with a self-signed
diff --git a/pkg/api/v1alpha1/nutanixdatacenterconfig_test.go b/pkg/api/v1alpha1/nutanixdatacenterconfig_test.go
index 1eaf40e16525..1055bb8e05c6 100644
--- a/pkg/api/v1alpha1/nutanixdatacenterconfig_test.go
+++ b/pkg/api/v1alpha1/nutanixdatacenterconfig_test.go
@@ -175,6 +175,22 @@ func TestGetNutanixDatacenterConfigValidConfig(t *testing.T) {
 				assert.Contains(t, err.Error(), "NutanixDatacenterConfig credentialRef name is not set or is empty")
 			},
 		},
+		{
+			name:     "datacenterconfig-valid-failure-domains",
+			fileName: "testdata/nutanix/datacenterconfig-valid-failuredomains.yaml",
+			assertions: func(t *testing.T, dcConf *v1alpha1.NutanixDatacenterConfig) {
+				assert.NoError(t, dcConf.Validate())
+			},
+		},
+		{
+			name:     "datacenterconfig-invalid-failure-domains",
+			fileName: "testdata/nutanix/datacenterconfig-invalid-failuredomains.yaml",
+			assertions: func(t *testing.T, dcConf *v1alpha1.NutanixDatacenterConfig) {
+				err := dcConf.Validate()
+				assert.Error(t, err)
+				assert.Contains(t, err.Error(), "NutanixDatacenterConfig.Spec.FailureDomains.Subnets: missing subnet UUID: default/eksa-unit-test")
+			},
+		},
 	}
 
 	for _, test := range tests {
diff --git a/pkg/api/v1alpha1/nutanixdatacenterconfig_types.go b/pkg/api/v1alpha1/nutanixdatacenterconfig_types.go
index 4fb6fc522d75..2ab1b77467f3 100644
--- a/pkg/api/v1alpha1/nutanixdatacenterconfig_types.go
+++ b/pkg/api/v1alpha1/nutanixdatacenterconfig_types.go
@@ -43,6 +43,31 @@ type NutanixDatacenterConfigSpec struct {
 	// for the Nutanix Prism Central. The namespace for the secret is assumed to be a constant i.e. eksa-system.
 	// +optional
 	CredentialRef *Ref `json:"credentialRef,omitempty"`
+
+	// FailureDomains is the optional list of failure domains for the Nutanix Datacenter.
+	// +optional
+	FailureDomains []NutanixDatacenterFailureDomain `json:"failureDomains,omitempty"`
+}
+
+// NutanixDatacenterFailureDomain defines the failure domain for the Nutanix Datacenter.
+type NutanixDatacenterFailureDomain struct {
+	// Name is the unique name of the failure domain.
+	// Name must be between 1 and 64 characters long.
+	// It must consist of only lower case alphanumeric characters and hyphens (-).
+	// It must start and end with an alphanumeric character.
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=64
+	// +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
+	Name string `json:"name"`
+
+	// Cluster is the Prism Element cluster name or uuid that is connected to the Prism Central.
+	// +kubebuilder:validation:Required
+	Cluster NutanixResourceIdentifier `json:"cluster,omitempty"`
+
+	// Subnets holds the list of subnets identifiers cluster's network subnets.
+	// +kubebuilder:validation:Required
+	Subnets []NutanixResourceIdentifier `json:"subnets,omitempty"`
 }
 
 // NutanixDatacenterConfigStatus defines the observed state of NutanixDatacenterConfig.
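
For illustration, a minimal Go sketch (not part of the patch) of how the Validate rules added above treat a failure-domain entry. It assumes the eks-anywhere v1alpha1 package as modified by this commit; the object, namespace, and secret names are placeholder assumptions.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)

func main() {
	clusterName := "prism-cluster-1"

	dc := &v1alpha1.NutanixDatacenterConfig{
		// Namespace/Name feed the "default/eksa-unit-test" suffix in the error message.
		ObjectMeta: metav1.ObjectMeta{Name: "eksa-unit-test", Namespace: "default"},
		Spec: v1alpha1.NutanixDatacenterConfigSpec{
			Endpoint:      "prism.nutanix.com",
			Port:          9440,
			CredentialRef: &v1alpha1.Ref{Kind: "Secret", Name: "nutanix-credentials"},
			FailureDomains: []v1alpha1.NutanixDatacenterFailureDomain{
				{
					Name:    "pe1",
					Cluster: v1alpha1.NutanixResourceIdentifier{Type: v1alpha1.NutanixIdentifierName, Name: &clusterName},
					// A uuid-typed identifier with an empty UUID, mirroring the
					// invalid testdata file: Validate is expected to reject it.
					Subnets: []v1alpha1.NutanixResourceIdentifier{
						{Type: v1alpha1.NutanixIdentifierUUID, UUID: new(string)},
					},
				},
			},
		},
	}

	// Expected to print:
	// NutanixDatacenterConfig.Spec.FailureDomains.Subnets: missing subnet UUID: default/eksa-unit-test
	if err := dc.Validate(); err != nil {
		fmt.Println(err)
	}
}

A name-typed identifier is checked symmetrically: NutanixIdentifierName with a nil or empty Name fails with the corresponding "missing subnet name" (or "missing cluster name") message produced by createValidateNutanixResourceFunc below.
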
@@ -140,9 +165,42 @@ func (in *NutanixDatacenterConfig) Validate() error { } } + if in.Spec.FailureDomains != nil && len(in.Spec.FailureDomains) != 0 { + dccName := in.Namespace + "/" + in.Name + validateClusterResourceIdentifier := createValidateNutanixResourceFunc("NutanixDatacenterConfig.Spec.FailureDomains.Cluster", "cluster", dccName) + validateSubnetResourceIdentifier := createValidateNutanixResourceFunc("NutanixDatacenterConfig.Spec.FailureDomains.Subnets", "subnet", dccName) + for _, fd := range in.Spec.FailureDomains { + if err := validateClusterResourceIdentifier(&fd.Cluster); err != nil { + return err + } + + for _, subnet := range fd.Subnets { + if err := validateSubnetResourceIdentifier(&subnet); err != nil { + return err + } + } + } + } + return nil } +func createValidateNutanixResourceFunc(msgPrefix, entityName, mfstName string) func(*NutanixResourceIdentifier) error { + return func(ntnxRId *NutanixResourceIdentifier) error { + if ntnxRId.Type != NutanixIdentifierName && ntnxRId.Type != NutanixIdentifierUUID { + return fmt.Errorf("%s: invalid identifier type for %s: %s", msgPrefix, entityName, ntnxRId.Type) + } + + if ntnxRId.Type == NutanixIdentifierName && (ntnxRId.Name == nil || *ntnxRId.Name == "") { + return fmt.Errorf("%s: missing %s name: %s", msgPrefix, entityName, mfstName) + } else if ntnxRId.Type == NutanixIdentifierUUID && (ntnxRId.UUID == nil || *ntnxRId.UUID == "") { + return fmt.Errorf("%s: missing %s UUID: %s", msgPrefix, entityName, mfstName) + } + + return nil + } +} + // SetDefaults sets default values for the NutanixDatacenterConfig object. func (in *NutanixDatacenterConfig) SetDefaults() { if in.Spec.CredentialRef == nil { diff --git a/pkg/api/v1alpha1/testdata/nutanix/datacenterconfig-invalid-failuredomains.yaml b/pkg/api/v1alpha1/testdata/nutanix/datacenterconfig-invalid-failuredomains.yaml new file mode 100644 index 000000000000..b25f74bc958c --- /dev/null +++ b/pkg/api/v1alpha1/testdata/nutanix/datacenterconfig-invalid-failuredomains.yaml @@ -0,0 +1,30 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + name: eksa-unit-test + kind: Secret + failureDomains: + - name: "pe1" + cluster: + type: name + name: "prism-cluster-1" + subnets: + - name: "prism-subnet-1" + type: "name" + - uuid: "" + type: "uuid" + - name: "pe2" + cluster: + type: "uuid" + uuid: "468b7b36-d15b-406a-90f7-46d1560c4f4e" + subnets: + - name: "prism-subnet-1" + type: "name" + - uuid: "3e716c09-0613-46f3-b46a-beb89aa02295" + type: "uuid" diff --git a/pkg/api/v1alpha1/testdata/nutanix/datacenterconfig-valid-failuredomains.yaml b/pkg/api/v1alpha1/testdata/nutanix/datacenterconfig-valid-failuredomains.yaml new file mode 100644 index 000000000000..02f806ff343e --- /dev/null +++ b/pkg/api/v1alpha1/testdata/nutanix/datacenterconfig-valid-failuredomains.yaml @@ -0,0 +1,30 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + name: eksa-unit-test + kind: Secret + failureDomains: + - name: "pe1" + cluster: + type: name + name: "prism-cluster-1" + subnets: + - name: "prism-subnet-1" + type: "name" + - uuid: "3e716c09-0613-46f3-b46a-beb89aa02295" + type: "uuid" + - name: "pe2" + cluster: + type: "uuid" + uuid: "468b7b36-d15b-406a-90f7-46d1560c4f4e" + subnets: + - name: "prism-subnet-1" + 
type: "name" + - uuid: "3e716c09-0613-46f3-b46a-beb89aa02295" + type: "uuid" diff --git a/pkg/api/v1alpha1/zz_generated.deepcopy.go b/pkg/api/v1alpha1/zz_generated.deepcopy.go index 14f6a15e57f1..b595f09e020a 100644 --- a/pkg/api/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/api/v1alpha1/zz_generated.deepcopy.go @@ -2012,6 +2012,13 @@ func (in *NutanixDatacenterConfigSpec) DeepCopyInto(out *NutanixDatacenterConfig *out = new(Ref) **out = **in } + if in.FailureDomains != nil { + in, out := &in.FailureDomains, &out.FailureDomains + *out = make([]NutanixDatacenterFailureDomain, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixDatacenterConfigSpec. @@ -2039,6 +2046,29 @@ func (in *NutanixDatacenterConfigStatus) DeepCopy() *NutanixDatacenterConfigStat return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixDatacenterFailureDomain) DeepCopyInto(out *NutanixDatacenterFailureDomain) { + *out = *in + in.Cluster.DeepCopyInto(&out.Cluster) + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]NutanixResourceIdentifier, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixDatacenterFailureDomain. +func (in *NutanixDatacenterFailureDomain) DeepCopy() *NutanixDatacenterFailureDomain { + if in == nil { + return nil + } + out := new(NutanixDatacenterFailureDomain) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NutanixMachineConfig) DeepCopyInto(out *NutanixMachineConfig) { *out = *in diff --git a/pkg/providers/nutanix/config/cp-template.yaml b/pkg/providers/nutanix/config/cp-template.yaml index 9d147c482598..82acdc53dc66 100644 --- a/pkg/providers/nutanix/config/cp-template.yaml +++ b/pkg/providers/nutanix/config/cp-template.yaml @@ -5,7 +5,33 @@ metadata: name: "{{.clusterName}}" namespace: "{{.eksaSystemNamespace}}" spec: +{{- if .failureDomains }} + failureDomains: + {{- range $index, $value := .failureDomains}} + - name: "{{ $value.Name }}" + cluster: + {{- if (eq $value.Cluster.Type "uuid") }} + type: "uuid" + uuid: "{{ $value.Cluster.UUID }}" + {{- else if (eq $value.Cluster.Type "name") }} + type: "name" + name: "{{ $value.Cluster.Name }}" + {{- end}} + subnets: + {{- range $value.Subnets}} + {{- if (eq .Type "uuid") }} + - type: "uuid" + uuid: "{{ .UUID }}" + {{- else if (eq .Type "name") }} + - type: "name" + name: "{{ .Name }}" + {{- end}} + {{- end}} + controlPlane: true + {{- end }} +{{- else }} failureDomains: [] +{{- end}} prismCentral: {{- if .nutanixAdditionalTrustBundle }} additionalTrustBundle: diff --git a/pkg/providers/nutanix/provider.go b/pkg/providers/nutanix/provider.go index e5a7de728682..bc330f656a4c 100644 --- a/pkg/providers/nutanix/provider.go +++ b/pkg/providers/nutanix/provider.go @@ -419,7 +419,6 @@ func needsNewEtcdTemplate(oldSpec, newSpec *cluster.Spec, oldNmc, newNmc *v1alph if oldSpec.Bundles.Spec.Number != newSpec.Bundles.Spec.Number { return true } - return AnyImmutableFieldChanged(oldNmc, newNmc) } diff --git a/pkg/providers/nutanix/provider_test.go b/pkg/providers/nutanix/provider_test.go index 119d683da6fa..5dd4bab83bbe 100644 --- a/pkg/providers/nutanix/provider_test.go +++ b/pkg/providers/nutanix/provider_test.go @@ -530,7 +530,20 @@ func TestNutanixProviderSetupAndValidateDeleteCluster(t *testing.T) { } func TestNutanixProviderSetupAndValidateUpgradeCluster(t *testing.T) { - provider := testDefaultNutanixProvider(t) + ctrl := gomock.NewController(t) + executable := mockexecutables.NewMockExecutable(ctrl) + executable.EXPECT().ExecuteWithStdin(gomock.Any(), gomock.Any(), gomock.Any()).Return(bytes.Buffer{}, nil).AnyTimes() + executable.EXPECT().Execute(gomock.Any(), "get", + "--ignore-not-found", "-o", "json", "--kubeconfig", "testdata/kubeconfig.yaml", "nutanixdatacenterconfigs.anywhere.eks.amazonaws.com", "--namespace", "default", "eksa-unit-test").Return(*bytes.NewBufferString(nutanixDatacenterConfigSpecJSON), nil).AnyTimes() + kubectl := executables.NewKubectl(executable) + mockClient := mocknutanix.NewMockClient(ctrl) + mockCertValidator := mockCrypto.NewMockTlsValidator(ctrl) + mockTransport := mocknutanix.NewMockRoundTripper(ctrl) + mockTransport.EXPECT().RoundTrip(gomock.Any()).Return(&http.Response{}, nil).AnyTimes() + mockHTTPClient := &http.Client{Transport: mockTransport} + mockWriter := filewritermocks.NewMockFileWriter(ctrl) + provider := testNutanixProvider(t, mockClient, kubectl, mockCertValidator, mockHTTPClient, mockWriter) + tests := []struct { name string clusterConfFile string @@ -558,7 +571,8 @@ func TestNutanixProviderSetupAndValidateUpgradeCluster(t *testing.T) { for _, tt := range tests { clusterSpec := test.NewFullClusterSpec(t, tt.clusterConfFile) - err := provider.SetupAndValidateUpgradeCluster(context.Background(), &types.Cluster{Name: "eksa-unit-test"}, clusterSpec, clusterSpec) + cluster := &types.Cluster{Name: "eksa-unit-test", KubeconfigFile: "testdata/kubeconfig.yaml"} + err := 
provider.SetupAndValidateUpgradeCluster(context.Background(), cluster, clusterSpec, clusterSpec) if tt.expectErr { assert.Error(t, err, tt.name) thenErrorExpected(t, tt.expectErrStr, err) diff --git a/pkg/providers/nutanix/template.go b/pkg/providers/nutanix/template.go index f1cb5e2493c7..24e93dcc03b2 100644 --- a/pkg/providers/nutanix/template.go +++ b/pkg/providers/nutanix/template.go @@ -7,6 +7,7 @@ import ( "sigs.k8s.io/yaml" + capxv1beta1 "github.com/nutanix-cloud-native/cluster-api-provider-nutanix/api/v1beta1" "github.com/nutanix-cloud-native/prism-go-client/environment/credentials" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" @@ -176,6 +177,8 @@ func buildTemplateMapCP( return nil, err } + failureDomains := generateNutanixFailureDomains(datacenterSpec.FailureDomains) + values := map[string]interface{}{ "auditPolicy": auditPolicy, "apiServerExtraArgs": apiServerExtraArgs.ToPartialYaml(), @@ -188,6 +191,7 @@ func buildTemplateMapCP( "controlPlaneTaints": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Taints, "eksaSystemNamespace": constants.EksaSystemNamespace, "format": format, + "failureDomains": failureDomains, "podCidrs": clusterSpec.Cluster.Spec.ClusterNetwork.Pods.CidrBlocks, "serviceCidrs": clusterSpec.Cluster.Spec.ClusterNetwork.Services.CidrBlocks, "kubernetesVersion": versionsBundle.KubeDistro.Kubernetes.Tag, @@ -460,3 +464,30 @@ func generateNoProxyList(clusterSpec *cluster.Spec) []string { return noProxyList } + +func generateNutanixFailureDomains(eksNutanixFailureDomains []v1alpha1.NutanixDatacenterFailureDomain) []capxv1beta1.NutanixFailureDomain { + var failureDomains []capxv1beta1.NutanixFailureDomain + for _, fd := range eksNutanixFailureDomains { + + subnets := []capxv1beta1.NutanixResourceIdentifier{} + for _, subnet := range fd.Subnets { + subnets = append(subnets, capxv1beta1.NutanixResourceIdentifier{ + Type: capxv1beta1.NutanixIdentifierType(subnet.Type), + Name: subnet.Name, + UUID: subnet.UUID, + }) + } + + failureDomains = append(failureDomains, capxv1beta1.NutanixFailureDomain{ + Name: fd.Name, + Cluster: capxv1beta1.NutanixResourceIdentifier{ + Type: capxv1beta1.NutanixIdentifierType(fd.Cluster.Type), + Name: fd.Cluster.Name, + UUID: fd.Cluster.UUID, + }, + Subnets: subnets, + ControlPlane: true, + }) + } + return failureDomains +} diff --git a/pkg/providers/nutanix/template_test.go b/pkg/providers/nutanix/template_test.go index d931c0bf6040..037081059f57 100644 --- a/pkg/providers/nutanix/template_test.go +++ b/pkg/providers/nutanix/template_test.go @@ -686,6 +686,34 @@ func TestTemplateBuilderEtcdEncryptionKubernetes129(t *testing.T) { } } +func TestTemplateBuilderFailureDomains(t *testing.T) { + for _, tc := range []struct { + Input string + Output string + }{ + { + Input: "testdata/cluster_nutanix_failure_domains.yaml", + Output: "testdata/expected_results_failure_domains.yaml", + }, + } { + clusterSpec := test.NewFullClusterSpec(t, tc.Input) + + machineCfg := clusterSpec.NutanixMachineConfig(clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name) + + t.Setenv(constants.EksaNutanixUsernameKey, "admin") + t.Setenv(constants.EksaNutanixPasswordKey, "password") + creds := GetCredsFromEnv() + + bldr := NewNutanixTemplateBuilder(&clusterSpec.NutanixDatacenter.Spec, &machineCfg.Spec, nil, + map[string]anywherev1.NutanixMachineConfigSpec{}, creds, time.Now) + + data, err := bldr.GenerateCAPISpecControlPlane(clusterSpec) + assert.NoError(t, err) + + test.AssertContentToFile(t, string(data), tc.Output) + } +} + func 
minimalNutanixConfigSpec(t *testing.T) (*anywherev1.NutanixDatacenterConfig, *anywherev1.NutanixMachineConfig, map[string]anywherev1.NutanixMachineConfigSpec) { dcConf := &anywherev1.NutanixDatacenterConfig{} err := yaml.Unmarshal([]byte(nutanixDatacenterConfigSpec), dcConf) diff --git a/pkg/providers/nutanix/testdata/cluster_nutanix_failure_domains.yaml b/pkg/providers/nutanix/testdata/cluster_nutanix_failure_domains.yaml new file mode 100644 index 000000000000..c5750e15cfb5 --- /dev/null +++ b/pkg/providers/nutanix/testdata/cluster_nutanix_failure_domains.yaml @@ -0,0 +1,87 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: test + namespace: default +spec: + kubernetesVersion: "1.19" + controlPlaneConfiguration: + name: test + count: 1 + endpoint: + host: test + machineGroupRef: + name: test + kind: NutanixMachineConfig + datacenterRef: + kind: NutanixDatacenterConfig + name: test + clusterNetwork: + cni: "cilium" + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 + workerNodeGroupConfigurations: + - count: 3 + machineGroupRef: + kind: NutanixMachineConfig + name: test +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + kind: Secret + name: "nutanix-credentials" + failureDomains: + - name: "pe1" + cluster: + type: name + name: "prism-cluster-1" + subnets: + - name: "prism-subnet-1" + type: "name" + - uuid: "3e716c09-0613-46f3-b46a-beb89aa02295" + type: "uuid" + - name: "pe2" + cluster: + type: "uuid" + uuid: "468b7b36-d15b-406a-90f7-46d1560c4f4e" + subnets: + - name: "prism-subnet-1" + type: "name" + - uuid: "3e716c09-0613-46f3-b46a-beb89aa02295" + type: "uuid" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixMachineConfig +metadata: + name: test + namespace: default +spec: + vcpusPerSocket: 1 + vcpuSockets: 4 + memorySize: 8Gi + image: + type: "name" + name: "prism-image-1-19" + cluster: + type: "name" + name: "prism-cluster" + subnet: + type: "name" + name: "prism-subnet" + systemDiskSize: 40Gi + osFamily: "ubuntu" + users: + - name: "mySshUsername" + sshAuthorizedKeys: + - "mySshAuthorizedKey" diff --git a/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains.yaml b/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains.yaml new file mode 100644 index 000000000000..25f95fa4cf24 --- /dev/null +++ b/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains.yaml @@ -0,0 +1,27 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + kind: Secret + name: "nutanix-credentials" + insecure: true + failureDomains: + - name: "pe1" + cluster: + type: name + name: "prism-cluster" + subnets: + - type: uuid + uuid: "2d166190-7759-4dc6-b835-923262d6b497" + - name: "pe2" + cluster: + type: uuid + uuid: "4d69ca7d-022f-49d1-a454-74535993bda4" + subnets: + - type: name + name: "prism-subnet" diff --git a/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_cluster.yaml b/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_cluster.yaml new file mode 100644 index 000000000000..91a7f99954f3 --- /dev/null +++ b/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_cluster.yaml @@ -0,0 +1,27 @@ +apiVersion: 
anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + kind: Secret + name: "nutanix-credentials" + insecure: true + failureDomains: + - name: "pe1" + cluster: + type: name + name: "prism-cluster" + subnets: + - type: uuid + uuid: "2d166190-7759-4dc6-b835-923262d6b497" + - name: "pe2" + cluster: + type: uuid + uuid: "4d69ca7d-022f-49d1-a454-00005993bda4" + subnets: + - type: name + name: "prism-subnet" diff --git a/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_name.yaml b/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_name.yaml new file mode 100644 index 000000000000..c4dda7d7650f --- /dev/null +++ b/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_name.yaml @@ -0,0 +1,27 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + kind: Secret + name: "nutanix-credentials" + insecure: true + failureDomains: + - name: "FIZZBUZZ!!!!" + cluster: + type: name + name: "prism-cluster" + subnets: + - type: uuid + uuid: "2d166190-7759-4dc6-b835-923262d6b497" + - name: "pe2" + cluster: + type: uuid + uuid: "4d69ca7d-022f-49d1-a454-74535993bda4" + subnets: + - type: name + name: "prism-subnet" diff --git a/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_subnet.yaml b/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_subnet.yaml new file mode 100644 index 000000000000..a35a86b484b2 --- /dev/null +++ b/pkg/providers/nutanix/testdata/datacenterConfig_with_failure_domains_invalid_subnet.yaml @@ -0,0 +1,27 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + kind: Secret + name: "nutanix-credentials" + insecure: true + failureDomains: + - name: "pe1" + cluster: + type: name + name: "prism-cluster" + subnets: + - type: uuid + uuid: "2d166190-7759-4dc6-b835-000062d6b497" + - name: "pe2" + cluster: + type: uuid + uuid: "4d69ca7d-022f-49d1-a454-74535993bda4" + subnets: + - type: name + name: "prism-subnet" diff --git a/pkg/providers/nutanix/testdata/expected_results_failure_domains.yaml b/pkg/providers/nutanix/testdata/expected_results_failure_domains.yaml new file mode 100644 index 000000000000..b3ff855aa819 --- /dev/null +++ b/pkg/providers/nutanix/testdata/expected_results_failure_domains.yaml @@ -0,0 +1,631 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixCluster +metadata: + name: "test" + namespace: "eksa-system" +spec: + failureDomains: + - name: "pe1" + cluster: + type: "name" + name: "prism-cluster-1" + subnets: + - type: "name" + name: "prism-subnet-1" + - type: "uuid" + uuid: "3e716c09-0613-46f3-b46a-beb89aa02295" + controlPlane: true + - name: "pe2" + cluster: + type: "uuid" + uuid: "468b7b36-d15b-406a-90f7-46d1560c4f4e" + subnets: + - type: "name" + name: "prism-subnet-1" + - type: "uuid" + uuid: "3e716c09-0613-46f3-b46a-beb89aa02295" + controlPlane: true + prismCentral: + address: "prism.nutanix.com" + port: 9440 + insecure: false + credentialRef: + name: "capx-test" + kind: Secret + controlPlaneEndpoint: + host: "test" + port: 6443 +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster 
+metadata: + labels: + cluster.x-k8s.io/cluster-name: "test" + name: "test" + namespace: "eksa-system" +spec: + clusterNetwork: + services: + cidrBlocks: [10.96.0.0/12] + pods: + cidrBlocks: [192.168.0.0/16] + serviceDomain: "cluster.local" + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: "test" + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixCluster + name: "test" +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: "test" + namespace: "eksa-system" +spec: + replicas: 1 + version: "v1.19.8-eks-1-19-4" + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + name: "" + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: "public.ecr.aws/eks-distro/kubernetes" + apiServer: + certSANs: + - localhost + - 127.0.0.1 + - 0.0.0.0 + extraArgs: + cloud-provider: external + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + controllerManager: + extraArgs: + cloud-provider: external + enable-hostpath-provisioner: "true" + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.0-eks-1-19-4 + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.14-eks-1-19-4 + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - name: kube-vip + image: + imagePullPolicy: IfNotPresent + args: + - manager + env: + - name: vip_arp + value: "true" + - name: address + value: "test" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: svc_enable + value: "false" + - name: lb_enable + value: "false" + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_TIME + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + resources: {} + hostNetwork: true + volumes: + - name: kubeconfig + hostPath: + type: FileOrCreate + path: /etc/kubernetes/admin.conf + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. 
+        - level: None
+          users: ["system:kube-proxy"]
+          verbs: ["watch"]
+          resources:
+          - group: "" # core
+            resources: ["endpoints", "services", "services/status"]
+        - level: None
+          users: ["kubelet"] # legacy kubelet identity
+          verbs: ["get"]
+          resources:
+          - group: "" # core
+            resources: ["nodes", "nodes/status"]
+        - level: None
+          userGroups: ["system:nodes"]
+          verbs: ["get"]
+          resources:
+          - group: "" # core
+            resources: ["nodes", "nodes/status"]
+        - level: None
+          users:
+          - system:kube-controller-manager
+          - system:kube-scheduler
+          - system:serviceaccount:kube-system:endpoint-controller
+          verbs: ["get", "update"]
+          namespaces: ["kube-system"]
+          resources:
+          - group: "" # core
+            resources: ["endpoints"]
+        - level: None
+          users: ["system:apiserver"]
+          verbs: ["get"]
+          resources:
+          - group: "" # core
+            resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
+        # Don't log HPA fetching metrics.
+        - level: None
+          users:
+          - system:kube-controller-manager
+          verbs: ["get", "list"]
+          resources:
+          - group: "metrics.k8s.io"
+        # Don't log these read-only URLs.
+        - level: None
+          nonResourceURLs:
+          - /healthz*
+          - /version
+          - /swagger*
+        # Don't log events requests.
+        - level: None
+          resources:
+          - group: "" # core
+            resources: ["events"]
+        # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
+        - level: Request
+          users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"]
+          verbs: ["update","patch"]
+          resources:
+          - group: "" # core
+            resources: ["nodes/status", "pods/status"]
+          omitStages:
+          - "RequestReceived"
+        - level: Request
+          userGroups: ["system:nodes"]
+          verbs: ["update","patch"]
+          resources:
+          - group: "" # core
+            resources: ["nodes/status", "pods/status"]
+          omitStages:
+          - "RequestReceived"
+        # deletecollection calls can be large, don't log responses for expected namespace deletions
+        - level: Request
+          users: ["system:serviceaccount:kube-system:namespace-controller"]
+          verbs: ["deletecollection"]
+          omitStages:
+          - "RequestReceived"
+        # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,
+        # so only log at the Metadata level.
+        - level: Metadata
+          resources:
+          - group: "" # core
+            resources: ["secrets", "configmaps"]
+          - group: authentication.k8s.io
+            resources: ["tokenreviews"]
+          omitStages:
+          - "RequestReceived"
+        - level: Request
+          resources:
+          - group: ""
+            resources: ["serviceaccounts/token"]
+        # Get responses can be large; skip them. 
+ - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd + # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 + #cgroup-driver: cgroupfs + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: "{{ ds.meta_data.hostname }}" + users: + - name: "mySshUsername" + lockPassword: false + sudo: ALL=(ALL) NOPASSWD:ALL + sshAuthorizedKeys: + - "mySshAuthorizedKey" + preKubeadmCommands: + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >> /etc/hosts + postKubeadmCommands: + - echo export KUBECONFIG=/etc/kubernetes/admin.conf >> /root/.bashrc + useExperimentalRetryJoin: true +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixMachineTemplate +metadata: + name: "" + namespace: "eksa-system" +spec: + template: + spec: + providerID: "nutanix://test-m1" + vcpusPerSocket: 1 + vcpuSockets: 4 + memorySize: 8Gi + systemDiskSize: 40Gi + image: + type: name + name: "prism-image-1-19" + + cluster: + type: name + name: "prism-cluster" + subnet: + - type: name + name: "prism-subnet" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-nutanix-ccm + namespace: "eksa-system" +data: + nutanix-ccm.yaml: | + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + namespace: kube-system + --- + kind: ConfigMap + apiVersion: v1 + metadata: + name: nutanix-config + namespace: kube-system + data: + nutanix_config.json: |- + { + "prismCentral": { + "address": "prism.nutanix.com", + "port": 9440, + "insecure": false, + 
"credentialRef": { + "kind": "secret", + "name": "nutanix-creds", + "namespace": "kube-system" + } + }, + "enableCustomLabeling": false, + "topologyDiscovery": { + "type": "Prism" + } + } + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - "*" + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + --- + kind: ClusterRoleBinding + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + labels: + k8s-app: nutanix-cloud-controller-manager + name: nutanix-cloud-controller-manager + namespace: kube-system + spec: + replicas: 1 + selector: + matchLabels: + k8s-app: nutanix-cloud-controller-manager + strategy: + type: Recreate + template: + metadata: + labels: + k8s-app: nutanix-cloud-controller-manager + spec: + hostNetwork: true + priorityClassName: system-cluster-critical + nodeSelector: + node-role.kubernetes.io/control-plane: "" + serviceAccountName: cloud-controller-manager + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: nutanix-cloud-controller-manager + topologyKey: kubernetes.io/hostname + dnsPolicy: Default + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 120 + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 120 + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + containers: + - image: "" + imagePullPolicy: IfNotPresent + name: nutanix-cloud-controller-manager + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + args: + - "--leader-elect=true" + - "--cloud-config=/etc/cloud/nutanix_config.json" + resources: + requests: + cpu: 100m + memory: 50Mi + volumeMounts: + - mountPath: /etc/cloud + name: nutanix-config-volume + readOnly: true + volumes: + - name: nutanix-config-volume + configMap: + name: nutanix-config +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: test-nutanix-ccm-crs + namespace: "eksa-system" +spec: + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: "test" + resources: + - kind: ConfigMap + name: test-nutanix-ccm + - 
kind: Secret
+    name: test-nutanix-ccm-secret
+  strategy: Reconcile
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: "test-nutanix-ccm-secret"
+  namespace: "eksa-system"
+stringData:
+  nutanix-ccm-secret.yaml: |
+    apiVersion: v1
+    kind: Secret
+    metadata:
+      name: nutanix-creds
+      namespace: kube-system
+    stringData:
+      credentials: |-
+        [
+          {
+            "type": "basic_auth",
+            "data": {
+              "prismCentral": {
+                "username": "admin",
+                "password": "password"
+              },
+              "prismElements": null
+            }
+          }
+        ]
+type: addons.cluster.x-k8s.io/resource-set
diff --git a/pkg/providers/nutanix/validator.go b/pkg/providers/nutanix/validator.go
index 2bce2a5c543c..d5cf8ad732ff 100644
--- a/pkg/providers/nutanix/validator.go
+++ b/pkg/providers/nutanix/validator.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"net/http"
+	"regexp"
 	"strconv"
 	"strings"
 
@@ -126,6 +127,37 @@ func (v *Validator) ValidateDatacenterConfig(ctx context.Context, client Client,
 		return err
 	}
 
+	if err := v.validateFailureDomains(ctx, client, config); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (v *Validator) validateFailureDomains(ctx context.Context, client Client, config *anywherev1.NutanixDatacenterConfig) error {
+	regexName, err := regexp.Compile("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$")
+	if err != nil {
+		return err
+	}
+
+	for _, fd := range config.Spec.FailureDomains {
+		if res := regexName.MatchString(fd.Name); !res {
+			errorStr := "failure domain name should contain only lowercase letters, digits, and hyphens, and should start with a lowercase letter or digit"
+			return fmt.Errorf(errorStr)
+		}
+
+		if err := v.validateClusterConfig(ctx, client, fd.Cluster); err != nil {
+			return err
+		}
+
+		for _, subnet := range fd.Subnets {
+			if err := v.validateSubnetConfig(ctx, client, subnet); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
 }
 
diff --git a/pkg/providers/nutanix/validator_test.go b/pkg/providers/nutanix/validator_test.go
index 802a365b3654..01fdb204aa17 100644
--- a/pkg/providers/nutanix/validator_test.go
+++ b/pkg/providers/nutanix/validator_test.go
@@ -5,7 +5,9 @@ import (
 	_ "embed"
 	"encoding/json"
 	"errors"
+	"fmt"
 	"net/http"
+	"strings"
 	"testing"
 
 	"github.com/golang/mock/gomock"
@@ -45,6 +47,18 @@ var nutanixDatacenterConfigSpecWithInvalidCredentialRefKind string
 //go:embed testdata/datacenterConfig_empty_credentialRef_name.yaml
 var nutanixDatacenterConfigSpecWithEmptyCredentialRefName string
 
+//go:embed testdata/datacenterConfig_with_failure_domains.yaml
+var nutanixDatacenterConfigSpecWithFailureDomain string
+
+//go:embed testdata/datacenterConfig_with_failure_domains_invalid_name.yaml
+var nutanixDatacenterConfigSpecWithFailureDomainInvalidName string
+
+//go:embed testdata/datacenterConfig_with_failure_domains_invalid_cluster.yaml
+var nutanixDatacenterConfigSpecWithFailureDomainInvalidCluster string
+
+//go:embed testdata/datacenterConfig_with_failure_domains_invalid_subnet.yaml
+var nutanixDatacenterConfigSpecWithFailureDomainInvalidSubnet string
+
 func fakeClusterList() *v3.ClusterListIntentResponse {
 	return &v3.ClusterListIntentResponse{
 		Entities: []*v3.ClusterIntentResponse{
@@ -82,6 +96,96 @@ func fakeSubnetList() *v3.SubnetListIntentResponse {
 	}
 }
 
+func fakeClusterListForDCTest(filter *string) (*v3.ClusterListIntentResponse, error) {
+	data := &v3.ClusterListIntentResponse{
+		Entities: []*v3.ClusterIntentResponse{
+			{
+				Metadata: &v3.Metadata{
+					UUID: utils.StringPtr("a15f6966-bfc7-4d1e-8575-224096fc1cdb"),
+				},
+				Spec: &v3.Cluster{
+					Name: utils.StringPtr("prism-cluster"),
+				},
+				Status: &v3.ClusterDefStatus{
+					Resources: 
&v3.ClusterObj{ + Config: &v3.ClusterConfig{ + ServiceList: []*string{utils.StringPtr("AOS")}, + }, + }, + }, + }, + { + Metadata: &v3.Metadata{ + UUID: utils.StringPtr("4d69ca7d-022f-49d1-a454-74535993bda4"), + }, + Spec: &v3.Cluster{ + Name: utils.StringPtr("prism-cluster-1"), + }, + Status: &v3.ClusterDefStatus{ + Resources: &v3.ClusterObj{ + Config: &v3.ClusterConfig{ + ServiceList: []*string{utils.StringPtr("AOS")}, + }, + }, + }, + }, + }, + } + + result := &v3.ClusterListIntentResponse{ + Entities: []*v3.ClusterIntentResponse{}, + } + + if filter != nil && *filter != "" { + str := strings.Replace(*filter, "name==", "", -1) + for _, cluster := range data.Entities { + if str == *cluster.Spec.Name { + result.Entities = append(result.Entities, cluster) + } + } + } + + return result, nil +} + +func fakeSubnetListForDCTest(filter *string) (*v3.SubnetListIntentResponse, error) { + data := &v3.SubnetListIntentResponse{ + Entities: []*v3.SubnetIntentResponse{ + { + Metadata: &v3.Metadata{ + UUID: utils.StringPtr("b15f6966-bfc7-4d1e-8575-224096fc1cdb"), + }, + Spec: &v3.Subnet{ + Name: utils.StringPtr("prism-subnet"), + }, + }, + { + Metadata: &v3.Metadata{ + UUID: utils.StringPtr("2d166190-7759-4dc6-b835-923262d6b497"), + }, + Spec: &v3.Subnet{ + Name: utils.StringPtr("prism-subnet-1"), + }, + }, + }, + } + + result := &v3.SubnetListIntentResponse{ + Entities: []*v3.SubnetIntentResponse{}, + } + + if filter != nil && *filter != "" { + str := strings.Replace(*filter, "name==", "", -1) + for _, subnet := range data.Entities { + if str == *subnet.Spec.Name { + result.Entities = append(result.Entities, subnet) + } + } + } + + return result, nil +} + func fakeImageList() *v3.ImageListIntentResponse { return &v3.ImageListIntentResponse{ Entities: []*v3.ImageIntentResponse{ @@ -596,11 +700,45 @@ func TestNutanixValidatorValidateDatacenterConfig(t *testing.T) { dcConfFile: nutanixDatacenterConfigSpecWithEmptyCredentialRefName, expectErr: true, }, + { + name: "valid failure domains", + dcConfFile: nutanixDatacenterConfigSpecWithFailureDomain, + expectErr: false, + }, + { + name: "failure domain with invalid name", + dcConfFile: nutanixDatacenterConfigSpecWithFailureDomainInvalidName, + expectErr: true, + }, + { + name: "failure domain with invalid cluster", + dcConfFile: nutanixDatacenterConfigSpecWithFailureDomainInvalidCluster, + expectErr: true, + }, + { + name: "failure domains with invalid subnet", + dcConfFile: nutanixDatacenterConfigSpecWithFailureDomainInvalidSubnet, + expectErr: true, + }, } ctrl := gomock.NewController(t) mockClient := mocknutanix.NewMockClient(ctrl) mockClient.EXPECT().GetCurrentLoggedInUser(gomock.Any()).Return(&v3.UserIntentResponse{}, nil).AnyTimes() + mockClient.EXPECT().ListCluster(gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, filters *v3.DSMetadata) (*v3.ClusterListIntentResponse, error) { + return fakeClusterListForDCTest(filters.Filter) + }, + ).AnyTimes() + mockClient.EXPECT().ListSubnet(gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, filters *v3.DSMetadata) (*v3.SubnetListIntentResponse, error) { + return fakeSubnetListForDCTest(filters.Filter) + }, + ).AnyTimes() + mockClient.EXPECT().GetSubnet(gomock.Any(), gomock.Eq("2d166190-7759-4dc6-b835-923262d6b497")).Return(nil, nil).AnyTimes() + mockClient.EXPECT().GetSubnet(gomock.Any(), gomock.Not("2d166190-7759-4dc6-b835-923262d6b497")).Return(nil, fmt.Errorf("")).AnyTimes() + mockClient.EXPECT().GetCluster(gomock.Any(), 
gomock.Eq("4d69ca7d-022f-49d1-a454-74535993bda4")).Return(nil, nil).AnyTimes()
+	mockClient.EXPECT().GetCluster(gomock.Any(), gomock.Not("4d69ca7d-022f-49d1-a454-74535993bda4")).Return(nil, fmt.Errorf("")).AnyTimes()
 
 	mockTLSValidator := mockCrypto.NewMockTlsValidator(ctrl)
 	mockTLSValidator.EXPECT().ValidateCert(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()

From 8a824344fd074dcc82056b8e4587be7a4d626156 Mon Sep 17 00:00:00 2001
From: Sai
Date: Fri, 7 Jun 2024 15:25:08 -0400
Subject: [PATCH 189/193] Added instructions to update kubeconfig after manual
 certs renewal or cluster upgrade (#7890)

* added instructions to update kubeconfig
* Updated active subscriptions deletion notes
* changes after review comments
* added br copy certs commands
* fix kubeconfig section
* fixed kubeconfig naming
* fixed kubeconfig path
* fixed grammar and syntax
* fixed username
* docs/
* switch to sftp - avail by default in ubuntu and bt
* added username comment
* fixed kubeconfig user naming for BR
* Update manually-renew-certs.md
* Update docs/content/en/docs/clustermgmt/security/manually-renew-certs.md

Co-authored-by: Chris Negus

* addition new line to fix syntax
* fix closing tab pane

---------

Co-authored-by: Veronica4036 <60287165+Veronica4036@users.noreply.github.com>
Co-authored-by: Chris Negus
---
 .../security/manually-renew-certs.md          | 78 ++++++++++++++++++-
 .../support/purchase-subscription.md          |  2 +-
 2 files changed, 77 insertions(+), 3 deletions(-)

diff --git a/docs/content/en/docs/clustermgmt/security/manually-renew-certs.md b/docs/content/en/docs/clustermgmt/security/manually-renew-certs.md
index 16097215c955..57d878594080 100644
--- a/docs/content/en/docs/clustermgmt/security/manually-renew-certs.md
+++ b/docs/content/en/docs/clustermgmt/security/manually-renew-certs.md
@@ -84,8 +84,9 @@ ${IMAGE_ID} tmp-cert-renew \
 {{< tab header="Ubuntu or RHEL" lang="bash" >}}
 sudo etcdctl --cacert=/etc/etcd/pki/ca.crt --cert=/etc/etcd/pki/etcdctl-etcd-client.crt --key=/etc/etcd/pki/etcdctl-etcd-client.key member list
 {{< /tab >}}
+
 {{< tab header="Bottlerocket" lang="bash" >}}
-ETCD_CONTAINER_ID=$(ctr -n k8s.io c ls | grep -w "etcd-io" | cut -d " " -f1)
+ETCD_CONTAINER_ID=$(ctr -n k8s.io c ls | grep -w "etcd-io" | cut -d " " -f1 | tail -1)
 ctr -n k8s.io t exec -t --exec-id etcd ${ETCD_CONTAINER_ID} etcdctl \
   --cacert=/var/lib/etcd/pki/ca.crt \
   --cert=/var/lib/etcd/pki/server.crt \
@@ -153,7 +154,17 @@ ${IMAGE_ID} tmp-cert-renew \
 {{< /tab >}}
 {{< /tabpane >}}
 
-3. If you have external etcd nodes, manually replace the `apiserver-etcd-client.crt` and `apiserver-etcd-client.key` file in `/etc/kubernetes/pki` (or `/var/lib/kubeadm/pki` in Bottlerocket) folder with the files you saved from any etcd node.
+3. If you have external etcd nodes, manually replace the `server-etcd-client.crt` and `apiserver-etcd-client.key` files in the `/etc/kubernetes/pki` (or `/var/lib/kubeadm/pki` in Bottlerocket) folder with the files you saved from any etcd node.
+
+   - **For Bottlerocket**:
+
+   ```
+   cp apiserver-etcd-client.key /tmp/
+   cp server-etcd-client.crt /tmp/
+   sudo sheltie
+   cp /run/host-containerd/io.containerd.runtime.v2.task/default/admin/rootfs/tmp/apiserver-etcd-client.key /var/lib/kubeadm/pki/
+   cp /run/host-containerd/io.containerd.runtime.v2.task/default/admin/rootfs/tmp/server-etcd-client.crt /var/lib/kubeadm/pki/
+   ```
 
 4. Restart static control plane pods. 
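[Editor's note on step 4 above: the patch leaves "restart static control plane pods" terse. One conventional way to do this is to briefly move the static pod manifests out of the kubelet's watched directory and back. The sketch below is illustrative only and not part of this patch; the manifest path assumes a kubeadm-style Ubuntu/RHEL layout, and the 20-second pause is an arbitrary grace period, not a value taken from the docs.]

```bash
# Sketch only (not from this patch): restart static control plane pods by
# moving their manifests out of the kubelet's staticPodPath and back.
# Assumes a kubeadm-style layout at /etc/kubernetes/manifests.
sudo mkdir -p /tmp/manifests-backup
sudo mv /etc/kubernetes/manifests/*.yaml /tmp/manifests-backup/
sleep 20   # give kubelet time to stop the pods; verify with: crictl ps
sudo mv /tmp/manifests-backup/*.yaml /etc/kubernetes/manifests/
```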
@@ -219,3 +230,66 @@ etcd:
       - https://xxx.xxx.xxx.xxx:2379
       - https://xxx.xxx.xxx.xxx:2379
 ```
+
+### What do I do if my local kubeconfig has expired?
+
+Your local kubeconfig, used to interact with the cluster, contains a certificate that expires after 1 year. When you rotate cluster certificates, a new kubeconfig with a new certificate is created as a Secret in the cluster. If you do not retrieve the new kubeconfig and your local kubeconfig certificate expires, you will receive the following error:
+
+```
+Error: Couldn't get current Server API group list: the server has asked for the client to provide credentials error: you must be logged in to the server.
+This error typically occurs when the cluster certificates have been renewed or extended during the upgrade process. To resolve this issue, you need to update your local kubeconfig file with the new cluster credentials.
+```
+
+You can extract your new kubeconfig using the following steps.
+
+1. SSH to one of the Control Plane nodes, export the kubeconfig from the secret object, and copy the kubeconfig file to the `/tmp` directory, as shown here:
+
+```
+ssh -i <private-key-file> <USER_NAME>@<CONTROL_PLANE_IP> # USER_NAME should be ec2-user for bottlerocket, ubuntu for Ubuntu ControlPlane machine Operating System
+
+```
+
+{{< tabpane >}}
+{{< tab header="Ubuntu or RHEL" lang="bash" >}}
+
+export CLUSTER_NAME="<cluster-name>"
+
+cat /var/lib/kubeadm/admin.conf
+export KUBECONFIG="/var/lib/kubeadm/admin.conf"
+
+kubectl get secret ${CLUSTER_NAME}-kubeconfig -n eksa-system -o yaml -o=jsonpath="{.data.value}" | base64 --decode > /tmp/user-admin.kubeconfig
+
+{{< /tab >}}
+
+{{< tab header="Bottlerocket" lang="bash" >}}
+
+# You would need to be in the admin container when you ssh to the Bottlerocket machine
+# open a root shell
+sudo sheltie
+
+cat /var/lib/kubeadm/admin.conf
+
+cat /var/lib/kubeadm/admin.conf > /run/host-containerd/io.containerd.runtime.v2.task/default/admin/rootfs/tmp/kubernetes-admin.kubeconfig
+exit # exit from the sudo sheltie container
+
+export CLUSTER_NAME="<cluster-name>"
+export KUBECONFIG="/tmp/kubernetes-admin.kubeconfig"
+kubectl get secret ${CLUSTER_NAME}-kubeconfig -n eksa-system -o yaml -o=jsonpath="{.data.value}" | base64 --decode > /tmp/user-admin.kubeconfig
+exit # exit from the Control Plane Machine
+
+{{< /tab >}}
+{{< /tabpane >}}
+Note: Install kubectl on the Control Plane Machine using the instructions [here](https://anywhere.eks.amazonaws.com/docs/getting-started/install/#manually-macos-and-linux)
+
+2. From your admin machine, download the kubeconfig file from the ControlPlane node and use it to access your Kubernetes Cluster.
+
+```
+ssh <ADMIN_MACHINE_IP>
+
+export CONTROLPLANE_IP="<controlplane-ip>"
+sftp -i <private-key-file> <USER_NAME>@${CONTROLPLANE_IP}:/tmp/user-admin.kubeconfig . # USER_NAME should be ec2-user for bottlerocket, ubuntu for Ubuntu ControlPlane machine
+
+ls -ltr
+export KUBECONFIG="user-admin.kubeconfig"
+
+kubectl get pods
+```
diff --git a/docs/content/en/docs/clustermgmt/support/purchase-subscription.md b/docs/content/en/docs/clustermgmt/support/purchase-subscription.md
index 4eadf44b9be5..e5aa699a15e8 100644
--- a/docs/content/en/docs/clustermgmt/support/purchase-subscription.md
+++ b/docs/content/en/docs/clustermgmt/support/purchase-subscription.md
@@ -184,7 +184,7 @@ aws eks tag-resource \
 
 ## Delete Subscriptions
 
->**_NOTE_** Only inactive subscriptions can be deleted. Deleting inactive subscriptions removes them from the AWS Management Console view and API responses.
+>**_NOTE_** Only inactive subscriptions can be deleted. 
Deleting inactive subscriptions removes them from the AWS Management Console view and API responses. To delete any Active Subscriptions, please create a Support Case with AWS Support team.
 
 ### AWS Management Console

From d48512061ab87d1a5c94f846a17f204e7ac7955a Mon Sep 17 00:00:00 2001
From: Cavaughn Browne <113555337+cxbrowne1207@users.noreply.github.com>
Date: Fri, 7 Jun 2024 16:14:08 -0500
Subject: [PATCH 190/193] update image-build support baremetal ubuntu uefi only (#8235)

---
 docs/content/en/docs/osmgmt/artifacts.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/content/en/docs/osmgmt/artifacts.md b/docs/content/en/docs/osmgmt/artifacts.md
index 648fa41f2a20..243b296f6752 100644
--- a/docs/content/en/docs/osmgmt/artifacts.md
+++ b/docs/content/en/docs/osmgmt/artifacts.md
@@ -1054,7 +1054,7 @@ The table below shows the possible firmware options for the hypervisor and OS co
 
 |            |       vSphere       |      Baremetal      | CloudStack | Nutanix | Snow |
 |:----------:|:-------------------:|:-------------------:|:----------:|:-------:|:----:|
-| **Ubuntu** | bios (default), efi | bios, efi (default) | bios       | bios    | bios |
+| **Ubuntu** | bios (default), efi | efi                 | bios       | bios    | bios |
 | **RHEL**   | bios                | bios                | bios       | bios    | bios |
 
 ### Mounting additional files

From 924f45b62b58d6a28dd4e5f10a0a96c9221544d4 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Sun, 9 Jun 2024 19:52:40 -0700
Subject: [PATCH 191/193] Bump github.com/aws/aws-sdk-go-v2 from 1.27.0 to 1.27.2 in /release/cli (#8267)

Bumps [github.com/aws/aws-sdk-go-v2](https://github.com/aws/aws-sdk-go-v2) from 1.27.0 to 1.27.2.
- [Release notes](https://github.com/aws/aws-sdk-go-v2/releases)
- [Commits](https://github.com/aws/aws-sdk-go-v2/compare/v1.27.0...v1.27.2)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go-v2
  dependency-type: direct:production
  update-type: version-update:semver-patch
... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- release/cli/go.mod | 2 +- release/cli/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/release/cli/go.mod b/release/cli/go.mod index 609fb83c0f7b..7f8d5c4064a2 100644 --- a/release/cli/go.mod +++ b/release/cli/go.mod @@ -4,7 +4,7 @@ go 1.22.3 require ( github.com/aws/aws-sdk-go v1.53.12 - github.com/aws/aws-sdk-go-v2 v1.27.0 + github.com/aws/aws-sdk-go-v2 v1.27.2 github.com/aws/eks-anywhere v0.18.0 github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e github.com/fsouza/go-dockerclient v1.11.0 diff --git a/release/cli/go.sum b/release/cli/go.sum index 0db305bb5311..1bae58eb7d72 100644 --- a/release/cli/go.sum +++ b/release/cli/go.sum @@ -58,8 +58,8 @@ github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:o github.com/aws/aws-sdk-go v1.38.40/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.53.12 h1:8f8K+YaTy2qwtGwVIo2Ftq22UCH96xQAX7Q0lyZKDiA= github.com/aws/aws-sdk-go v1.53.12/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= -github.com/aws/aws-sdk-go-v2 v1.27.0 h1:7bZWKoXhzI+mMR/HjdMx8ZCC5+6fY0lS5tr0bbgiLlo= -github.com/aws/aws-sdk-go-v2 v1.27.0/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= +github.com/aws/aws-sdk-go-v2 v1.27.2 h1:pLsTXqX93rimAOZG2FIYraDQstZaaGVVN4tNw65v0h8= +github.com/aws/aws-sdk-go-v2 v1.27.2/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e h1:GB6Cn9yKEt31mDF7RrVWyM9WoppNkGYth8zBPIJGJ+w= github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e/go.mod h1:p/KHVJAMv3kofnUnShkZ6pUnZYzm+LK2G7bIi8nnTKA= github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q= From 798e9cf98f78d13cc382cc0cf934e4aad4236d8c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 9 Jun 2024 19:52:49 -0700 Subject: [PATCH 192/193] Bump github.com/spf13/viper from 1.18.2 to 1.19.0 in /release/cli (#8247) Bumps [github.com/spf13/viper](https://github.com/spf13/viper) from 1.18.2 to 1.19.0. - [Release notes](https://github.com/spf13/viper/releases) - [Commits](https://github.com/spf13/viper/compare/v1.18.2...v1.19.0) --- updated-dependencies: - dependency-name: github.com/spf13/viper dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- release/cli/go.mod | 22 +++++++++---------- release/cli/go.sum | 54 +++++++++++++++++++++++++--------------------- 2 files changed, 41 insertions(+), 35 deletions(-) diff --git a/release/cli/go.mod b/release/cli/go.mod index 7f8d5c4064a2..e7133733b464 100644 --- a/release/cli/go.mod +++ b/release/cli/go.mod @@ -14,7 +14,7 @@ require ( github.com/onsi/gomega v1.33.1 github.com/pkg/errors v0.9.1 github.com/spf13/cobra v1.8.0 - github.com/spf13/viper v1.18.2 + github.com/spf13/viper v1.19.0 golang.org/x/sync v0.7.0 helm.sh/helm/v3 v3.15.1 k8s.io/apimachinery v0.30.1 @@ -89,7 +89,7 @@ require ( github.com/jmoiron/sqlx v1.3.5 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.0 // indirect + github.com/klauspost/compress v1.17.2 // indirect github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect github.com/lib/pq v1.10.9 // indirect @@ -118,7 +118,7 @@ require ( github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0 // indirect - github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/prometheus/client_golang v1.18.0 // indirect github.com/prometheus/client_model v0.5.0 // indirect @@ -139,26 +139,26 @@ require ( github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xlab/treeprint v1.2.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0 // indirect - go.opentelemetry.io/otel v1.20.0 // indirect - go.opentelemetry.io/otel/metric v1.20.0 // indirect - go.opentelemetry.io/otel/trace v1.20.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel v1.24.0 // indirect + go.opentelemetry.io/otel/metric v1.24.0 // indirect + go.opentelemetry.io/otel/trace v1.24.0 // indirect go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.22.0 // indirect golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect golang.org/x/mod v0.17.0 // indirect golang.org/x/net v0.24.0 // indirect - golang.org/x/oauth2 v0.15.0 // indirect + golang.org/x/oauth2 v0.18.0 // indirect golang.org/x/sys v0.19.0 // indirect golang.org/x/term v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.20.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect - google.golang.org/grpc v1.59.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c // indirect + google.golang.org/grpc v1.62.1 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/release/cli/go.sum b/release/cli/go.sum index 1bae58eb7d72..94344d671fda 100644 --- a/release/cli/go.sum +++ b/release/cli/go.sum @@ -283,6 
+283,8 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k= @@ -299,6 +301,7 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -382,8 +385,8 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= -github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= +github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -503,8 +506,8 @@ github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQ github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= -github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/freeport 
v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= @@ -584,14 +587,15 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= -github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= +github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -639,14 +643,14 @@ go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qL go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0 h1:1eHu3/pUSWaOgltNK3WJFaywKsTIr/PwvHyDmi0lQA0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0/go.mod h1:HyABWq60Uy1kjJSa2BVOxUVao8Cdick5AWSKPutqy6U= -go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc= -go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= -go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA= -go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= -go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ= -go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= +go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= +go.opentelemetry.io/otel/trace v1.24.0 
h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -700,7 +704,6 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -717,8 +720,8 @@ golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= -golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -768,6 +771,7 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= @@ -811,22 +815,22 @@ google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEt google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod 
h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c h1:lfpJ/2rWPa/kJgxyyXM8PrNnfCzcmxJ265mADgwmvLI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= -google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= +google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= +google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -835,6 +839,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= From 560fbf4eed4125e3209817365a0122212171ba1d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 9 Jun 2024 20:43:32 -0700 Subject: [PATCH 193/193] Bump 
github.com/aws/aws-sdk-go from 1.53.12 to 1.53.19 in /release/cli (#8266) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.53.12 to 1.53.19. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.53.12...v1.53.19) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- release/cli/go.mod | 2 +- release/cli/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/release/cli/go.mod b/release/cli/go.mod index e7133733b464..92c4c41819eb 100644 --- a/release/cli/go.mod +++ b/release/cli/go.mod @@ -3,7 +3,7 @@ module github.com/aws/eks-anywhere/release/cli go 1.22.3 require ( - github.com/aws/aws-sdk-go v1.53.12 + github.com/aws/aws-sdk-go v1.53.19 github.com/aws/aws-sdk-go-v2 v1.27.2 github.com/aws/eks-anywhere v0.18.0 github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e diff --git a/release/cli/go.sum b/release/cli/go.sum index 94344d671fda..c800a136d007 100644 --- a/release/cli/go.sum +++ b/release/cli/go.sum @@ -56,8 +56,8 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:l github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY= github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/aws/aws-sdk-go v1.38.40/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.53.12 h1:8f8K+YaTy2qwtGwVIo2Ftq22UCH96xQAX7Q0lyZKDiA= -github.com/aws/aws-sdk-go v1.53.12/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.53.19 h1:WEuWc918RXlIaPCyU11F7hH9H1ItK+8m2c/uoQNRUok= +github.com/aws/aws-sdk-go v1.53.19/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v1.27.2 h1:pLsTXqX93rimAOZG2FIYraDQstZaaGVVN4tNw65v0h8= github.com/aws/aws-sdk-go-v2 v1.27.2/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= github.com/aws/eks-distro-build-tooling/release v0.0.0-20211103003257-a7e2379eae5e h1:GB6Cn9yKEt31mDF7RrVWyM9WoppNkGYth8zBPIJGJ+w=