diff --git a/Makefile b/Makefile index 84d7b877b809..bc1293c39d8a 100644 --- a/Makefile +++ b/Makefile @@ -536,14 +536,14 @@ mocks: ## Generate mocks ${MOCKGEN} -destination=pkg/providers/vsphere/setupuser/mocks/client.go -package=mocks "github.com/aws/eks-anywhere/pkg/providers/vsphere/setupuser" GovcClient ${MOCKGEN} -destination=pkg/govmomi/mocks/client.go -package=mocks "github.com/aws/eks-anywhere/pkg/govmomi" VSphereClient,VMOMIAuthorizationManager,VMOMIFinder,VMOMISessionBuilder,VMOMIFinderBuilder,VMOMIAuthorizationManagerBuilder ${MOCKGEN} -destination=pkg/filewriter/mocks/filewriter.go -package=mocks "github.com/aws/eks-anywhere/pkg/filewriter" FileWriter - ${MOCKGEN} -destination=pkg/clustermanager/mocks/client_and_networking.go -package=mocks "github.com/aws/eks-anywhere/pkg/clustermanager" ClusterClient,Networking,AwsIamAuth,EKSAComponents,KubernetesClient,ClientFactory,ClusterApplier + ${MOCKGEN} -destination=pkg/clustermanager/mocks/client_and_networking.go -package=mocks "github.com/aws/eks-anywhere/pkg/clustermanager" ClusterClient,Networking,AwsIamAuth,EKSAComponents,KubernetesClient,ClientFactory,ClusterApplier,CAPIClient ${MOCKGEN} -destination=pkg/gitops/flux/mocks/client.go -package=mocks "github.com/aws/eks-anywhere/pkg/gitops/flux" FluxClient,KubeClient,GitOpsFluxClient,GitClient,Templater ${MOCKGEN} -destination=pkg/task/mocks/task.go -package=mocks "github.com/aws/eks-anywhere/pkg/task" Task ${MOCKGEN} -destination=pkg/bootstrapper/mocks/client.go -package=mocks "github.com/aws/eks-anywhere/pkg/bootstrapper" KindClient,KubernetesClient ${MOCKGEN} -destination=pkg/bootstrapper/mocks/bootstrapper.go -package=mocks "github.com/aws/eks-anywhere/pkg/bootstrapper" ClusterClient ${MOCKGEN} -destination=pkg/git/providers/github/mocks/github.go -package=mocks "github.com/aws/eks-anywhere/pkg/git/providers/github" GithubClient ${MOCKGEN} -destination=pkg/git/mocks/git.go -package=mocks "github.com/aws/eks-anywhere/pkg/git" Client,ProviderClient - 
${MOCKGEN} -destination=pkg/workflows/interfaces/mocks/clients.go -package=mocks "github.com/aws/eks-anywhere/pkg/workflows/interfaces" Bootstrapper,ClusterManager,GitOpsManager,Validator,CAPIManager,EksdInstaller,EksdUpgrader,PackageInstaller,ClusterUpgrader,ClusterCreator,ClientFactory + ${MOCKGEN} -destination=pkg/workflows/interfaces/mocks/clients.go -package=mocks "github.com/aws/eks-anywhere/pkg/workflows/interfaces" Bootstrapper,ClusterManager,GitOpsManager,Validator,CAPIManager,EksdInstaller,EksdUpgrader,PackageInstaller,ClusterUpgrader,ClusterCreator,ClientFactory,EksaInstaller ${MOCKGEN} -destination=pkg/git/gogithub/mocks/client.go -package=mocks "github.com/aws/eks-anywhere/pkg/git/gogithub" Client ${MOCKGEN} -destination=pkg/git/gitclient/mocks/client.go -package=mocks "github.com/aws/eks-anywhere/pkg/git/gitclient" GoGit ${MOCKGEN} -destination=pkg/validations/mocks/docker.go -package=mocks "github.com/aws/eks-anywhere/pkg/validations" DockerExecutable diff --git a/pkg/clustermanager/client.go b/pkg/clustermanager/client.go index 644b2bf474ca..19da3e558fb7 100644 --- a/pkg/clustermanager/client.go +++ b/pkg/clustermanager/client.go @@ -2,12 +2,19 @@ package clustermanager import ( "context" - "fmt" + eksdv1alpha1 "github.com/aws/eks-distro-build-tooling/release/api/v1alpha1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/clients/kubernetes" + "github.com/aws/eks-anywhere/pkg/executables" + "github.com/aws/eks-anywhere/pkg/filewriter" "github.com/aws/eks-anywhere/pkg/types" + releasev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1" ) // KubernetesClient allows to interact with the k8s api server. 
@@ -19,24 +26,51 @@ type KubernetesClient interface { WaitForDeployment(ctx context.Context, cluster *types.Cluster, timeout string, condition string, target string, namespace string) error UpdateAnnotationInNamespace(ctx context.Context, resourceType, objectName string, annotations map[string]string, cluster *types.Cluster, namespace string) error RemoveAnnotationInNamespace(ctx context.Context, resourceType, objectName, key string, cluster *types.Cluster, namespace string) error + CreateNamespaceIfNotPresent(ctx context.Context, kubeconfig string, namespace string) error + PauseCAPICluster(ctx context.Context, cluster, kubeconfig string) error + ResumeCAPICluster(ctx context.Context, cluster, kubeconfig string) error + ListObjects(ctx context.Context, resourceType, namespace, kubeconfig string, list kubernetes.ObjectList) error + DeleteGitOpsConfig(ctx context.Context, cluster *types.Cluster, name string, namespace string) error + DeleteEKSACluster(ctx context.Context, cluster *types.Cluster, name string, namespace string) error + DeleteAWSIamConfig(ctx context.Context, cluster *types.Cluster, name string, namespace string) error + DeleteOIDCConfig(ctx context.Context, cluster *types.Cluster, name string, namespace string) error + DeleteCluster(ctx context.Context, cluster, clusterToDelete *types.Cluster) error + WaitForClusterReady(ctx context.Context, cluster *types.Cluster, timeout string, clusterName string) error + WaitForControlPlaneAvailable(ctx context.Context, cluster *types.Cluster, timeout string, newClusterName string) error + WaitForControlPlaneReady(ctx context.Context, cluster *types.Cluster, timeout string, newClusterName string) error + WaitForControlPlaneNotReady(ctx context.Context, cluster *types.Cluster, timeout string, newClusterName string) error + WaitForManagedExternalEtcdReady(ctx context.Context, cluster *types.Cluster, timeout string, newClusterName string) error + WaitForManagedExternalEtcdNotReady(ctx context.Context, cluster 
*types.Cluster, timeout string, newClusterName string) error + GetEksaGitOpsConfig(ctx context.Context, gitOpsConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.GitOpsConfig, error) + GetEksaFluxConfig(ctx context.Context, fluxConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.FluxConfig, error) + GetEksaOIDCConfig(ctx context.Context, oidcConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.OIDCConfig, error) + GetEksaAWSIamConfig(ctx context.Context, awsIamConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.AWSIamConfig, error) + SaveLog(ctx context.Context, cluster *types.Cluster, deployment *types.Deployment, fileName string, writer filewriter.FileWriter) error + GetMachines(ctx context.Context, cluster *types.Cluster, clusterName string) ([]types.Machine, error) + GetClusters(ctx context.Context, cluster *types.Cluster) ([]types.CAPICluster, error) + GetEksaCluster(ctx context.Context, cluster *types.Cluster, clusterName string) (*v1alpha1.Cluster, error) + GetEksaVSphereDatacenterConfig(ctx context.Context, VSphereDatacenterName string, kubeconfigFile string, namespace string) (*v1alpha1.VSphereDatacenterConfig, error) + UpdateEnvironmentVariablesInNamespace(ctx context.Context, resourceType, resourceName string, envMap map[string]string, cluster *types.Cluster, namespace string) error + GetEksaVSphereMachineConfig(ctx context.Context, VSphereDatacenterName string, kubeconfigFile string, namespace string) (*v1alpha1.VSphereMachineConfig, error) + GetEksaCloudStackMachineConfig(ctx context.Context, cloudstackMachineConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.CloudStackMachineConfig, error) + SetEksaControllerEnvVar(ctx context.Context, envVar, envVarVal, kubeconfig string) error + DeletePackageResources(ctx context.Context, managementCluster *types.Cluster, clusterName string) error + ValidateControlPlaneNodes(ctx context.Context, cluster *types.Cluster, 
clusterName string) error + ValidateWorkerNodes(ctx context.Context, clusterName string, kubeconfigFile string) error + CountMachineDeploymentReplicasReady(ctx context.Context, clusterName string, kubeconfigFile string) (int, int, error) + GetBundles(ctx context.Context, kubeconfigFile, name, namespace string) (*releasev1alpha1.Bundles, error) + GetApiServerUrl(ctx context.Context, cluster *types.Cluster) (string, error) + KubeconfigSecretAvailable(ctx context.Context, kubeconfig string, clusterName string, namespace string) (bool, error) + DeleteOldWorkerNodeGroup(ctx context.Context, machineDeployment *clusterv1.MachineDeployment, kubeconfig string) error + GetKubeadmControlPlane(ctx context.Context, cluster *types.Cluster, clusterName string, opts ...executables.KubectlOpt) (*controlplanev1.KubeadmControlPlane, error) + GetMachineDeploymentsForCluster(ctx context.Context, clusterName string, opts ...executables.KubectlOpt) ([]clusterv1.MachineDeployment, error) + GetMachineDeployment(ctx context.Context, workerNodeGroupName string, opts ...executables.KubectlOpt) (*clusterv1.MachineDeployment, error) + GetEksdRelease(ctx context.Context, name, namespace, kubeconfigFile string) (*eksdv1alpha1.Release, error) + GetConfigMap(ctx context.Context, kubeconfigFile, name, namespace string) (*corev1.ConfigMap, error) } -type clusterManagerClient struct { - ClusterClient -} - -func newClient(clusterClient ClusterClient) *clusterManagerClient { - return &clusterManagerClient{ClusterClient: clusterClient} -} - -func (c *clusterManagerClient) waitForDeployments(ctx context.Context, deploymentsByNamespace map[string][]string, cluster *types.Cluster, timeout string) error { - for namespace, deployments := range deploymentsByNamespace { - for _, deployment := range deployments { - err := c.WaitForDeployment(ctx, cluster, timeout, "Available", deployment, namespace) - if err != nil { - return fmt.Errorf("waiting for %s in namespace %s: %v", deployment, namespace, err) - } - } - 
} - return nil +// ClusterClient is an interface that has both the clusterctl client and the kubernetes retrier client. +type ClusterClient interface { + CAPIClient + KubernetesClient } diff --git a/pkg/clustermanager/cluster_manager.go b/pkg/clustermanager/cluster_manager.go index 4e18d6070494..61d766a91182 100644 --- a/pkg/clustermanager/cluster_manager.go +++ b/pkg/clustermanager/cluster_manager.go @@ -13,14 +13,12 @@ import ( "strings" "time" - eksdv1alpha1 "github.com/aws/eks-distro-build-tooling/release/api/v1alpha1" etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/integer" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" "sigs.k8s.io/yaml" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" @@ -39,7 +37,6 @@ import ( "github.com/aws/eks-anywhere/pkg/retrier" "github.com/aws/eks-anywhere/pkg/templater" "github.com/aws/eks-anywhere/pkg/types" - releasev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1" ) const ( @@ -80,7 +77,7 @@ var ( type ClusterManager struct { eksaComponents EKSAComponents ClientFactory ClientFactory - clusterClient *RetrierClient + clusterClient ClusterClient retrier *retrier.Retrier writer filewriter.FileWriter networking Networking @@ -106,54 +103,12 @@ type ClientFactory interface { BuildClientFromKubeconfig(kubeconfigPath string) (kubernetes.Client, error) } -type ClusterClient interface { - KubernetesClient +// CAPIClient has the clusterctl methods. 
+type CAPIClient interface { BackupManagement(ctx context.Context, cluster *types.Cluster, managementStatePath, clusterName string) error MoveManagement(ctx context.Context, from, target *types.Cluster, clusterName string) error - WaitForClusterReady(ctx context.Context, cluster *types.Cluster, timeout string, clusterName string) error - WaitForControlPlaneAvailable(ctx context.Context, cluster *types.Cluster, timeout string, newClusterName string) error - WaitForControlPlaneReady(ctx context.Context, cluster *types.Cluster, timeout string, newClusterName string) error - WaitForControlPlaneNotReady(ctx context.Context, cluster *types.Cluster, timeout string, newClusterName string) error - WaitForManagedExternalEtcdReady(ctx context.Context, cluster *types.Cluster, timeout string, newClusterName string) error - WaitForManagedExternalEtcdNotReady(ctx context.Context, cluster *types.Cluster, timeout string, newClusterName string) error - GetWorkloadKubeconfig(ctx context.Context, clusterName string, cluster *types.Cluster) ([]byte, error) - GetEksaGitOpsConfig(ctx context.Context, gitOpsConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.GitOpsConfig, error) - GetEksaFluxConfig(ctx context.Context, fluxConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.FluxConfig, error) - GetEksaOIDCConfig(ctx context.Context, oidcConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.OIDCConfig, error) - GetEksaAWSIamConfig(ctx context.Context, awsIamConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.AWSIamConfig, error) - DeleteCluster(ctx context.Context, managementCluster, clusterToDelete *types.Cluster) error - DeleteGitOpsConfig(ctx context.Context, managementCluster *types.Cluster, gitOpsName, namespace string) error - DeleteOIDCConfig(ctx context.Context, managementCluster *types.Cluster, oidcConfigName, oidcConfigNamespace string) error - DeleteAWSIamConfig(ctx context.Context, managementCluster 
*types.Cluster, awsIamConfigName, awsIamConfigNamespace string) error - DeleteEKSACluster(ctx context.Context, managementCluster *types.Cluster, eksaClusterName, eksaClusterNamespace string) error - DeletePackageResources(ctx context.Context, managementCluster *types.Cluster, clusterName string) error InitInfrastructure(ctx context.Context, clusterSpec *cluster.Spec, cluster *types.Cluster, provider providers.Provider) error - WaitForDeployment(ctx context.Context, cluster *types.Cluster, timeout string, condition string, target string, namespace string) error - SaveLog(ctx context.Context, cluster *types.Cluster, deployment *types.Deployment, fileName string, writer filewriter.FileWriter) error - GetMachines(ctx context.Context, cluster *types.Cluster, clusterName string) ([]types.Machine, error) - GetClusters(ctx context.Context, cluster *types.Cluster) ([]types.CAPICluster, error) - PauseCAPICluster(ctx context.Context, cluster, kubeconfig string) error - ResumeCAPICluster(ctx context.Context, cluster, kubeconfig string) error - GetEksaCluster(ctx context.Context, cluster *types.Cluster, clusterName string) (*v1alpha1.Cluster, error) - GetEksaVSphereDatacenterConfig(ctx context.Context, VSphereDatacenterName string, kubeconfigFile string, namespace string) (*v1alpha1.VSphereDatacenterConfig, error) - UpdateEnvironmentVariablesInNamespace(ctx context.Context, resourceType, resourceName string, envMap map[string]string, cluster *types.Cluster, namespace string) error - GetEksaVSphereMachineConfig(ctx context.Context, VSphereDatacenterName string, kubeconfigFile string, namespace string) (*v1alpha1.VSphereMachineConfig, error) - GetEksaCloudStackMachineConfig(ctx context.Context, cloudstackMachineConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.CloudStackMachineConfig, error) - SetEksaControllerEnvVar(ctx context.Context, envVar, envVarVal, kubeconfig string) error - CreateNamespaceIfNotPresent(ctx context.Context, kubeconfig string, namespace 
string) error - ValidateControlPlaneNodes(ctx context.Context, cluster *types.Cluster, clusterName string) error - ValidateWorkerNodes(ctx context.Context, clusterName string, kubeconfigFile string) error - CountMachineDeploymentReplicasReady(ctx context.Context, clusterName string, kubeconfigFile string) (int, int, error) - GetBundles(ctx context.Context, kubeconfigFile, name, namespace string) (*releasev1alpha1.Bundles, error) - GetApiServerUrl(ctx context.Context, cluster *types.Cluster) (string, error) - KubeconfigSecretAvailable(ctx context.Context, kubeconfig string, clusterName string, namespace string) (bool, error) - DeleteOldWorkerNodeGroup(ctx context.Context, machineDeployment *clusterv1.MachineDeployment, kubeconfig string) error - GetKubeadmControlPlane(ctx context.Context, cluster *types.Cluster, clusterName string, opts ...executables.KubectlOpt) (*controlplanev1.KubeadmControlPlane, error) - GetMachineDeploymentsForCluster(ctx context.Context, clusterName string, opts ...executables.KubectlOpt) ([]clusterv1.MachineDeployment, error) - GetMachineDeployment(ctx context.Context, workerNodeGroupName string, opts ...executables.KubectlOpt) (*clusterv1.MachineDeployment, error) - GetEksdRelease(ctx context.Context, name, namespace, kubeconfigFile string) (*eksdv1alpha1.Release, error) - ListObjects(ctx context.Context, resourceType, namespace, kubeconfig string, list kubernetes.ObjectList) error - GetConfigMap(ctx context.Context, kubeconfigFile, name, namespace string) (*corev1.ConfigMap, error) + GetWorkloadKubeconfig(ctx context.Context, clusterName string, cluster *types.Cluster) ([]byte, error) } type Networking interface { @@ -182,7 +137,7 @@ func DefaultRetrier() *retrier.Retrier { } // New constructs a new ClusterManager. 
-func New(client ClientFactory, clusterClient *RetrierClient, networking Networking, writer filewriter.FileWriter, diagnosticBundleFactory diagnostics.DiagnosticBundleFactory, awsIamAuth AwsIamAuth, eksaComponents EKSAComponents, opts ...ClusterManagerOpt) *ClusterManager { +func New(client ClientFactory, clusterClient ClusterClient, networking Networking, writer filewriter.FileWriter, diagnosticBundleFactory diagnostics.DiagnosticBundleFactory, awsIamAuth AwsIamAuth, eksaComponents EKSAComponents, opts ...ClusterManagerOpt) *ClusterManager { c := &ClusterManager{ eksaComponents: eksaComponents, ClientFactory: client, @@ -258,7 +213,6 @@ func WithNodeStartupTimeout(timeout time.Duration) ClusterManagerOpt { func WithRetrier(retrier *retrier.Retrier) ClusterManagerOpt { return func(c *ClusterManager) { - c.clusterClient.retrier = retrier c.retrier = retrier } } @@ -794,19 +748,19 @@ func (c *ClusterManager) InstallCAPI(ctx context.Context, clusterSpec *cluster.S } func (c *ClusterManager) waitForCAPI(ctx context.Context, cluster *types.Cluster, provider providers.Provider, externalEtcdTopology bool) error { - err := c.clusterClient.waitForDeployments(ctx, internal.CAPIDeployments, cluster, c.deploymentWaitTimeout.String()) + err := c.waitForDeployments(ctx, internal.CAPIDeployments, cluster, c.deploymentWaitTimeout.String()) if err != nil { return err } if externalEtcdTopology { - err := c.clusterClient.waitForDeployments(ctx, internal.ExternalEtcdDeployments, cluster, c.deploymentWaitTimeout.String()) + err := c.waitForDeployments(ctx, internal.ExternalEtcdDeployments, cluster, c.deploymentWaitTimeout.String()) if err != nil { return err } } - err = c.clusterClient.waitForDeployments(ctx, provider.GetDeployments(), cluster, c.deploymentWaitTimeout.String()) + err = c.waitForDeployments(ctx, provider.GetDeployments(), cluster, c.deploymentWaitTimeout.String()) if err != nil { return err } @@ -814,6 +768,18 @@ func (c *ClusterManager) waitForCAPI(ctx context.Context, 
cluster *types.Cluster return nil } +func (c *ClusterManager) waitForDeployments(ctx context.Context, deploymentsByNamespace map[string][]string, cluster *types.Cluster, timeout string) error { + for namespace, deployments := range deploymentsByNamespace { + for _, deployment := range deployments { + err := c.clusterClient.WaitForDeployment(ctx, cluster, timeout, "Available", deployment, namespace) + if err != nil { + return fmt.Errorf("waiting for %s in namespace %s: %v", deployment, namespace, err) + } + } + } + return nil +} + func (c *ClusterManager) InstallNetworking(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec, provider providers.Provider) error { return c.networking.Install(ctx, cluster, clusterSpec, getProviderNamespaces(provider.GetDeployments())) } diff --git a/pkg/clustermanager/cluster_manager_test.go b/pkg/clustermanager/cluster_manager_test.go index 33bb65bcc308..891bcad30cbc 100644 --- a/pkg/clustermanager/cluster_manager_test.go +++ b/pkg/clustermanager/cluster_manager_test.go @@ -616,15 +616,11 @@ func TestClusterManagerUpgradeSelfManagedClusterFailClientError(t *testing.T) { tt := newSpecChangedTest(t) mockCtrl := gomock.NewController(t) - m := &clusterManagerMocks{ - client: mocksmanager.NewMockClusterClient(mockCtrl), - } - client := clustermanager.NewRetrierClient(m.client, clustermanager.DefaultRetrier()) + cf := mocksmanager.NewMockClientFactory(mockCtrl) cf.EXPECT().BuildClientFromKubeconfig("").Return(nil, errors.New("can't build client")) - c := clustermanager.New(cf, client, nil, nil, nil, nil, nil) + c := clustermanager.New(cf, tt.mocks.client, nil, nil, nil, nil, nil) tt.clusterManager = c - tt.mocks.client = m.client tt.mocks.client.EXPECT().GetEksaCluster(tt.ctx, tt.cluster, tt.clusterSpec.Cluster.Name).Return(tt.oldClusterConfig, nil) @@ -1325,7 +1321,6 @@ func TestClusterManagerBackupCAPIRetrySuccess(t *testing.T) { ctx := context.Background() c, m := newClusterManager(t) - // 
m.client.EXPECT().BackupManagement(ctx, from, managementStatePath) firstTry := m.client.EXPECT().BackupManagement(ctx, from, managementStatePath, from.Name).Return(errors.New("Error: failed to connect to the management cluster: action failed after 9 attempts: Get \"https://127.0.0.1:61994/api?timeout=30s\": EOF")) secondTry := m.client.EXPECT().BackupManagement(ctx, from, managementStatePath, from.Name).Return(nil) gomock.InOrder( @@ -1837,11 +1832,10 @@ func TestClusterManagerCreateEKSAResourcesFailureBundles(t *testing.T) { diagnosticsBundle: mocksdiagnostics.NewMockDiagnosticBundle(mockCtrl), eksaComponents: mocksmanager.NewMockEKSAComponents(mockCtrl), } - client := clustermanager.NewRetrierClient(m.client, retrier.NewWithMaxRetries(1, 1)) fakeClient := test.NewFakeKubeClient() cf := mocksmanager.NewMockClientFactory(mockCtrl) cf.EXPECT().BuildClientFromKubeconfig("").Return(fakeClient, nil).AnyTimes() - c := clustermanager.New(cf, client, m.networking, m.writer, m.diagnosticsFactory, m.awsIamAuth, m.eksaComponents) + c := clustermanager.New(cf, m.client, m.networking, m.writer, m.diagnosticsFactory, m.awsIamAuth, m.eksaComponents) m.client.EXPECT().CreateNamespaceIfNotPresent(ctx, gomock.Any(), tt.clusterSpec.Cluster.Namespace).Return(nil) m.client.EXPECT().ApplyKubeSpecFromBytes(ctx, gomock.Any(), gomock.Any()).Return(nil) @@ -1883,11 +1877,10 @@ func TestClusterManagerCreateEKSAResourcesFailureEKSARelease(t *testing.T) { diagnosticsBundle: mocksdiagnostics.NewMockDiagnosticBundle(mockCtrl), eksaComponents: mocksmanager.NewMockEKSAComponents(mockCtrl), } - client := clustermanager.NewRetrierClient(m.client, retrier.NewWithMaxRetries(1, 1)) fakeClient := test.NewFakeKubeClient() cf := mocksmanager.NewMockClientFactory(mockCtrl) cf.EXPECT().BuildClientFromKubeconfig("").Return(fakeClient, nil).AnyTimes() - c := clustermanager.New(cf, client, m.networking, m.writer, m.diagnosticsFactory, m.awsIamAuth, m.eksaComponents) + c := clustermanager.New(cf, m.client, 
m.networking, m.writer, m.diagnosticsFactory, m.awsIamAuth, m.eksaComponents) m.client.EXPECT().CreateNamespaceIfNotPresent(ctx, gomock.Any(), tt.clusterSpec.Cluster.Namespace).Return(nil) m.client.EXPECT().GetConfigMap(ctx, tt.cluster.KubeconfigFile, gomock.Any(), gomock.Any()).Return(configMap, nil) @@ -1918,11 +1911,10 @@ func TestClusterManagerCreateEKSAResourcesNewUpgraderConfigMap(t *testing.T) { diagnosticsBundle: mocksdiagnostics.NewMockDiagnosticBundle(mockCtrl), eksaComponents: mocksmanager.NewMockEKSAComponents(mockCtrl), } - client := clustermanager.NewRetrierClient(m.client, retrier.NewWithMaxRetries(1, 1)) fakeClient := test.NewFakeKubeClient() cf := mocksmanager.NewMockClientFactory(mockCtrl) cf.EXPECT().BuildClientFromKubeconfig("").Return(fakeClient, nil).AnyTimes() - c := clustermanager.New(cf, client, m.networking, m.writer, m.diagnosticsFactory, m.awsIamAuth, m.eksaComponents) + c := clustermanager.New(cf, m.client, m.networking, m.writer, m.diagnosticsFactory, m.awsIamAuth, m.eksaComponents) m.client.EXPECT().CreateNamespaceIfNotPresent(ctx, gomock.Any(), tt.clusterSpec.Cluster.Namespace).Return(nil) m.client.EXPECT().ApplyKubeSpecFromBytes(ctx, tt.cluster, gomock.Any()) @@ -1972,11 +1964,10 @@ func TestClusterManagerCreateEKSAResourcesFailureApplyUpgraderConfigMap(t *testi diagnosticsBundle: mocksdiagnostics.NewMockDiagnosticBundle(mockCtrl), eksaComponents: mocksmanager.NewMockEKSAComponents(mockCtrl), } - client := clustermanager.NewRetrierClient(m.client, retrier.NewWithMaxRetries(1, 1)) fakeClient := test.NewFakeKubeClient() cf := mocksmanager.NewMockClientFactory(mockCtrl) cf.EXPECT().BuildClientFromKubeconfig("").Return(fakeClient, nil).AnyTimes() - c := clustermanager.New(cf, client, m.networking, m.writer, m.diagnosticsFactory, m.awsIamAuth, m.eksaComponents) + c := clustermanager.New(cf, m.client, m.networking, m.writer, m.diagnosticsFactory, m.awsIamAuth, m.eksaComponents) m.client.EXPECT().CreateNamespaceIfNotPresent(ctx, 
gomock.Any(), tt.clusterSpec.Cluster.Namespace).Return(nil) m.client.EXPECT().GetConfigMap(ctx, tt.cluster.KubeconfigFile, gomock.Any(), gomock.Any()).Return(configMap, nil) @@ -2700,7 +2691,6 @@ func TestClusterManagerClusterSpecChangedNewEksdReleaseWorkers(t *testing.T) { eksaComponents: mocksmanager.NewMockEKSAComponents(mockCtrl), } - client := clustermanager.NewRetrierClient(m.client, clustermanager.DefaultRetrier()) clusterName := "cluster-name" dc := &v1alpha1.VSphereDatacenterConfig{ ObjectMeta: metav1.ObjectMeta{ @@ -2728,7 +2718,7 @@ func TestClusterManagerClusterSpecChangedNewEksdReleaseWorkers(t *testing.T) { fakeClient := test.NewFakeKubeClient(dc, oc, b, r, ac, gc, er, r2) cf := mocksmanager.NewMockClientFactory(mockCtrl) cf.EXPECT().BuildClientFromKubeconfig("").Return(fakeClient, nil).AnyTimes() - c := clustermanager.New(cf, client, m.networking, m.writer, m.diagnosticsFactory, m.awsIamAuth, m.eksaComponents) + c := clustermanager.New(cf, m.client, m.networking, m.writer, m.diagnosticsFactory, m.awsIamAuth, m.eksaComponents) tt := newSpecChangedTest(t) tt.clusterManager = c @@ -2871,7 +2861,6 @@ func newClusterManager(t *testing.T, opts ...clustermanager.ClusterManagerOpt) ( eksaComponents: mocksmanager.NewMockEKSAComponents(mockCtrl), } - client := clustermanager.NewRetrierClient(m.client, clustermanager.DefaultRetrier()) clusterName := "cluster-name" dc := &v1alpha1.VSphereDatacenterConfig{ ObjectMeta: metav1.ObjectMeta{ @@ -2896,7 +2885,7 @@ func newClusterManager(t *testing.T, opts ...clustermanager.ClusterManagerOpt) ( fakeClient := test.NewFakeKubeClient(dc, oc, b, r, ac, gc, er) cf := mocksmanager.NewMockClientFactory(mockCtrl) cf.EXPECT().BuildClientFromKubeconfig("").Return(fakeClient, nil).AnyTimes() - c := clustermanager.New(cf, client, m.networking, m.writer, m.diagnosticsFactory, m.awsIamAuth, m.eksaComponents, opts...) 
+ c := clustermanager.New(cf, m.client, m.networking, m.writer, m.diagnosticsFactory, m.awsIamAuth, m.eksaComponents, opts...) return c, m } diff --git a/pkg/clustermanager/eksa_installer.go b/pkg/clustermanager/eksa_installer.go index 090c479c6034..00ed0c654341 100644 --- a/pkg/clustermanager/eksa_installer.go +++ b/pkg/clustermanager/eksa_installer.go @@ -14,6 +14,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/yaml" anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/cluster" @@ -58,26 +59,19 @@ func WithEKSAInstallerNoTimeouts() EKSAInstallerOpt { // Install configures and applies eks-a components in a cluster accordingly to a spec. func (i *EKSAInstaller) Install(ctx context.Context, log logr.Logger, cluster *types.Cluster, spec *cluster.Spec) error { - generator := EKSAComponentGenerator{log: log, reader: i.reader} - components, err := generator.buildEKSAComponentsSpec(spec) + err := i.createEKSAComponents(ctx, log, cluster, spec) if err != nil { - return err + return fmt.Errorf("applying EKSA components: %v", err) } - objs := make([]runtime.Object, 0, len(components.rest)+1) - objs = append(objs, components.deployment) - for _, o := range components.rest { - objs = append(objs, o) - } - - for _, o := range objs { - if err = i.client.Apply(ctx, cluster.KubeconfigFile, o); err != nil { - return fmt.Errorf("applying eksa components: %v", err) - } + err = i.applyBundles(ctx, log, cluster, spec) + if err != nil { + return fmt.Errorf("applying EKSA bundles: %v", err) } - if err := i.client.WaitForDeployment(ctx, cluster, i.deploymentWaitTimeout.String(), "Available", constants.EksaControllerManagerDeployment, constants.EksaSystemNamespace); err != nil { - return fmt.Errorf("waiting for eksa-controller-manager: %v", err) + err = i.applyReleases(ctx, log, cluster, spec) + if err != nil { + return fmt.Errorf("applying EKSA releases: %v", 
err) } return nil @@ -101,13 +95,70 @@ func (i *EKSAInstaller) Upgrade(ctx context.Context, log logr.Logger, c *types.C newVersionsBundle := newSpec.RootVersionsBundle() oldVersion := oldVersionsBundle.Eksa.Version newVersion := newVersionsBundle.Eksa.Version - if err := i.Install(ctx, log, c, newSpec); err != nil { + if err := i.createEKSAComponents(ctx, log, c, newSpec); err != nil { return nil, fmt.Errorf("upgrading EKS-A components from version %v to version %v: %v", oldVersion, newVersion, err) } return changeDiff, nil } +// createEKSAComponents creates eksa components and applies the objects to the cluster. +func (i *EKSAInstaller) createEKSAComponents(ctx context.Context, log logr.Logger, cluster *types.Cluster, spec *cluster.Spec) error { + generator := EKSAComponentGenerator{log: log, reader: i.reader} + components, err := generator.buildEKSAComponentsSpec(spec) + if err != nil { + return err + } + + objs := make([]runtime.Object, 0, len(components.rest)+1) + objs = append(objs, components.deployment) + for _, o := range components.rest { + objs = append(objs, o) + } + + for _, o := range objs { + if err = i.client.Apply(ctx, cluster.KubeconfigFile, o); err != nil { + return fmt.Errorf("applying eksa components: %v", err) + } + } + + if err := i.client.WaitForDeployment(ctx, cluster, i.deploymentWaitTimeout.String(), "Available", constants.EksaControllerManagerDeployment, constants.EksaSystemNamespace); err != nil { + return fmt.Errorf("waiting for eksa-controller-manager: %v", err) + } + + return nil +} + +// applyBundles applies the bundles to the cluster. 
+func (i *EKSAInstaller) applyBundles(ctx context.Context, log logr.Logger, cluster *types.Cluster, spec *cluster.Spec) error { + bundleObj, err := yaml.Marshal(spec.Bundles) + if err != nil { + return fmt.Errorf("outputting bundle yaml: %v", err) + } + + log.V(1).Info("Applying Bundles to cluster") + if err := i.client.ApplyKubeSpecFromBytes(ctx, cluster, bundleObj); err != nil { + return fmt.Errorf("applying bundle spec: %v", err) + } + + return nil +} + +// applyReleases applies the releases to the cluster. +func (i *EKSAInstaller) applyReleases(ctx context.Context, log logr.Logger, cluster *types.Cluster, spec *cluster.Spec) error { + releaseObj, err := yaml.Marshal(spec.EKSARelease) + if err != nil { + return fmt.Errorf("outputting release yaml: %v", err) + } + + log.V(1).Info("Applying EKSA Release to cluster") + if err := i.client.ApplyKubeSpecFromBytes(ctx, cluster, releaseObj); err != nil { + return fmt.Errorf("applying EKSA release spec: %v", err) + } + + return nil +} + // EKSAComponentGenerator generates and configures eks-a components. 
type EKSAComponentGenerator struct { log logr.Logger diff --git a/pkg/clustermanager/eksa_installer_test.go b/pkg/clustermanager/eksa_installer_test.go index e0ecd3562cf3..f2b9c2f9ad42 100644 --- a/pkg/clustermanager/eksa_installer_test.go +++ b/pkg/clustermanager/eksa_installer_test.go @@ -2,6 +2,7 @@ package clustermanager_test import ( "context" + "errors" "os" "strings" "testing" @@ -75,15 +76,90 @@ func TestEKSAInstallerInstallSuccessWithRealManifest(t *testing.T) { if err != nil { t.Fatalf("could not read eksa-components") } + + tt.newSpec.Bundles = &v1alpha1.Bundles{} + tt.newSpec.EKSARelease = &v1alpha1.EKSARelease{} manifest := string(file) expectedObjectCount := strings.Count(manifest, "\n---\n") tt.client.EXPECT().Apply(tt.ctx, tt.cluster.KubeconfigFile, gomock.AssignableToTypeOf(&appsv1.Deployment{})) tt.client.EXPECT().Apply(tt.ctx, tt.cluster.KubeconfigFile, gomock.Any()).Times(expectedObjectCount) tt.client.EXPECT().WaitForDeployment(tt.ctx, tt.cluster, "30m0s", "Available", "eksa-controller-manager", "eksa-system") + tt.client.EXPECT().ApplyKubeSpecFromBytes(tt.ctx, tt.cluster, gomock.Any()).Times(2) tt.Expect(tt.installer.Install(tt.ctx, test.NewNullLogger(), tt.cluster, tt.newSpec)).To(Succeed()) } +func TestEKSAInstallerInstallFailComponentsDeployment(t *testing.T) { + tt := newInstallerTest(t) + tt.newSpec.VersionsBundles["1.19"].Eksa.Components.URI = "../../config/manifest/eksa-components.yaml" + file, err := os.ReadFile("../../config/manifest/eksa-components.yaml") + if err != nil { + t.Fatalf("could not read eksa-components") + } + + manifest := string(file) + expectedObjectCount := strings.Count(manifest, "\n---\n") + tt.client.EXPECT().Apply(tt.ctx, tt.cluster.KubeconfigFile, gomock.AssignableToTypeOf(&appsv1.Deployment{})) + tt.client.EXPECT().Apply(tt.ctx, tt.cluster.KubeconfigFile, gomock.Any()).Times(expectedObjectCount) + tt.client.EXPECT().WaitForDeployment(tt.ctx, tt.cluster, "30m0s", "Available", "eksa-controller-manager", 
"eksa-system").Return(errors.New("test")) + + err = tt.installer.Install(tt.ctx, test.NewNullLogger(), tt.cluster, tt.newSpec) + tt.Expect(err.Error()).To(ContainSubstring("waiting for eksa-controller-manager")) +} + +func TestEKSAInstallerInstallFailComponents(t *testing.T) { + tt := newInstallerTest(t) + tt.newSpec.VersionsBundles["1.19"].Eksa.Components.URI = "../../config/manifest/eksa-components.yaml" + + tt.client.EXPECT().Apply(tt.ctx, tt.cluster.KubeconfigFile, gomock.AssignableToTypeOf(&appsv1.Deployment{})).Return(errors.New("test")) + + err := tt.installer.Install(tt.ctx, test.NewNullLogger(), tt.cluster, tt.newSpec) + tt.Expect(err.Error()).To(ContainSubstring("applying eksa components")) +} + +func TestEKSAInstallerInstallFailBundles(t *testing.T) { + tt := newInstallerTest(t) + tt.newSpec.VersionsBundles["1.19"].Eksa.Components.URI = "../../config/manifest/eksa-components.yaml" + file, err := os.ReadFile("../../config/manifest/eksa-components.yaml") + if err != nil { + t.Fatalf("could not read eksa-components") + } + + tt.newSpec.Bundles = &v1alpha1.Bundles{} + tt.newSpec.EKSARelease = &v1alpha1.EKSARelease{} + manifest := string(file) + expectedObjectCount := strings.Count(manifest, "\n---\n") + tt.client.EXPECT().Apply(tt.ctx, tt.cluster.KubeconfigFile, gomock.AssignableToTypeOf(&appsv1.Deployment{})) + tt.client.EXPECT().Apply(tt.ctx, tt.cluster.KubeconfigFile, gomock.Any()).Times(expectedObjectCount) + tt.client.EXPECT().WaitForDeployment(tt.ctx, tt.cluster, "30m0s", "Available", "eksa-controller-manager", "eksa-system") + tt.client.EXPECT().ApplyKubeSpecFromBytes(tt.ctx, tt.cluster, gomock.Any()).Return(errors.New("test")) + + err = tt.installer.Install(tt.ctx, test.NewNullLogger(), tt.cluster, tt.newSpec) + tt.Expect(err.Error()).To(ContainSubstring("applying bundle spec")) +} + +func TestEKSAInstallerInstallFailEKSARelease(t *testing.T) { + tt := newInstallerTest(t) + tt.newSpec.VersionsBundles["1.19"].Eksa.Components.URI = 
"../../config/manifest/eksa-components.yaml" + file, err := os.ReadFile("../../config/manifest/eksa-components.yaml") + if err != nil { + t.Fatalf("could not read eksa-components") + } + + tt.newSpec.Bundles = &v1alpha1.Bundles{} + tt.newSpec.EKSARelease = &v1alpha1.EKSARelease{} + manifest := string(file) + expectedObjectCount := strings.Count(manifest, "\n---\n") + tt.client.EXPECT().Apply(tt.ctx, tt.cluster.KubeconfigFile, gomock.AssignableToTypeOf(&appsv1.Deployment{})) + tt.client.EXPECT().Apply(tt.ctx, tt.cluster.KubeconfigFile, gomock.Any()).Times(expectedObjectCount) + tt.client.EXPECT().WaitForDeployment(tt.ctx, tt.cluster, "30m0s", "Available", "eksa-controller-manager", "eksa-system") + tt.client.EXPECT().ApplyKubeSpecFromBytes(tt.ctx, tt.cluster, gomock.Any()) + tt.client.EXPECT().ApplyKubeSpecFromBytes(tt.ctx, tt.cluster, gomock.Any()).Return(errors.New("test")) + + err = tt.installer.Install(tt.ctx, test.NewNullLogger(), tt.cluster, tt.newSpec) + tt.Expect(err.Error()).To(ContainSubstring("applying EKSA release spec")) +} + func TestEKSAInstallerInstallSuccessWithTestManifest(t *testing.T) { tt := newInstallerTest(t) tt.newSpec.VersionsBundles["1.19"].Eksa.Components.URI = "testdata/eksa_components.yaml" @@ -146,6 +222,7 @@ func TestEKSAInstallerInstallSuccessWithTestManifest(t *testing.T) { tt.client.EXPECT().Apply(tt.ctx, tt.cluster.KubeconfigFile, wantDeployment) tt.client.EXPECT().Apply(tt.ctx, tt.cluster.KubeconfigFile, wantNamespace) tt.client.EXPECT().WaitForDeployment(tt.ctx, tt.cluster, "30m0s", "Available", "eksa-controller-manager", "eksa-system") + tt.client.EXPECT().ApplyKubeSpecFromBytes(tt.ctx, tt.cluster, gomock.Any()).Times(2) tt.Expect(tt.installer.Install(tt.ctx, test.NewNullLogger(), tt.cluster, tt.newSpec)).To(Succeed()) } @@ -162,6 +239,7 @@ func TestEKSAInstallerInstallSuccessWithNoTimeout(t *testing.T) { expectedObjectCount := strings.Count(manifest, "\n---\n") tt.client.EXPECT().Apply(tt.ctx, tt.cluster.KubeconfigFile, 
gomock.Any()).Times(expectedObjectCount) tt.client.EXPECT().WaitForDeployment(tt.ctx, tt.cluster, maxTime.String(), "Available", "eksa-controller-manager", "eksa-system") + tt.client.EXPECT().ApplyKubeSpecFromBytes(tt.ctx, tt.cluster, gomock.Any()).Times(2) tt.Expect(tt.installer.Install(tt.ctx, test.NewNullLogger(), tt.cluster, tt.newSpec)).To(Succeed()) } diff --git a/pkg/clustermanager/mocks/client_and_networking.go b/pkg/clustermanager/mocks/client_and_networking.go index 46dbb88dc774..600a1c1feb24 100644 --- a/pkg/clustermanager/mocks/client_and_networking.go +++ b/pkg/clustermanager/mocks/client_and_networking.go @@ -1,5 +1,5 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/aws/eks-anywhere/pkg/clustermanager (interfaces: ClusterClient,Networking,AwsIamAuth,EKSAComponents,KubernetesClient,ClientFactory,ClusterApplier) +// Source: github.com/aws/eks-anywhere/pkg/clustermanager (interfaces: ClusterClient,Networking,AwsIamAuth,EKSAComponents,KubernetesClient,ClientFactory,ClusterApplier,CAPIClient) // Package mocks is a generated GoMock package. package mocks @@ -1084,6 +1084,447 @@ func (mr *MockKubernetesClientMockRecorder) ApplyKubeSpecFromBytesWithNamespace( return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyKubeSpecFromBytesWithNamespace", reflect.TypeOf((*MockKubernetesClient)(nil).ApplyKubeSpecFromBytesWithNamespace), arg0, arg1, arg2, arg3) } +// CountMachineDeploymentReplicasReady mocks base method. +func (m *MockKubernetesClient) CountMachineDeploymentReplicasReady(arg0 context.Context, arg1, arg2 string) (int, int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CountMachineDeploymentReplicasReady", arg0, arg1, arg2) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(int) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// CountMachineDeploymentReplicasReady indicates an expected call of CountMachineDeploymentReplicasReady. 
+func (mr *MockKubernetesClientMockRecorder) CountMachineDeploymentReplicasReady(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CountMachineDeploymentReplicasReady", reflect.TypeOf((*MockKubernetesClient)(nil).CountMachineDeploymentReplicasReady), arg0, arg1, arg2) +} + +// CreateNamespaceIfNotPresent mocks base method. +func (m *MockKubernetesClient) CreateNamespaceIfNotPresent(arg0 context.Context, arg1, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateNamespaceIfNotPresent", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateNamespaceIfNotPresent indicates an expected call of CreateNamespaceIfNotPresent. +func (mr *MockKubernetesClientMockRecorder) CreateNamespaceIfNotPresent(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateNamespaceIfNotPresent", reflect.TypeOf((*MockKubernetesClient)(nil).CreateNamespaceIfNotPresent), arg0, arg1, arg2) +} + +// DeleteAWSIamConfig mocks base method. +func (m *MockKubernetesClient) DeleteAWSIamConfig(arg0 context.Context, arg1 *types.Cluster, arg2, arg3 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteAWSIamConfig", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteAWSIamConfig indicates an expected call of DeleteAWSIamConfig. +func (mr *MockKubernetesClientMockRecorder) DeleteAWSIamConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAWSIamConfig", reflect.TypeOf((*MockKubernetesClient)(nil).DeleteAWSIamConfig), arg0, arg1, arg2, arg3) +} + +// DeleteCluster mocks base method. 
+func (m *MockKubernetesClient) DeleteCluster(arg0 context.Context, arg1, arg2 *types.Cluster) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteCluster", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteCluster indicates an expected call of DeleteCluster. +func (mr *MockKubernetesClientMockRecorder) DeleteCluster(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCluster", reflect.TypeOf((*MockKubernetesClient)(nil).DeleteCluster), arg0, arg1, arg2) +} + +// DeleteEKSACluster mocks base method. +func (m *MockKubernetesClient) DeleteEKSACluster(arg0 context.Context, arg1 *types.Cluster, arg2, arg3 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteEKSACluster", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteEKSACluster indicates an expected call of DeleteEKSACluster. +func (mr *MockKubernetesClientMockRecorder) DeleteEKSACluster(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteEKSACluster", reflect.TypeOf((*MockKubernetesClient)(nil).DeleteEKSACluster), arg0, arg1, arg2, arg3) +} + +// DeleteGitOpsConfig mocks base method. +func (m *MockKubernetesClient) DeleteGitOpsConfig(arg0 context.Context, arg1 *types.Cluster, arg2, arg3 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteGitOpsConfig", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteGitOpsConfig indicates an expected call of DeleteGitOpsConfig. +func (mr *MockKubernetesClientMockRecorder) DeleteGitOpsConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteGitOpsConfig", reflect.TypeOf((*MockKubernetesClient)(nil).DeleteGitOpsConfig), arg0, arg1, arg2, arg3) +} + +// DeleteOIDCConfig mocks base method. 
+func (m *MockKubernetesClient) DeleteOIDCConfig(arg0 context.Context, arg1 *types.Cluster, arg2, arg3 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteOIDCConfig", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteOIDCConfig indicates an expected call of DeleteOIDCConfig. +func (mr *MockKubernetesClientMockRecorder) DeleteOIDCConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteOIDCConfig", reflect.TypeOf((*MockKubernetesClient)(nil).DeleteOIDCConfig), arg0, arg1, arg2, arg3) +} + +// DeleteOldWorkerNodeGroup mocks base method. +func (m *MockKubernetesClient) DeleteOldWorkerNodeGroup(arg0 context.Context, arg1 *v1beta1.MachineDeployment, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteOldWorkerNodeGroup", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteOldWorkerNodeGroup indicates an expected call of DeleteOldWorkerNodeGroup. +func (mr *MockKubernetesClientMockRecorder) DeleteOldWorkerNodeGroup(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteOldWorkerNodeGroup", reflect.TypeOf((*MockKubernetesClient)(nil).DeleteOldWorkerNodeGroup), arg0, arg1, arg2) +} + +// DeletePackageResources mocks base method. +func (m *MockKubernetesClient) DeletePackageResources(arg0 context.Context, arg1 *types.Cluster, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeletePackageResources", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeletePackageResources indicates an expected call of DeletePackageResources. 
+func (mr *MockKubernetesClientMockRecorder) DeletePackageResources(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePackageResources", reflect.TypeOf((*MockKubernetesClient)(nil).DeletePackageResources), arg0, arg1, arg2) +} + +// GetApiServerUrl mocks base method. +func (m *MockKubernetesClient) GetApiServerUrl(arg0 context.Context, arg1 *types.Cluster) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetApiServerUrl", arg0, arg1) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetApiServerUrl indicates an expected call of GetApiServerUrl. +func (mr *MockKubernetesClientMockRecorder) GetApiServerUrl(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetApiServerUrl", reflect.TypeOf((*MockKubernetesClient)(nil).GetApiServerUrl), arg0, arg1) +} + +// GetBundles mocks base method. +func (m *MockKubernetesClient) GetBundles(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha10.Bundles, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBundles", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*v1alpha10.Bundles) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBundles indicates an expected call of GetBundles. +func (mr *MockKubernetesClientMockRecorder) GetBundles(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBundles", reflect.TypeOf((*MockKubernetesClient)(nil).GetBundles), arg0, arg1, arg2, arg3) +} + +// GetClusters mocks base method. 
+func (m *MockKubernetesClient) GetClusters(arg0 context.Context, arg1 *types.Cluster) ([]types.CAPICluster, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetClusters", arg0, arg1) + ret0, _ := ret[0].([]types.CAPICluster) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetClusters indicates an expected call of GetClusters. +func (mr *MockKubernetesClientMockRecorder) GetClusters(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusters", reflect.TypeOf((*MockKubernetesClient)(nil).GetClusters), arg0, arg1) +} + +// GetConfigMap mocks base method. +func (m *MockKubernetesClient) GetConfigMap(arg0 context.Context, arg1, arg2, arg3 string) (*v1.ConfigMap, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetConfigMap", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*v1.ConfigMap) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetConfigMap indicates an expected call of GetConfigMap. +func (mr *MockKubernetesClientMockRecorder) GetConfigMap(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConfigMap", reflect.TypeOf((*MockKubernetesClient)(nil).GetConfigMap), arg0, arg1, arg2, arg3) +} + +// GetEksaAWSIamConfig mocks base method. +func (m *MockKubernetesClient) GetEksaAWSIamConfig(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha1.AWSIamConfig, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEksaAWSIamConfig", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*v1alpha1.AWSIamConfig) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEksaAWSIamConfig indicates an expected call of GetEksaAWSIamConfig. 
+func (mr *MockKubernetesClientMockRecorder) GetEksaAWSIamConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaAWSIamConfig", reflect.TypeOf((*MockKubernetesClient)(nil).GetEksaAWSIamConfig), arg0, arg1, arg2, arg3) +} + +// GetEksaCloudStackMachineConfig mocks base method. +func (m *MockKubernetesClient) GetEksaCloudStackMachineConfig(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha1.CloudStackMachineConfig, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEksaCloudStackMachineConfig", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*v1alpha1.CloudStackMachineConfig) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEksaCloudStackMachineConfig indicates an expected call of GetEksaCloudStackMachineConfig. +func (mr *MockKubernetesClientMockRecorder) GetEksaCloudStackMachineConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaCloudStackMachineConfig", reflect.TypeOf((*MockKubernetesClient)(nil).GetEksaCloudStackMachineConfig), arg0, arg1, arg2, arg3) +} + +// GetEksaCluster mocks base method. +func (m *MockKubernetesClient) GetEksaCluster(arg0 context.Context, arg1 *types.Cluster, arg2 string) (*v1alpha1.Cluster, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEksaCluster", arg0, arg1, arg2) + ret0, _ := ret[0].(*v1alpha1.Cluster) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEksaCluster indicates an expected call of GetEksaCluster. +func (mr *MockKubernetesClientMockRecorder) GetEksaCluster(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaCluster", reflect.TypeOf((*MockKubernetesClient)(nil).GetEksaCluster), arg0, arg1, arg2) +} + +// GetEksaFluxConfig mocks base method. 
+func (m *MockKubernetesClient) GetEksaFluxConfig(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha1.FluxConfig, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEksaFluxConfig", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*v1alpha1.FluxConfig) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEksaFluxConfig indicates an expected call of GetEksaFluxConfig. +func (mr *MockKubernetesClientMockRecorder) GetEksaFluxConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaFluxConfig", reflect.TypeOf((*MockKubernetesClient)(nil).GetEksaFluxConfig), arg0, arg1, arg2, arg3) +} + +// GetEksaGitOpsConfig mocks base method. +func (m *MockKubernetesClient) GetEksaGitOpsConfig(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha1.GitOpsConfig, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEksaGitOpsConfig", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*v1alpha1.GitOpsConfig) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEksaGitOpsConfig indicates an expected call of GetEksaGitOpsConfig. +func (mr *MockKubernetesClientMockRecorder) GetEksaGitOpsConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaGitOpsConfig", reflect.TypeOf((*MockKubernetesClient)(nil).GetEksaGitOpsConfig), arg0, arg1, arg2, arg3) +} + +// GetEksaOIDCConfig mocks base method. +func (m *MockKubernetesClient) GetEksaOIDCConfig(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha1.OIDCConfig, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEksaOIDCConfig", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*v1alpha1.OIDCConfig) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEksaOIDCConfig indicates an expected call of GetEksaOIDCConfig. 
+func (mr *MockKubernetesClientMockRecorder) GetEksaOIDCConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaOIDCConfig", reflect.TypeOf((*MockKubernetesClient)(nil).GetEksaOIDCConfig), arg0, arg1, arg2, arg3) +} + +// GetEksaVSphereDatacenterConfig mocks base method. +func (m *MockKubernetesClient) GetEksaVSphereDatacenterConfig(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha1.VSphereDatacenterConfig, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEksaVSphereDatacenterConfig", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*v1alpha1.VSphereDatacenterConfig) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEksaVSphereDatacenterConfig indicates an expected call of GetEksaVSphereDatacenterConfig. +func (mr *MockKubernetesClientMockRecorder) GetEksaVSphereDatacenterConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaVSphereDatacenterConfig", reflect.TypeOf((*MockKubernetesClient)(nil).GetEksaVSphereDatacenterConfig), arg0, arg1, arg2, arg3) +} + +// GetEksaVSphereMachineConfig mocks base method. +func (m *MockKubernetesClient) GetEksaVSphereMachineConfig(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha1.VSphereMachineConfig, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEksaVSphereMachineConfig", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*v1alpha1.VSphereMachineConfig) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEksaVSphereMachineConfig indicates an expected call of GetEksaVSphereMachineConfig. 
+func (mr *MockKubernetesClientMockRecorder) GetEksaVSphereMachineConfig(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksaVSphereMachineConfig", reflect.TypeOf((*MockKubernetesClient)(nil).GetEksaVSphereMachineConfig), arg0, arg1, arg2, arg3) +} + +// GetEksdRelease mocks base method. +func (m *MockKubernetesClient) GetEksdRelease(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha11.Release, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEksdRelease", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*v1alpha11.Release) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEksdRelease indicates an expected call of GetEksdRelease. +func (mr *MockKubernetesClientMockRecorder) GetEksdRelease(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEksdRelease", reflect.TypeOf((*MockKubernetesClient)(nil).GetEksdRelease), arg0, arg1, arg2, arg3) +} + +// GetKubeadmControlPlane mocks base method. +func (m *MockKubernetesClient) GetKubeadmControlPlane(arg0 context.Context, arg1 *types.Cluster, arg2 string, arg3 ...executables.KubectlOpt) (*v1beta10.KubeadmControlPlane, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetKubeadmControlPlane", varargs...) + ret0, _ := ret[0].(*v1beta10.KubeadmControlPlane) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetKubeadmControlPlane indicates an expected call of GetKubeadmControlPlane. +func (mr *MockKubernetesClientMockRecorder) GetKubeadmControlPlane(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetKubeadmControlPlane", reflect.TypeOf((*MockKubernetesClient)(nil).GetKubeadmControlPlane), varargs...) +} + +// GetMachineDeployment mocks base method. +func (m *MockKubernetesClient) GetMachineDeployment(arg0 context.Context, arg1 string, arg2 ...executables.KubectlOpt) (*v1beta1.MachineDeployment, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetMachineDeployment", varargs...) + ret0, _ := ret[0].(*v1beta1.MachineDeployment) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMachineDeployment indicates an expected call of GetMachineDeployment. +func (mr *MockKubernetesClientMockRecorder) GetMachineDeployment(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMachineDeployment", reflect.TypeOf((*MockKubernetesClient)(nil).GetMachineDeployment), varargs...) +} + +// GetMachineDeploymentsForCluster mocks base method. +func (m *MockKubernetesClient) GetMachineDeploymentsForCluster(arg0 context.Context, arg1 string, arg2 ...executables.KubectlOpt) ([]v1beta1.MachineDeployment, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetMachineDeploymentsForCluster", varargs...) + ret0, _ := ret[0].([]v1beta1.MachineDeployment) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMachineDeploymentsForCluster indicates an expected call of GetMachineDeploymentsForCluster. +func (mr *MockKubernetesClientMockRecorder) GetMachineDeploymentsForCluster(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMachineDeploymentsForCluster", reflect.TypeOf((*MockKubernetesClient)(nil).GetMachineDeploymentsForCluster), varargs...) +} + +// GetMachines mocks base method. +func (m *MockKubernetesClient) GetMachines(arg0 context.Context, arg1 *types.Cluster, arg2 string) ([]types.Machine, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMachines", arg0, arg1, arg2) + ret0, _ := ret[0].([]types.Machine) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMachines indicates an expected call of GetMachines. +func (mr *MockKubernetesClientMockRecorder) GetMachines(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMachines", reflect.TypeOf((*MockKubernetesClient)(nil).GetMachines), arg0, arg1, arg2) +} + +// KubeconfigSecretAvailable mocks base method. +func (m *MockKubernetesClient) KubeconfigSecretAvailable(arg0 context.Context, arg1, arg2, arg3 string) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "KubeconfigSecretAvailable", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// KubeconfigSecretAvailable indicates an expected call of KubeconfigSecretAvailable. +func (mr *MockKubernetesClientMockRecorder) KubeconfigSecretAvailable(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KubeconfigSecretAvailable", reflect.TypeOf((*MockKubernetesClient)(nil).KubeconfigSecretAvailable), arg0, arg1, arg2, arg3) +} + +// ListObjects mocks base method. +func (m *MockKubernetesClient) ListObjects(arg0 context.Context, arg1, arg2, arg3 string, arg4 kubernetes.ObjectList) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListObjects", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(error) + return ret0 +} + +// ListObjects indicates an expected call of ListObjects. 
+func (mr *MockKubernetesClientMockRecorder) ListObjects(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjects", reflect.TypeOf((*MockKubernetesClient)(nil).ListObjects), arg0, arg1, arg2, arg3, arg4) +} + +// PauseCAPICluster mocks base method. +func (m *MockKubernetesClient) PauseCAPICluster(arg0 context.Context, arg1, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PauseCAPICluster", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// PauseCAPICluster indicates an expected call of PauseCAPICluster. +func (mr *MockKubernetesClientMockRecorder) PauseCAPICluster(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PauseCAPICluster", reflect.TypeOf((*MockKubernetesClient)(nil).PauseCAPICluster), arg0, arg1, arg2) +} + // RemoveAnnotationInNamespace mocks base method. func (m *MockKubernetesClient) RemoveAnnotationInNamespace(arg0 context.Context, arg1, arg2, arg3 string, arg4 *types.Cluster, arg5 string) error { m.ctrl.T.Helper() @@ -1098,6 +1539,48 @@ func (mr *MockKubernetesClientMockRecorder) RemoveAnnotationInNamespace(arg0, ar return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveAnnotationInNamespace", reflect.TypeOf((*MockKubernetesClient)(nil).RemoveAnnotationInNamespace), arg0, arg1, arg2, arg3, arg4, arg5) } +// ResumeCAPICluster mocks base method. +func (m *MockKubernetesClient) ResumeCAPICluster(arg0 context.Context, arg1, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResumeCAPICluster", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// ResumeCAPICluster indicates an expected call of ResumeCAPICluster. 
+func (mr *MockKubernetesClientMockRecorder) ResumeCAPICluster(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResumeCAPICluster", reflect.TypeOf((*MockKubernetesClient)(nil).ResumeCAPICluster), arg0, arg1, arg2) +} + +// SaveLog mocks base method. +func (m *MockKubernetesClient) SaveLog(arg0 context.Context, arg1 *types.Cluster, arg2 *types.Deployment, arg3 string, arg4 filewriter.FileWriter) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SaveLog", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(error) + return ret0 +} + +// SaveLog indicates an expected call of SaveLog. +func (mr *MockKubernetesClientMockRecorder) SaveLog(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveLog", reflect.TypeOf((*MockKubernetesClient)(nil).SaveLog), arg0, arg1, arg2, arg3, arg4) +} + +// SetEksaControllerEnvVar mocks base method. +func (m *MockKubernetesClient) SetEksaControllerEnvVar(arg0 context.Context, arg1, arg2, arg3 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetEksaControllerEnvVar", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetEksaControllerEnvVar indicates an expected call of SetEksaControllerEnvVar. +func (mr *MockKubernetesClientMockRecorder) SetEksaControllerEnvVar(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetEksaControllerEnvVar", reflect.TypeOf((*MockKubernetesClient)(nil).SetEksaControllerEnvVar), arg0, arg1, arg2, arg3) +} + // UpdateAnnotationInNamespace mocks base method. 
func (m *MockKubernetesClient) UpdateAnnotationInNamespace(arg0 context.Context, arg1, arg2 string, arg3 map[string]string, arg4 *types.Cluster, arg5 string) error { m.ctrl.T.Helper() @@ -1112,6 +1595,104 @@ func (mr *MockKubernetesClientMockRecorder) UpdateAnnotationInNamespace(arg0, ar return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAnnotationInNamespace", reflect.TypeOf((*MockKubernetesClient)(nil).UpdateAnnotationInNamespace), arg0, arg1, arg2, arg3, arg4, arg5) } +// UpdateEnvironmentVariablesInNamespace mocks base method. +func (m *MockKubernetesClient) UpdateEnvironmentVariablesInNamespace(arg0 context.Context, arg1, arg2 string, arg3 map[string]string, arg4 *types.Cluster, arg5 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateEnvironmentVariablesInNamespace", arg0, arg1, arg2, arg3, arg4, arg5) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateEnvironmentVariablesInNamespace indicates an expected call of UpdateEnvironmentVariablesInNamespace. +func (mr *MockKubernetesClientMockRecorder) UpdateEnvironmentVariablesInNamespace(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateEnvironmentVariablesInNamespace", reflect.TypeOf((*MockKubernetesClient)(nil).UpdateEnvironmentVariablesInNamespace), arg0, arg1, arg2, arg3, arg4, arg5) +} + +// ValidateControlPlaneNodes mocks base method. +func (m *MockKubernetesClient) ValidateControlPlaneNodes(arg0 context.Context, arg1 *types.Cluster, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ValidateControlPlaneNodes", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// ValidateControlPlaneNodes indicates an expected call of ValidateControlPlaneNodes. 
+func (mr *MockKubernetesClientMockRecorder) ValidateControlPlaneNodes(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateControlPlaneNodes", reflect.TypeOf((*MockKubernetesClient)(nil).ValidateControlPlaneNodes), arg0, arg1, arg2) +} + +// ValidateWorkerNodes mocks base method. +func (m *MockKubernetesClient) ValidateWorkerNodes(arg0 context.Context, arg1, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ValidateWorkerNodes", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// ValidateWorkerNodes indicates an expected call of ValidateWorkerNodes. +func (mr *MockKubernetesClientMockRecorder) ValidateWorkerNodes(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateWorkerNodes", reflect.TypeOf((*MockKubernetesClient)(nil).ValidateWorkerNodes), arg0, arg1, arg2) +} + +// WaitForClusterReady mocks base method. +func (m *MockKubernetesClient) WaitForClusterReady(arg0 context.Context, arg1 *types.Cluster, arg2, arg3 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WaitForClusterReady", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitForClusterReady indicates an expected call of WaitForClusterReady. +func (mr *MockKubernetesClientMockRecorder) WaitForClusterReady(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForClusterReady", reflect.TypeOf((*MockKubernetesClient)(nil).WaitForClusterReady), arg0, arg1, arg2, arg3) +} + +// WaitForControlPlaneAvailable mocks base method. 
+func (m *MockKubernetesClient) WaitForControlPlaneAvailable(arg0 context.Context, arg1 *types.Cluster, arg2, arg3 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WaitForControlPlaneAvailable", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitForControlPlaneAvailable indicates an expected call of WaitForControlPlaneAvailable. +func (mr *MockKubernetesClientMockRecorder) WaitForControlPlaneAvailable(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForControlPlaneAvailable", reflect.TypeOf((*MockKubernetesClient)(nil).WaitForControlPlaneAvailable), arg0, arg1, arg2, arg3) +} + +// WaitForControlPlaneNotReady mocks base method. +func (m *MockKubernetesClient) WaitForControlPlaneNotReady(arg0 context.Context, arg1 *types.Cluster, arg2, arg3 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WaitForControlPlaneNotReady", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitForControlPlaneNotReady indicates an expected call of WaitForControlPlaneNotReady. +func (mr *MockKubernetesClientMockRecorder) WaitForControlPlaneNotReady(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForControlPlaneNotReady", reflect.TypeOf((*MockKubernetesClient)(nil).WaitForControlPlaneNotReady), arg0, arg1, arg2, arg3) +} + +// WaitForControlPlaneReady mocks base method. +func (m *MockKubernetesClient) WaitForControlPlaneReady(arg0 context.Context, arg1 *types.Cluster, arg2, arg3 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WaitForControlPlaneReady", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitForControlPlaneReady indicates an expected call of WaitForControlPlaneReady. 
+func (mr *MockKubernetesClientMockRecorder) WaitForControlPlaneReady(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForControlPlaneReady", reflect.TypeOf((*MockKubernetesClient)(nil).WaitForControlPlaneReady), arg0, arg1, arg2, arg3) +} + // WaitForDeployment mocks base method. func (m *MockKubernetesClient) WaitForDeployment(arg0 context.Context, arg1 *types.Cluster, arg2, arg3, arg4, arg5 string) error { m.ctrl.T.Helper() @@ -1126,6 +1707,34 @@ func (mr *MockKubernetesClientMockRecorder) WaitForDeployment(arg0, arg1, arg2, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForDeployment", reflect.TypeOf((*MockKubernetesClient)(nil).WaitForDeployment), arg0, arg1, arg2, arg3, arg4, arg5) } +// WaitForManagedExternalEtcdNotReady mocks base method. +func (m *MockKubernetesClient) WaitForManagedExternalEtcdNotReady(arg0 context.Context, arg1 *types.Cluster, arg2, arg3 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WaitForManagedExternalEtcdNotReady", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitForManagedExternalEtcdNotReady indicates an expected call of WaitForManagedExternalEtcdNotReady. +func (mr *MockKubernetesClientMockRecorder) WaitForManagedExternalEtcdNotReady(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForManagedExternalEtcdNotReady", reflect.TypeOf((*MockKubernetesClient)(nil).WaitForManagedExternalEtcdNotReady), arg0, arg1, arg2, arg3) +} + +// WaitForManagedExternalEtcdReady mocks base method. 
+func (m *MockKubernetesClient) WaitForManagedExternalEtcdReady(arg0 context.Context, arg1 *types.Cluster, arg2, arg3 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WaitForManagedExternalEtcdReady", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitForManagedExternalEtcdReady indicates an expected call of WaitForManagedExternalEtcdReady. +func (mr *MockKubernetesClientMockRecorder) WaitForManagedExternalEtcdReady(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForManagedExternalEtcdReady", reflect.TypeOf((*MockKubernetesClient)(nil).WaitForManagedExternalEtcdReady), arg0, arg1, arg2, arg3) +} + // MockClientFactory is a mock of ClientFactory interface. type MockClientFactory struct { ctrl *gomock.Controller @@ -1200,3 +1809,83 @@ func (mr *MockClusterApplierMockRecorder) Run(arg0, arg1, arg2 interface{}) *gom mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockClusterApplier)(nil).Run), arg0, arg1, arg2) } + +// MockCAPIClient is a mock of CAPIClient interface. +type MockCAPIClient struct { + ctrl *gomock.Controller + recorder *MockCAPIClientMockRecorder +} + +// MockCAPIClientMockRecorder is the mock recorder for MockCAPIClient. +type MockCAPIClientMockRecorder struct { + mock *MockCAPIClient +} + +// NewMockCAPIClient creates a new mock instance. +func NewMockCAPIClient(ctrl *gomock.Controller) *MockCAPIClient { + mock := &MockCAPIClient{ctrl: ctrl} + mock.recorder = &MockCAPIClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockCAPIClient) EXPECT() *MockCAPIClientMockRecorder { + return m.recorder +} + +// BackupManagement mocks base method. 
+func (m *MockCAPIClient) BackupManagement(arg0 context.Context, arg1 *types.Cluster, arg2, arg3 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BackupManagement", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// BackupManagement indicates an expected call of BackupManagement. +func (mr *MockCAPIClientMockRecorder) BackupManagement(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BackupManagement", reflect.TypeOf((*MockCAPIClient)(nil).BackupManagement), arg0, arg1, arg2, arg3) +} + +// GetWorkloadKubeconfig mocks base method. +func (m *MockCAPIClient) GetWorkloadKubeconfig(arg0 context.Context, arg1 string, arg2 *types.Cluster) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetWorkloadKubeconfig", arg0, arg1, arg2) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetWorkloadKubeconfig indicates an expected call of GetWorkloadKubeconfig. +func (mr *MockCAPIClientMockRecorder) GetWorkloadKubeconfig(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkloadKubeconfig", reflect.TypeOf((*MockCAPIClient)(nil).GetWorkloadKubeconfig), arg0, arg1, arg2) +} + +// InitInfrastructure mocks base method. +func (m *MockCAPIClient) InitInfrastructure(arg0 context.Context, arg1 *cluster.Spec, arg2 *types.Cluster, arg3 providers.Provider) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InitInfrastructure", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// InitInfrastructure indicates an expected call of InitInfrastructure. 
+func (mr *MockCAPIClientMockRecorder) InitInfrastructure(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitInfrastructure", reflect.TypeOf((*MockCAPIClient)(nil).InitInfrastructure), arg0, arg1, arg2, arg3)
+}
+
+// MoveManagement mocks base method.
+func (m *MockCAPIClient) MoveManagement(arg0 context.Context, arg1, arg2 *types.Cluster, arg3 string) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "MoveManagement", arg0, arg1, arg2, arg3)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// MoveManagement indicates an expected call of MoveManagement.
+func (mr *MockCAPIClientMockRecorder) MoveManagement(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MoveManagement", reflect.TypeOf((*MockCAPIClient)(nil).MoveManagement), arg0, arg1, arg2, arg3)
+}
diff --git a/pkg/clustermanager/retrier_client.go b/pkg/clustermanager/retrier_client.go
index 2414a4dea7f9..3086d4be7537 100644
--- a/pkg/clustermanager/retrier_client.go
+++ b/pkg/clustermanager/retrier_client.go
@@ -10,146 +10,146 @@ import (
 	"github.com/aws/eks-anywhere/pkg/types"
 )
 
-// RetrierClient wraps around a ClusterClient, offering retry functionality for some operations.
-type RetrierClient struct {
-	*clusterManagerClient
+// KubernetesRetrierClient wraps around a KubernetesClient, offering retry functionality for some operations.
+type KubernetesRetrierClient struct {
+	KubernetesClient
 	retrier *retrier.Retrier
 }
 
-// NewRetrierClient constructs a new RetrierClient.
+// NewRetrierClient constructs a new KubernetesRetrierClient.
-func NewRetrierClient(client ClusterClient, retrier *retrier.Retrier) *RetrierClient { - return &RetrierClient{ - clusterManagerClient: newClient(client), - retrier: retrier, +func NewRetrierClient(client KubernetesClient, retrier *retrier.Retrier) *KubernetesRetrierClient { + return &KubernetesRetrierClient{ + KubernetesClient: client, + retrier: retrier, } } // ApplyKubeSpecFromBytes creates/updates the objects defined in a yaml manifest against the api server following a client side apply mechanism. -func (c *RetrierClient) ApplyKubeSpecFromBytes(ctx context.Context, cluster *types.Cluster, data []byte) error { +func (c *KubernetesRetrierClient) ApplyKubeSpecFromBytes(ctx context.Context, cluster *types.Cluster, data []byte) error { return c.retrier.Retry( func() error { - return c.ClusterClient.ApplyKubeSpecFromBytes(ctx, cluster, data) + return c.KubernetesClient.ApplyKubeSpecFromBytes(ctx, cluster, data) }, ) } // Apply creates/updates an object against the api server following a client side apply mechanism. -func (c *RetrierClient) Apply(ctx context.Context, kubeconfigPath string, obj runtime.Object, opts ...kubernetes.KubectlApplyOption) error { +func (c *KubernetesRetrierClient) Apply(ctx context.Context, kubeconfigPath string, obj runtime.Object, opts ...kubernetes.KubectlApplyOption) error { return c.retrier.Retry( func() error { - return c.ClusterClient.Apply(ctx, kubeconfigPath, obj, opts...) + return c.KubernetesClient.Apply(ctx, kubeconfigPath, obj, opts...) }, ) } // PauseCAPICluster adds a `spec.Paused: true` to the CAPI cluster resource. This will cause all // downstream CAPI + provider controllers to skip reconciling on the paused cluster's objects. 
-func (c *RetrierClient) PauseCAPICluster(ctx context.Context, cluster, kubeconfig string) error { +func (c *KubernetesRetrierClient) PauseCAPICluster(ctx context.Context, cluster, kubeconfig string) error { return c.retrier.Retry( func() error { - return c.ClusterClient.PauseCAPICluster(ctx, cluster, kubeconfig) + return c.KubernetesClient.PauseCAPICluster(ctx, cluster, kubeconfig) }, ) } // ResumeCAPICluster removes the `spec.Paused` on the CAPI cluster resource. This will cause all // downstream CAPI + provider controllers to resume reconciling on the paused cluster's objects. -func (c *RetrierClient) ResumeCAPICluster(ctx context.Context, cluster, kubeconfig string) error { +func (c *KubernetesRetrierClient) ResumeCAPICluster(ctx context.Context, cluster, kubeconfig string) error { return c.retrier.Retry( func() error { - return c.ClusterClient.ResumeCAPICluster(ctx, cluster, kubeconfig) + return c.KubernetesClient.ResumeCAPICluster(ctx, cluster, kubeconfig) }, ) } // ApplyKubeSpecFromBytesForce creates/updates the objects defined in a yaml manifest against the api server following a client side apply mechanism. // It forces the operation, so if api validation failed, it will delete and re-create the object. -func (c *RetrierClient) ApplyKubeSpecFromBytesForce(ctx context.Context, cluster *types.Cluster, data []byte) error { +func (c *KubernetesRetrierClient) ApplyKubeSpecFromBytesForce(ctx context.Context, cluster *types.Cluster, data []byte) error { return c.retrier.Retry( func() error { - return c.ClusterClient.ApplyKubeSpecFromBytesForce(ctx, cluster, data) + return c.KubernetesClient.ApplyKubeSpecFromBytesForce(ctx, cluster, data) }, ) } // ApplyKubeSpecFromBytesWithNamespace creates/updates the objects defined in a yaml manifest against the api server following a client side apply mechanism. // It applies all objects in the given namespace. 
-func (c *RetrierClient) ApplyKubeSpecFromBytesWithNamespace(ctx context.Context, cluster *types.Cluster, data []byte, namespace string) error { +func (c *KubernetesRetrierClient) ApplyKubeSpecFromBytesWithNamespace(ctx context.Context, cluster *types.Cluster, data []byte, namespace string) error { return c.retrier.Retry( func() error { - return c.ClusterClient.ApplyKubeSpecFromBytesWithNamespace(ctx, cluster, data, namespace) + return c.KubernetesClient.ApplyKubeSpecFromBytesWithNamespace(ctx, cluster, data, namespace) }, ) } // UpdateAnnotationInNamespace adds/updates an annotation for the given kubernetes resource. -func (c *RetrierClient) UpdateAnnotationInNamespace(ctx context.Context, resourceType, objectName string, annotations map[string]string, cluster *types.Cluster, namespace string) error { +func (c *KubernetesRetrierClient) UpdateAnnotationInNamespace(ctx context.Context, resourceType, objectName string, annotations map[string]string, cluster *types.Cluster, namespace string) error { return c.retrier.Retry( func() error { - return c.ClusterClient.UpdateAnnotationInNamespace(ctx, resourceType, objectName, annotations, cluster, namespace) + return c.KubernetesClient.UpdateAnnotationInNamespace(ctx, resourceType, objectName, annotations, cluster, namespace) }, ) } // RemoveAnnotationInNamespace deletes an annotation for the given kubernetes resource if present. 
-func (c *RetrierClient) RemoveAnnotationInNamespace(ctx context.Context, resourceType, objectName, key string, cluster *types.Cluster, namespace string) error { +func (c *KubernetesRetrierClient) RemoveAnnotationInNamespace(ctx context.Context, resourceType, objectName, key string, cluster *types.Cluster, namespace string) error { return c.retrier.Retry( func() error { - return c.ClusterClient.RemoveAnnotationInNamespace(ctx, resourceType, objectName, key, cluster, namespace) + return c.KubernetesClient.RemoveAnnotationInNamespace(ctx, resourceType, objectName, key, cluster, namespace) }, ) } // ListObjects reads all Objects of a particular resource type in a namespace. -func (c *RetrierClient) ListObjects(ctx context.Context, resourceType, namespace, kubeconfig string, list kubernetes.ObjectList) error { +func (c *KubernetesRetrierClient) ListObjects(ctx context.Context, resourceType, namespace, kubeconfig string, list kubernetes.ObjectList) error { return c.retrier.Retry( func() error { - return c.ClusterClient.ListObjects(ctx, resourceType, namespace, kubeconfig, list) + return c.KubernetesClient.ListObjects(ctx, resourceType, namespace, kubeconfig, list) }, ) } // DeleteGitOpsConfig deletes a GitOpsConfigObject from the cluster. -func (c *RetrierClient) DeleteGitOpsConfig(ctx context.Context, cluster *types.Cluster, name string, namespace string) error { +func (c *KubernetesRetrierClient) DeleteGitOpsConfig(ctx context.Context, cluster *types.Cluster, name string, namespace string) error { return c.retrier.Retry( func() error { - return c.ClusterClient.DeleteGitOpsConfig(ctx, cluster, name, namespace) + return c.KubernetesClient.DeleteGitOpsConfig(ctx, cluster, name, namespace) }, ) } // DeleteEKSACluster deletes an EKSA Cluster object from the cluster. 
-func (c *RetrierClient) DeleteEKSACluster(ctx context.Context, cluster *types.Cluster, name string, namespace string) error { +func (c *KubernetesRetrierClient) DeleteEKSACluster(ctx context.Context, cluster *types.Cluster, name string, namespace string) error { return c.retrier.Retry( func() error { - return c.ClusterClient.DeleteEKSACluster(ctx, cluster, name, namespace) + return c.KubernetesClient.DeleteEKSACluster(ctx, cluster, name, namespace) }, ) } // DeleteAWSIamConfig deletes an AWSIamConfig object from the cluster. -func (c *RetrierClient) DeleteAWSIamConfig(ctx context.Context, cluster *types.Cluster, name string, namespace string) error { +func (c *KubernetesRetrierClient) DeleteAWSIamConfig(ctx context.Context, cluster *types.Cluster, name string, namespace string) error { return c.retrier.Retry( func() error { - return c.ClusterClient.DeleteAWSIamConfig(ctx, cluster, name, namespace) + return c.KubernetesClient.DeleteAWSIamConfig(ctx, cluster, name, namespace) }, ) } // DeleteOIDCConfig deletes a OIDCConfig object from the cluster. -func (c *RetrierClient) DeleteOIDCConfig(ctx context.Context, cluster *types.Cluster, name string, namespace string) error { +func (c *KubernetesRetrierClient) DeleteOIDCConfig(ctx context.Context, cluster *types.Cluster, name string, namespace string) error { return c.retrier.Retry( func() error { - return c.ClusterClient.DeleteOIDCConfig(ctx, cluster, name, namespace) + return c.KubernetesClient.DeleteOIDCConfig(ctx, cluster, name, namespace) }, ) } // DeleteCluster deletes a CAPI Cluster from the cluster. 
-func (c *RetrierClient) DeleteCluster(ctx context.Context, cluster, clusterToDelete *types.Cluster) error { +func (c *KubernetesRetrierClient) DeleteCluster(ctx context.Context, cluster, clusterToDelete *types.Cluster) error { return c.retrier.Retry( func() error { - return c.ClusterClient.DeleteCluster(ctx, cluster, clusterToDelete) + return c.KubernetesClient.DeleteCluster(ctx, cluster, clusterToDelete) }, ) } diff --git a/pkg/clustermanager/retrier_client_test.go b/pkg/clustermanager/retrier_client_test.go new file mode 100644 index 000000000000..d67507c395c3 --- /dev/null +++ b/pkg/clustermanager/retrier_client_test.go @@ -0,0 +1,191 @@ +package clustermanager_test + +import ( + "context" + "errors" + "testing" + + . "github.com/onsi/gomega" + + "github.com/aws/eks-anywhere/pkg/api/v1alpha1" + "github.com/aws/eks-anywhere/pkg/clustermanager" + "github.com/aws/eks-anywhere/pkg/clustermanager/mocks" + "github.com/aws/eks-anywhere/pkg/retrier" + "github.com/aws/eks-anywhere/pkg/types" + "github.com/golang/mock/gomock" +) + +func TestApplyKubeSpecFromBytes(t *testing.T) { + tt := NewWithT(t) + ctrl := gomock.NewController(t) + client := mocks.NewMockKubernetesClient(ctrl) + cluster := types.Cluster{} + + client.EXPECT().ApplyKubeSpecFromBytes(context.Background(), &cluster, []byte{}).MinTimes(2).Return(errors.New("this is an error")) + retrierClient := clustermanager.NewRetrierClient(client, retrier.NewWithMaxRetries(2, 0)) + + err := retrierClient.ApplyKubeSpecFromBytes(context.Background(), &cluster, []byte{}) + tt.Expect(err).To(HaveOccurred()) +} + +func TestApply(t *testing.T) { + tt := NewWithT(t) + ctrl := gomock.NewController(t) + client := mocks.NewMockKubernetesClient(ctrl) + cluster := v1alpha1.Cluster{} + + client.EXPECT().Apply(context.Background(), "", &cluster).MinTimes(2).Return(errors.New("this is an error")) + retrierClient := clustermanager.NewRetrierClient(client, retrier.NewWithMaxRetries(2, 0)) + + err := 
retrierClient.Apply(context.Background(), "", &cluster) + tt.Expect(err).To(HaveOccurred()) +} + +func TestPauseCAPICluster(t *testing.T) { + tt := NewWithT(t) + ctrl := gomock.NewController(t) + client := mocks.NewMockKubernetesClient(ctrl) + + client.EXPECT().PauseCAPICluster(context.Background(), "cluster", "kubeconfig").MinTimes(2).Return(errors.New("this is an error")) + retrierClient := clustermanager.NewRetrierClient(client, retrier.NewWithMaxRetries(2, 0)) + + err := retrierClient.PauseCAPICluster(context.Background(), "cluster", "kubeconfig") + tt.Expect(err).To(HaveOccurred()) +} + +func TestResumeCAPICluster(t *testing.T) { + tt := NewWithT(t) + ctrl := gomock.NewController(t) + client := mocks.NewMockKubernetesClient(ctrl) + + client.EXPECT().ResumeCAPICluster(context.Background(), "cluster", "kubeconfig").MinTimes(2).Return(errors.New("this is an error")) + retrierClient := clustermanager.NewRetrierClient(client, retrier.NewWithMaxRetries(2, 0)) + + err := retrierClient.ResumeCAPICluster(context.Background(), "cluster", "kubeconfig") + tt.Expect(err).To(HaveOccurred()) +} + +func TestApplyKubeSpecFromBytesForce(t *testing.T) { + tt := NewWithT(t) + ctrl := gomock.NewController(t) + client := mocks.NewMockKubernetesClient(ctrl) + cluster := types.Cluster{} + + client.EXPECT().ApplyKubeSpecFromBytesForce(context.Background(), &cluster, []byte{}).MinTimes(2).Return(errors.New("this is an error")) + retrierClient := clustermanager.NewRetrierClient(client, retrier.NewWithMaxRetries(2, 0)) + + err := retrierClient.ApplyKubeSpecFromBytesForce(context.Background(), &cluster, []byte{}) + tt.Expect(err).To(HaveOccurred()) +} + +func TestApplyKubeSpecFromBytesWithNamespace(t *testing.T) { + tt := NewWithT(t) + ctrl := gomock.NewController(t) + client := mocks.NewMockKubernetesClient(ctrl) + cluster := types.Cluster{} + + client.EXPECT().ApplyKubeSpecFromBytesWithNamespace(context.Background(), &cluster, []byte{}, "test").MinTimes(2).Return(errors.New("this is an 
error")) + retrierClient := clustermanager.NewRetrierClient(client, retrier.NewWithMaxRetries(2, 0)) + + err := retrierClient.ApplyKubeSpecFromBytesWithNamespace(context.Background(), &cluster, []byte{}, "test") + tt.Expect(err).To(HaveOccurred()) +} + +func TestUpdateAnnotationInNamespace(t *testing.T) { + tt := NewWithT(t) + ctrl := gomock.NewController(t) + client := mocks.NewMockKubernetesClient(ctrl) + cluster := types.Cluster{} + testMap := map[string]string{} + + client.EXPECT().UpdateAnnotationInNamespace(context.Background(), "test", "test", testMap, &cluster, "namespace").MinTimes(2).Return(errors.New("this is an error")) + retrierClient := clustermanager.NewRetrierClient(client, retrier.NewWithMaxRetries(2, 0)) + + err := retrierClient.UpdateAnnotationInNamespace(context.Background(), "test", "test", testMap, &cluster, "namespace") + tt.Expect(err).To(HaveOccurred()) +} + +func TestRemoveAnnotationInNamespace(t *testing.T) { + tt := NewWithT(t) + ctrl := gomock.NewController(t) + client := mocks.NewMockKubernetesClient(ctrl) + cluster := types.Cluster{} + + client.EXPECT().RemoveAnnotationInNamespace(context.Background(), "resourceType", "objectName", "key", &cluster, "namespace").MinTimes(2).Return(errors.New("this is an error")) + retrierClient := clustermanager.NewRetrierClient(client, retrier.NewWithMaxRetries(2, 0)) + + err := retrierClient.RemoveAnnotationInNamespace(context.Background(), "resourceType", "objectName", "key", &cluster, "namespace") + tt.Expect(err).To(HaveOccurred()) +} + +func TestListObjects(t *testing.T) { + tt := NewWithT(t) + ctrl := gomock.NewController(t) + client := mocks.NewMockKubernetesClient(ctrl) + + client.EXPECT().ListObjects(context.Background(), "resourceType", "namespace", "kubeconfig", &v1alpha1.ClusterList{}).MinTimes(2).Return(errors.New("this is an error")) + retrierClient := clustermanager.NewRetrierClient(client, retrier.NewWithMaxRetries(2, 0)) + + err := retrierClient.ListObjects(context.Background(), 
"resourceType", "namespace", "kubeconfig", &v1alpha1.ClusterList{}) + tt.Expect(err).To(HaveOccurred()) +} + +func TestDeleteGitOpsConfig(t *testing.T) { + tt := NewWithT(t) + ctrl := gomock.NewController(t) + client := mocks.NewMockKubernetesClient(ctrl) + + client.EXPECT().DeleteGitOpsConfig(context.Background(), &types.Cluster{}, "name", "namespace").MinTimes(2).Return(errors.New("this is an error")) + retrierClient := clustermanager.NewRetrierClient(client, retrier.NewWithMaxRetries(2, 0)) + + err := retrierClient.DeleteGitOpsConfig(context.Background(), &types.Cluster{}, "name", "namespace") + tt.Expect(err).To(HaveOccurred()) +} + +func TestDeleteEKSACluster(t *testing.T) { + tt := NewWithT(t) + ctrl := gomock.NewController(t) + client := mocks.NewMockKubernetesClient(ctrl) + + client.EXPECT().DeleteEKSACluster(context.Background(), &types.Cluster{}, "name", "namespace").MinTimes(2).Return(errors.New("this is an error")) + retrierClient := clustermanager.NewRetrierClient(client, retrier.NewWithMaxRetries(2, 0)) + + err := retrierClient.DeleteEKSACluster(context.Background(), &types.Cluster{}, "name", "namespace") + tt.Expect(err).To(HaveOccurred()) +} + +func TestDeleteAWSIamConfig(t *testing.T) { + tt := NewWithT(t) + ctrl := gomock.NewController(t) + client := mocks.NewMockKubernetesClient(ctrl) + + client.EXPECT().DeleteAWSIamConfig(context.Background(), &types.Cluster{}, "name", "namespace").MinTimes(2).Return(errors.New("this is an error")) + retrierClient := clustermanager.NewRetrierClient(client, retrier.NewWithMaxRetries(2, 0)) + + err := retrierClient.DeleteAWSIamConfig(context.Background(), &types.Cluster{}, "name", "namespace") + tt.Expect(err).To(HaveOccurred()) +} + +func TestDeleteOIDCConfig(t *testing.T) { + tt := NewWithT(t) + ctrl := gomock.NewController(t) + client := mocks.NewMockKubernetesClient(ctrl) + + client.EXPECT().DeleteOIDCConfig(context.Background(), &types.Cluster{}, "name", "namespace").MinTimes(2).Return(errors.New("this is an 
error"))
+	retrierClient := clustermanager.NewRetrierClient(client, retrier.NewWithMaxRetries(2, 0))
+
+	err := retrierClient.DeleteOIDCConfig(context.Background(), &types.Cluster{}, "name", "namespace")
+	tt.Expect(err).To(HaveOccurred())
+}
+
+func TestDeleteCluster(t *testing.T) {
+	tt := NewWithT(t)
+	ctrl := gomock.NewController(t)
+	client := mocks.NewMockKubernetesClient(ctrl)
+
+	client.EXPECT().DeleteCluster(context.Background(), &types.Cluster{}, &types.Cluster{}).MinTimes(2).Return(errors.New("this is an error"))
+	retrierClient := clustermanager.NewRetrierClient(client, retrier.NewWithMaxRetries(2, 0))
+
+	err := retrierClient.DeleteCluster(context.Background(), &types.Cluster{}, &types.Cluster{})
+	tt.Expect(err).To(HaveOccurred())
+}
diff --git a/pkg/dependencies/factory.go b/pkg/dependencies/factory.go
index 11fc883c123b..adb249b8d2e4 100644
--- a/pkg/dependencies/factory.go
+++ b/pkg/dependencies/factory.go
@@ -81,6 +81,7 @@ type Dependencies struct {
 	CiliumTemplater *cilium.Templater
 	AwsIamAuth *awsiamauth.Installer
 	ClusterManager *clustermanager.ClusterManager
+	EksaInstaller *clustermanager.EKSAInstaller
 	Bootstrapper *bootstrapper.Bootstrapper
 	GitOpsFlux *flux.Flux
 	Git *gitfactory.GitTools
@@ -1023,7 +1024,7 @@ type clusterManagerClient struct {
 	*executables.Clusterctl
-	*executables.Kubectl
+	*clustermanager.KubernetesRetrierClient
 }
 
 // ClusterManagerTimeoutOptions maintains the timeout options for cluster manager.
@@ -1063,7 +1064,7 @@ func (f *Factory) clusterManagerOpts(timeoutOpts *ClusterManagerTimeoutOptions) 
 // WithClusterManager builds a cluster manager based on the cluster config and timeout options.
func (f *Factory) WithClusterManager(clusterConfig *v1alpha1.Cluster, timeoutOpts *ClusterManagerTimeoutOptions) *Factory { - f.WithClusterctl().WithKubectl().WithNetworking(clusterConfig).WithWriter().WithDiagnosticBundleFactory().WithAwsIamAuth().WithFileReader().WithUnAuthKubeClient() + f.WithClusterctl().WithKubectl().WithNetworking(clusterConfig).WithWriter().WithDiagnosticBundleFactory().WithAwsIamAuth().WithFileReader().WithUnAuthKubeClient().WithEKSAInstaller() f.buildSteps = append(f.buildSteps, func(ctx context.Context) error { if f.dependencies.ClusterManager != nil { @@ -1077,15 +1078,15 @@ func (f *Factory) WithClusterManager(clusterConfig *v1alpha1.Cluster, timeoutOpt r = clustermanager.DefaultRetrier() } - client := clustermanager.NewRetrierClient( - &clusterManagerClient{ - f.dependencies.Clusterctl, - f.dependencies.Kubectl, - }, + retrierClient := clustermanager.NewRetrierClient( + f.dependencies.Kubectl, r, ) - installer := clustermanager.NewEKSAInstaller(client, f.dependencies.FileReader, f.eksaInstallerOpts()...) + client := clusterManagerClient{ + f.dependencies.Clusterctl, + retrierClient, + } f.dependencies.ClusterManager = clustermanager.New( f.dependencies.UnAuthKubeClient, @@ -1094,7 +1095,7 @@ func (f *Factory) WithClusterManager(clusterConfig *v1alpha1.Cluster, timeoutOpt f.dependencies.Writer, f.dependencies.DignosticCollectorFactory, f.dependencies.AwsIamAuth, - installer, + f.dependencies.EksaInstaller, f.clusterManagerOpts(timeoutOpts)..., ) return nil @@ -1103,6 +1104,36 @@ func (f *Factory) WithClusterManager(clusterConfig *v1alpha1.Cluster, timeoutOpt return f } +// WithEKSAInstaller builds the EKS-A installer dependency and registers it in the factory.
+func (f *Factory) WithEKSAInstaller() *Factory { + f.WithClusterctl().WithKubectl().WithFileReader() + + f.buildSteps = append(f.buildSteps, func(ctx context.Context) error { + if f.dependencies.EksaInstaller != nil { + return nil + } + + var r *retrier.Retrier + if f.config.noTimeouts { + r = retrier.NewWithNoTimeout() + } else { + r = clustermanager.DefaultRetrier() + } + + client := clustermanager.NewRetrierClient( + f.dependencies.Kubectl, + r, + ) + + installer := clustermanager.NewEKSAInstaller(client, f.dependencies.FileReader, f.eksaInstallerOpts()...) + + f.dependencies.EksaInstaller = installer + return nil + }) + + return f +} + // WithNoTimeouts injects no timeouts to all the dependencies with configurable timeout. // Calling this method sets no timeout for the waits and retries in all the // cluster operations, i.e. cluster manager, eksa installer, networking installer. diff --git a/pkg/dependencies/factory_test.go b/pkg/dependencies/factory_test.go index f2b992dc67dc..ac32446e4849 100644 --- a/pkg/dependencies/factory_test.go +++ b/pkg/dependencies/factory_test.go @@ -256,6 +256,18 @@ func TestFactoryBuildWithClusterManager(t *testing.T) { tt.Expect(deps.ClusterManager).NotTo(BeNil()) } +func TestFactoryBuildWithEksaInstaller(t *testing.T) { + tt := newTest(t, vsphere) + deps, err := dependencies.NewFactory(). + WithLocalExecutables(). + WithCliConfig(&tt.cliConfig). + WithEKSAInstaller(). + Build(context.Background()) + + tt.Expect(err).To(BeNil()) + tt.Expect(deps.EksaInstaller).NotTo(BeNil()) +} + func TestFactoryBuildWithHelmEnvClientFactory(t *testing.T) { tt := newTest(t, vsphere) deps, err := dependencies.NewFactory(). @@ -302,6 +314,7 @@ func TestFactoryBuildWithMultipleDependencies(t *testing.T) { WithBootstrapper(). WithCliConfig(&tt.cliConfig). WithClusterManager(tt.clusterSpec.Cluster, timeoutOpts). + WithEKSAInstaller(). 
WithProvider(tt.clusterConfigFile, tt.clusterSpec.Cluster, false, tt.hardwareConfigFile, false, tt.tinkerbellBootstrapIP, map[string]bool{}, tt.providerOptions). WithGitOpsFlux(tt.clusterSpec.Cluster, tt.clusterSpec.FluxConfig, nil). WithWriter(). diff --git a/pkg/task/task.go b/pkg/task/task.go index 96124973cddb..b1feda83afb9 100644 --- a/pkg/task/task.go +++ b/pkg/task/task.go @@ -36,6 +36,7 @@ type CommandContext struct { Writer filewriter.FileWriter EksdInstaller interfaces.EksdInstaller PackageInstaller interfaces.PackageInstaller + EksaInstaller interfaces.EksaInstaller EksdUpgrader interfaces.EksdUpgrader ClusterUpgrader interfaces.ClusterUpgrader ClusterCreator interfaces.ClusterCreator diff --git a/pkg/workflows/interfaces/interfaces.go b/pkg/workflows/interfaces/interfaces.go index f826dbb3910b..a334083031b2 100644 --- a/pkg/workflows/interfaces/interfaces.go +++ b/pkg/workflows/interfaces/interfaces.go @@ -3,6 +3,8 @@ package interfaces import ( "context" + "github.com/go-logr/logr" + "github.com/aws/eks-anywhere/pkg/bootstrapper" "github.com/aws/eks-anywhere/pkg/clients/kubernetes" "github.com/aws/eks-anywhere/pkg/cluster" @@ -99,3 +101,8 @@ type ClusterCreator interface { Run(ctx context.Context, spec *cluster.Spec, managementCluster types.Cluster) error CreateSync(ctx context.Context, spec *cluster.Spec, managementCluster *types.Cluster) (*types.Cluster, error) } + +// EksaInstaller exposes the EKSA installer methods. +type EksaInstaller interface { + Install(ctx context.Context, log logr.Logger, cluster *types.Cluster, spec *cluster.Spec) error +} diff --git a/pkg/workflows/interfaces/mocks/clients.go b/pkg/workflows/interfaces/mocks/clients.go index a285ff3a2ed8..a9ac4c13b0ea 100644 --- a/pkg/workflows/interfaces/mocks/clients.go +++ b/pkg/workflows/interfaces/mocks/clients.go @@ -1,5 +1,5 @@ // Code generated by MockGen. DO NOT EDIT. 
-// Source: github.com/aws/eks-anywhere/pkg/workflows/interfaces (interfaces: Bootstrapper,ClusterManager,GitOpsManager,Validator,CAPIManager,EksdInstaller,EksdUpgrader,PackageInstaller,ClusterUpgrader,ClusterCreator,ClientFactory) +// Source: github.com/aws/eks-anywhere/pkg/workflows/interfaces (interfaces: Bootstrapper,ClusterManager,GitOpsManager,Validator,CAPIManager,EksdInstaller,EksdUpgrader,PackageInstaller,ClusterUpgrader,ClusterCreator,ClientFactory,EksaInstaller) // Package mocks is a generated GoMock package. package mocks @@ -15,6 +15,7 @@ import ( providers "github.com/aws/eks-anywhere/pkg/providers" types "github.com/aws/eks-anywhere/pkg/types" validations "github.com/aws/eks-anywhere/pkg/validations" + logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" ) @@ -1003,3 +1004,40 @@ func (mr *MockClientFactoryMockRecorder) BuildClientFromKubeconfig(arg0 interfac mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildClientFromKubeconfig", reflect.TypeOf((*MockClientFactory)(nil).BuildClientFromKubeconfig), arg0) } + +// MockEksaInstaller is a mock of EksaInstaller interface. +type MockEksaInstaller struct { + ctrl *gomock.Controller + recorder *MockEksaInstallerMockRecorder +} + +// MockEksaInstallerMockRecorder is the mock recorder for MockEksaInstaller. +type MockEksaInstallerMockRecorder struct { + mock *MockEksaInstaller +} + +// NewMockEksaInstaller creates a new mock instance. +func NewMockEksaInstaller(ctrl *gomock.Controller) *MockEksaInstaller { + mock := &MockEksaInstaller{ctrl: ctrl} + mock.recorder = &MockEksaInstallerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockEksaInstaller) EXPECT() *MockEksaInstallerMockRecorder { + return m.recorder +} + +// Install mocks base method. 
+func (m *MockEksaInstaller) Install(arg0 context.Context, arg1 logr.Logger, arg2 *types.Cluster, arg3 *cluster.Spec) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Install", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// Install indicates an expected call of Install. +func (mr *MockEksaInstallerMockRecorder) Install(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Install", reflect.TypeOf((*MockEksaInstaller)(nil).Install), arg0, arg1, arg2, arg3) +} diff --git a/pkg/workflows/management/create.go b/pkg/workflows/management/create.go index 02e0460ce4e2..e8f5301ab652 100644 --- a/pkg/workflows/management/create.go +++ b/pkg/workflows/management/create.go @@ -20,6 +20,7 @@ type Create struct { eksdInstaller interfaces.EksdInstaller packageInstaller interfaces.PackageInstaller clusterCreator interfaces.ClusterCreator + eksaInstaller interfaces.EksaInstaller } // NewCreate builds a new create construct. 
@@ -27,7 +28,7 @@ func NewCreate(bootstrapper interfaces.Bootstrapper, provider providers.Provider clusterManager interfaces.ClusterManager, gitOpsManager interfaces.GitOpsManager, writer filewriter.FileWriter, eksdInstaller interfaces.EksdInstaller, packageInstaller interfaces.PackageInstaller, - clusterCreator interfaces.ClusterCreator, + clusterCreator interfaces.ClusterCreator, eksaInstaller interfaces.EksaInstaller, ) *Create { return &Create{ bootstrapper: bootstrapper, @@ -38,6 +39,7 @@ func NewCreate(bootstrapper interfaces.Bootstrapper, provider providers.Provider eksdInstaller: eksdInstaller, packageInstaller: packageInstaller, clusterCreator: clusterCreator, + eksaInstaller: eksaInstaller, } } @@ -54,6 +56,7 @@ func (c *Create) Run(ctx context.Context, clusterSpec *cluster.Spec, validator i EksdInstaller: c.eksdInstaller, PackageInstaller: c.packageInstaller, ClusterCreator: c.clusterCreator, + EksaInstaller: c.eksaInstaller, } return task.NewTaskRunner(&setupAndValidateCreate{}, c.writer).RunTask(ctx, commandContext) diff --git a/pkg/workflows/management/create_bootstrap.go b/pkg/workflows/management/create_bootstrap.go new file mode 100644 index 000000000000..139ad9e7746d --- /dev/null +++ b/pkg/workflows/management/create_bootstrap.go @@ -0,0 +1,41 @@ +package management + +import ( + "context" + + "github.com/aws/eks-anywhere/pkg/logger" + "github.com/aws/eks-anywhere/pkg/task" +) + +type createBootStrapClusterTask struct{} + +func (s *createBootStrapClusterTask) Run(ctx context.Context, commandContext *task.CommandContext) task.Task { + logger.Info("Creating new bootstrap cluster") + + bootstrapOptions, err := commandContext.Provider.BootstrapClusterOpts(commandContext.ClusterSpec) + if err != nil { + commandContext.SetError(err) + return nil + } + + bootstrapCluster, err := commandContext.Bootstrapper.CreateBootstrapCluster(ctx, commandContext.ClusterSpec, bootstrapOptions...) 
+ if err != nil { + commandContext.SetError(err) + return nil + } + commandContext.BootstrapCluster = bootstrapCluster + + return &installCAPIComponentsTask{} +} + +func (s *createBootStrapClusterTask) Name() string { + return "bootstrap-cluster-init" +} + +func (s *createBootStrapClusterTask) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) { + return nil, nil +} + +func (s *createBootStrapClusterTask) Checkpoint() *task.CompletedTask { + return nil +} diff --git a/pkg/workflows/management/create_install_capi.go b/pkg/workflows/management/create_install_capi.go new file mode 100644 index 000000000000..cdc28babbc24 --- /dev/null +++ b/pkg/workflows/management/create_install_capi.go @@ -0,0 +1,45 @@ +package management + +import ( + "context" + + "github.com/aws/eks-anywhere/pkg/logger" + "github.com/aws/eks-anywhere/pkg/task" + "github.com/aws/eks-anywhere/pkg/workflows" +) + +type installCAPIComponentsTask struct{} + +func (s *installCAPIComponentsTask) Run(ctx context.Context, commandContext *task.CommandContext) task.Task { + logger.Info("Provider specific pre-capi-install-setup on bootstrap cluster") + if err := commandContext.Provider.PreCAPIInstallOnBootstrap(ctx, commandContext.BootstrapCluster, commandContext.ClusterSpec); err != nil { + commandContext.SetError(err) + return &workflows.CollectMgmtClusterDiagnosticsTask{} + } + + logger.Info("Installing cluster-api providers on bootstrap cluster") + if err := commandContext.ClusterManager.InstallCAPI(ctx, commandContext.ClusterSpec, commandContext.BootstrapCluster, commandContext.Provider); err != nil { + commandContext.SetError(err) + return &workflows.CollectMgmtClusterDiagnosticsTask{} + } + + logger.Info("Provider specific post-setup") + if err := commandContext.Provider.PostBootstrapSetup(ctx, commandContext.ClusterSpec.Cluster, commandContext.BootstrapCluster); err != nil { + commandContext.SetError(err) + return 
&workflows.CollectMgmtClusterDiagnosticsTask{} + } + + return &installEksaComponentsOnBootstrapTask{} +} + +func (s *installCAPIComponentsTask) Name() string { + return "install-capi-components-bootstrap" +} + +func (s *installCAPIComponentsTask) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) { + return nil, nil +} + +func (s *installCAPIComponentsTask) Checkpoint() *task.CompletedTask { + return nil +} diff --git a/pkg/workflows/management/create_install_eksa.go b/pkg/workflows/management/create_install_eksa.go new file mode 100644 index 000000000000..199473161d61 --- /dev/null +++ b/pkg/workflows/management/create_install_eksa.go @@ -0,0 +1,61 @@ +package management + +import ( + "context" + + "github.com/aws/eks-anywhere/pkg/logger" + "github.com/aws/eks-anywhere/pkg/task" + "github.com/aws/eks-anywhere/pkg/types" + "github.com/aws/eks-anywhere/pkg/workflows" +) + +type installEksaComponentsOnBootstrapTask struct{} + +func (s *installEksaComponentsOnBootstrapTask) Run(ctx context.Context, commandContext *task.CommandContext) task.Task { + logger.Info("Installing EKS-A custom components on bootstrap cluster") + err := installEKSAComponents(ctx, commandContext, commandContext.BootstrapCluster) + if err != nil { + commandContext.SetError(err) + return &workflows.CollectDiagnosticsTask{} + } + + return nil +} + +func (s *installEksaComponentsOnBootstrapTask) Name() string { + return "eksa-components-bootstrap-install" +} + +func (s *installEksaComponentsOnBootstrapTask) Restore(ctx context.Context, commandContext *task.CommandContext, completedTask *task.CompletedTask) (task.Task, error) { + return nil, nil +} + +func (s *installEksaComponentsOnBootstrapTask) Checkpoint() *task.CompletedTask { + return nil +} + +func installEKSAComponents(ctx context.Context, commandContext *task.CommandContext, targetCluster *types.Cluster) error { + logger.Info("Installing EKS-D components") + err := 
commandContext.EksdInstaller.InstallEksdCRDs(ctx, commandContext.ClusterSpec, targetCluster) + if err != nil { + commandContext.SetError(err) + return err + } + + logger.Info("Creating EKS-A CRDs instances") + + logger.Info("Installing EKS-A custom components (CRD and controller)") + err = commandContext.ClusterManager.InstallCustomComponents(ctx, commandContext.ClusterSpec, targetCluster, commandContext.Provider) + if err != nil { + commandContext.SetError(err) + return err + } + + err = commandContext.EksdInstaller.InstallEksdManifest(ctx, commandContext.ClusterSpec, targetCluster) + if err != nil { + commandContext.SetError(err) + return err + } + + return nil +} diff --git a/pkg/workflows/management/create_test.go b/pkg/workflows/management/create_test.go index 4f02f95fd630..248e4907b25d 100644 --- a/pkg/workflows/management/create_test.go +++ b/pkg/workflows/management/create_test.go @@ -2,12 +2,15 @@ package management_test import ( "context" + "errors" + "fmt" "testing" "github.com/golang/mock/gomock" "github.com/aws/eks-anywhere/internal/test" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" + "github.com/aws/eks-anywhere/pkg/bootstrapper" "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/features" writermocks "github.com/aws/eks-anywhere/pkg/filewriter/mocks" @@ -49,6 +52,7 @@ func newCreateTest(t *testing.T) *createTestSetup { writer := writermocks.NewMockFileWriter(mockCtrl) eksdInstaller := mocks.NewMockEksdInstaller(mockCtrl) packageInstaller := mocks.NewMockPackageInstaller(mockCtrl) + eksaInstaller := mocks.NewMockEksaInstaller(mockCtrl) datacenterConfig := &v1alpha1.VSphereDatacenterConfig{} machineConfigs := []providers.MachineConfig{&v1alpha1.VSphereMachineConfig{}} @@ -64,6 +68,7 @@ func newCreateTest(t *testing.T) *createTestSetup { eksdInstaller, packageInstaller, clusterCreator, + eksaInstaller, ) for _, e := range featureEnvVars { @@ -105,13 +110,241 @@ func (c *createTestSetup) expectPreflightValidationsToPass() { 
c.validator.EXPECT().PreflightValidations(c.ctx) } +func (c *createTestSetup) expectCreateBootstrap() { + opts := []bootstrapper.BootstrapClusterOption{bootstrapper.WithExtraDockerMounts()} + + gomock.InOrder( + c.provider.EXPECT().BootstrapClusterOpts( + c.clusterSpec).Return(opts, nil), + // Checking for not nil because in go you can't compare closures + c.bootstrapper.EXPECT().CreateBootstrapCluster( + c.ctx, c.clusterSpec, gomock.Not(gomock.Nil()), + ).Return(c.bootstrapCluster, nil), + ) +} + +func (c *createTestSetup) expectCAPIInstall(err1, err2, err3 error) { + gomock.InOrder( + c.provider.EXPECT().PreCAPIInstallOnBootstrap( + c.ctx, c.bootstrapCluster, c.clusterSpec).Return(err1), + + c.clusterManager.EXPECT().InstallCAPI( + c.ctx, c.clusterSpec, c.bootstrapCluster, c.provider).Return(err2), + + c.provider.EXPECT().PostBootstrapSetup( + c.ctx, c.clusterSpec.Cluster, c.bootstrapCluster).Return(err3), + ) +} + +func (c *createTestSetup) expectInstallEksaComponentsBootstrap(err1, err2, err3 error) { + gomock.InOrder( + + c.eksdInstaller.EXPECT().InstallEksdCRDs(c.ctx, c.clusterSpec, c.bootstrapCluster).Return(err1), + + c.clusterManager.EXPECT().InstallCustomComponents( + c.ctx, c.clusterSpec, c.bootstrapCluster, c.provider).Return(err2), + + c.eksdInstaller.EXPECT().InstallEksdManifest( + c.ctx, c.clusterSpec, c.bootstrapCluster).Return(err3), + ) +} + func TestCreateRunSuccess(t *testing.T) { test := newCreateTest(t) test.expectSetup() test.expectPreflightValidationsToPass() + test.expectCreateBootstrap() + test.expectCAPIInstall(nil, nil, nil) + test.expectInstallEksaComponentsBootstrap(nil, nil, nil) err := test.run() if err != nil { t.Fatalf("Create.Run() err = %v, want err = nil", err) } } + +func TestCreateBootstrapOptsFailure(t *testing.T) { + c := newCreateTest(t) + c.expectSetup() + c.expectPreflightValidationsToPass() + + err := errors.New("test") + + opts := []bootstrapper.BootstrapClusterOption{} + + gomock.InOrder( + 
c.provider.EXPECT().BootstrapClusterOpts( + c.clusterSpec).Return(opts, err), + ) + c.writer.EXPECT().Write(fmt.Sprintf("%s-checkpoint.yaml", c.clusterSpec.Cluster.Name), gomock.Any()) + + err = c.run() + if err == nil { + t.Fatalf("expected error from task") + } +} + +func TestCreateValidationsFailure(t *testing.T) { + c := newCreateTest(t) + err := errors.New("test") + + c.provider.EXPECT().SetupAndValidateCreateCluster(c.ctx, c.clusterSpec).Return(err) + c.provider.EXPECT().Name() + c.gitOpsManager.EXPECT().Validations(c.ctx, c.clusterSpec) + + c.validator.EXPECT().PreflightValidations(c.ctx) + c.writer.EXPECT().Write(fmt.Sprintf("%s-checkpoint.yaml", c.clusterSpec.Cluster.Name), gomock.Any()) + + err = c.run() + if err == nil { + t.Fatalf("expected error from task") + } +} + +func TestCreateBootstrapFailure(t *testing.T) { + c := newCreateTest(t) + c.expectSetup() + c.expectPreflightValidationsToPass() + + err := errors.New("test") + + opts := []bootstrapper.BootstrapClusterOption{} + + gomock.InOrder( + c.provider.EXPECT().BootstrapClusterOpts( + c.clusterSpec).Return(opts, nil), + c.bootstrapper.EXPECT().CreateBootstrapCluster( + c.ctx, c.clusterSpec, gomock.Not(gomock.Nil()), + ).Return(nil, err), + ) + c.writer.EXPECT().Write(fmt.Sprintf("%s-checkpoint.yaml", c.clusterSpec.Cluster.Name), gomock.Any()) + + err = c.run() + if err == nil { + t.Fatalf("expected error from task") + } +} + +func TestCreatePreCAPIFailure(t *testing.T) { + c := newCreateTest(t) + c.expectSetup() + c.expectCreateBootstrap() + c.expectPreflightValidationsToPass() + + c.provider.EXPECT().PreCAPIInstallOnBootstrap( + c.ctx, c.bootstrapCluster, c.clusterSpec).Return(errors.New("test")) + + c.clusterManager.EXPECT().SaveLogsManagementCluster(c.ctx, c.clusterSpec, c.bootstrapCluster) + c.writer.EXPECT().Write(fmt.Sprintf("%s-checkpoint.yaml", c.clusterSpec.Cluster.Name), gomock.Any()) + + err := c.run() + if err == nil { + t.Fatalf("Create.Run() expected to return an error %v", err) + } 
+} + +func TestCreateInstallCAPIFailure(t *testing.T) { + c := newCreateTest(t) + c.expectSetup() + c.expectCreateBootstrap() + c.expectPreflightValidationsToPass() + + gomock.InOrder( + c.provider.EXPECT().PreCAPIInstallOnBootstrap( + c.ctx, c.bootstrapCluster, c.clusterSpec), + + c.clusterManager.EXPECT().InstallCAPI( + c.ctx, c.clusterSpec, c.bootstrapCluster, c.provider).Return(errors.New("test")), + ) + + c.clusterManager.EXPECT().SaveLogsManagementCluster(c.ctx, c.clusterSpec, c.bootstrapCluster) + c.writer.EXPECT().Write(fmt.Sprintf("%s-checkpoint.yaml", c.clusterSpec.Cluster.Name), gomock.Any()) + + err := c.run() + if err == nil { + t.Fatalf("Create.Run() expected to return an error %v", err) + } +} + +func TestCreatePostCAPIFailure(t *testing.T) { + c := newCreateTest(t) + err := errors.New("test") + c.expectSetup() + c.expectCreateBootstrap() + c.expectPreflightValidationsToPass() + c.expectCAPIInstall(nil, nil, err) + + c.clusterManager.EXPECT().SaveLogsManagementCluster(c.ctx, c.clusterSpec, c.bootstrapCluster) + c.writer.EXPECT().Write(fmt.Sprintf("%s-checkpoint.yaml", c.clusterSpec.Cluster.Name), gomock.Any()) + + err = c.run() + if err == nil { + t.Fatalf("Create.Run() expected to return an error %v", err) + } +} + +func TestCreateInstallCustomComponentsFailure(t *testing.T) { + c := newCreateTest(t) + c.expectSetup() + c.expectCreateBootstrap() + c.expectPreflightValidationsToPass() + c.expectCAPIInstall(nil, nil, nil) + + c.eksdInstaller.EXPECT().InstallEksdCRDs(c.ctx, c.clusterSpec, c.bootstrapCluster).Return(nil) + + c.clusterManager.EXPECT().InstallCustomComponents( + c.ctx, c.clusterSpec, c.bootstrapCluster, c.provider).Return(errors.New("test")) + + c.clusterManager.EXPECT().SaveLogsManagementCluster(c.ctx, c.clusterSpec, c.bootstrapCluster) + c.clusterManager.EXPECT().SaveLogsWorkloadCluster(c.ctx, c.provider, c.clusterSpec, nil) + + c.writer.EXPECT().Write(fmt.Sprintf("%s-checkpoint.yaml", c.clusterSpec.Cluster.Name), gomock.Any()) + + err 
:= c.run() + if err == nil { + t.Fatalf("Create.Run() expected to return an error %v", err) + } +} + +func TestCreateInstallCRDFailure(t *testing.T) { + c := newCreateTest(t) + c.expectSetup() + c.expectCreateBootstrap() + c.expectPreflightValidationsToPass() + c.expectCAPIInstall(nil, nil, nil) + + gomock.InOrder( + c.eksdInstaller.EXPECT().InstallEksdCRDs(c.ctx, c.clusterSpec, c.bootstrapCluster).Return(errors.New("test")), + ) + + c.clusterManager.EXPECT().SaveLogsManagementCluster(c.ctx, c.clusterSpec, c.bootstrapCluster) + c.clusterManager.EXPECT().SaveLogsWorkloadCluster(c.ctx, c.provider, c.clusterSpec, nil) + + c.writer.EXPECT().Write(fmt.Sprintf("%s-checkpoint.yaml", c.clusterSpec.Cluster.Name), gomock.Any()) + + err := c.run() + if err == nil { + t.Fatalf("Create.Run() expected to return an error %v", err) + } +} + +func TestCreateInstallEksdManifestFailure(t *testing.T) { + c := newCreateTest(t) + c.expectSetup() + c.expectCreateBootstrap() + c.expectPreflightValidationsToPass() + c.expectCAPIInstall(nil, nil, nil) + + err := errors.New("test") + c.expectInstallEksaComponentsBootstrap(nil, nil, err) + + c.clusterManager.EXPECT().SaveLogsManagementCluster(c.ctx, c.clusterSpec, c.bootstrapCluster) + c.clusterManager.EXPECT().SaveLogsWorkloadCluster(c.ctx, c.provider, c.clusterSpec, nil) + + c.writer.EXPECT().Write(fmt.Sprintf("%s-checkpoint.yaml", c.clusterSpec.Cluster.Name), gomock.Any()) + + err = c.run() + if err == nil { + t.Fatalf("Create.Run() expected to return an error %v", err) + } +} diff --git a/pkg/workflows/management/validate.go b/pkg/workflows/management/validate.go index f42bb2e2fc75..edf97ba02347 100644 --- a/pkg/workflows/management/validate.go +++ b/pkg/workflows/management/validate.go @@ -26,7 +26,7 @@ func (s *setupAndValidateCreate) Run(ctx context.Context, commandContext *task.C return nil } - return nil + return &createBootStrapClusterTask{} } func (s *setupAndValidateCreate) providerValidation(ctx context.Context, commandContext 
*task.CommandContext) []validations.Validation {