diff --git a/pkg/providers/vsphere/testdata/cluster_main_stacked_etcd.yaml b/pkg/providers/vsphere/testdata/cluster_main_stacked_etcd.yaml new file mode 100644 index 000000000000..bd10bc48ea95 --- /dev/null +++ b/pkg/providers/vsphere/testdata/cluster_main_stacked_etcd.yaml @@ -0,0 +1,108 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: test + namespace: test-namespace +spec: + controlPlaneConfiguration: + count: 3 + endpoint: + host: 1.2.3.4 + machineGroupRef: + name: test-cp + kind: VSphereMachineConfig + kubernetesVersion: "1.19" + workerNodeGroupConfigurations: + - count: 3 + machineGroupRef: + name: test-wn + kind: VSphereMachineConfig + name: md-0 + datacenterRef: + kind: VSphereDatacenterConfig + name: test + clusterNetwork: + cni: "cilium" + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 + node: + cidrMaskSize: 8 +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: VSphereMachineConfig +metadata: + name: test-cp + namespace: test-namespace +spec: + diskGiB: 25 + cloneMode: linkedClone + datastore: "/SDDC-Datacenter/datastore/WorkloadDatastore" + folder: "/SDDC-Datacenter/vm" + memoryMiB: 8192 + numCPUs: 2 + osFamily: ubuntu + resourcePool: "*/Resources" + storagePolicyName: "vSAN Default Storage Policy" + template: "/SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6" + users: + - name: capv + sshAuthorizedKeys: + - "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: VSphereMachineConfig +metadata: + name: test-wn + namespace: test-namespace +spec: + diskGiB: 25 + cloneMode: linkedClone + datastore: "/SDDC-Datacenter/datastore/WorkloadDatastore" + folder: "/SDDC-Datacenter/vm" + memoryMiB: 4096 + numCPUs: 3 + osFamily: ubuntu + resourcePool: "*/Resources" + storagePolicyName: "vSAN Default Storage Policy" + template: "/SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6" + users: + - name: capv + sshAuthorizedKeys: + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== 
testemail@test.com" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: VSphereMachineConfig +metadata: + name: test-etcd + namespace: test-namespace +spec: + diskGiB: 25 + cloneMode: linkedClone + datastore: "/SDDC-Datacenter/datastore/WorkloadDatastore" + folder: "/SDDC-Datacenter/vm" + memoryMiB: 4096 + numCPUs: 3 + osFamily: ubuntu + resourcePool: "*/Resources" + storagePolicyName: "vSAN Default Storage Policy" + template: "/SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6" + users: + - name: capv + sshAuthorizedKeys: + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: VSphereDatacenterConfig +metadata: + name: test + namespace: test-namespace +spec: + datacenter: "SDDC-Datacenter" + network: "/SDDC-Datacenter/network/sddc-cgw-network-1" + server: "vsphere_server" + thumbprint: "ABCDEFG" + insecure: false diff --git a/pkg/providers/vsphere/vsphere.go b/pkg/providers/vsphere/vsphere.go index 7c0a330b92fc..8693c1a364b7 100644 --- a/pkg/providers/vsphere/vsphere.go +++ b/pkg/providers/vsphere/vsphere.go @@ -106,37 +106,37 @@ type ProviderGovcClient interface { AddTag(ctx context.Context, path, tag string) error ListCategories(ctx context.Context) ([]string, error) CreateCategoryForVM(ctx context.Context, 
name string) error - CreateUser(ctx context.Context, username string, password string) error + CreateUser(ctx context.Context, username, password string) error UserExists(ctx context.Context, username string) (bool, error) CreateGroup(ctx context.Context, name string) error GroupExists(ctx context.Context, name string) (bool, error) - AddUserToGroup(ctx context.Context, name string, username string) error + AddUserToGroup(ctx context.Context, name, username string) error RoleExists(ctx context.Context, name string) (bool, error) CreateRole(ctx context.Context, name string, privileges []string) error - SetGroupRoleOnObject(ctx context.Context, principal string, role string, object string, domain string) error + SetGroupRoleOnObject(ctx context.Context, principal, role, object, domain string) error GetHardDiskSize(ctx context.Context, vm, datacenter string) (map[string]float64, error) GetResourcePoolInfo(ctx context.Context, datacenter, resourcepool string, args ...string) (map[string]int, error) } type ProviderKubectlClient interface { ApplyKubeSpecFromBytes(ctx context.Context, cluster *types.Cluster, data []byte) error - CreateNamespaceIfNotPresent(ctx context.Context, kubeconfig string, namespace string) error - LoadSecret(ctx context.Context, secretObject string, secretObjType string, secretObjectName string, kubeConfFile string) error + CreateNamespaceIfNotPresent(ctx context.Context, kubeconfig, namespace string) error + LoadSecret(ctx context.Context, secretObject, secretObjType, secretObjectName, kubeConfFile string) error GetEksaCluster(ctx context.Context, cluster *types.Cluster, clusterName string) (*v1alpha1.Cluster, error) - GetEksaVSphereDatacenterConfig(ctx context.Context, vsphereDatacenterConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.VSphereDatacenterConfig, error) - GetEksaVSphereMachineConfig(ctx context.Context, vsphereMachineConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.VSphereMachineConfig, 
error) + GetEksaVSphereDatacenterConfig(ctx context.Context, vsphereDatacenterConfigName, kubeconfigFile, namespace string) (*v1alpha1.VSphereDatacenterConfig, error) + GetEksaVSphereMachineConfig(ctx context.Context, vsphereMachineConfigName, kubeconfigFile, namespace string) (*v1alpha1.VSphereMachineConfig, error) GetMachineDeployment(ctx context.Context, machineDeploymentName string, opts ...executables.KubectlOpt) (*clusterv1.MachineDeployment, error) GetKubeadmControlPlane(ctx context.Context, cluster *types.Cluster, clusterName string, opts ...executables.KubectlOpt) (*controlplanev1.KubeadmControlPlane, error) GetEtcdadmCluster(ctx context.Context, cluster *types.Cluster, clusterName string, opts ...executables.KubectlOpt) (*etcdv1.EtcdadmCluster, error) GetSecretFromNamespace(ctx context.Context, kubeconfigFile, name, namespace string) (*corev1.Secret, error) UpdateAnnotation(ctx context.Context, resourceType, objectName string, annotations map[string]string, opts ...executables.KubectlOpt) error RemoveAnnotationInNamespace(ctx context.Context, resourceType, objectName, key string, cluster *types.Cluster, namespace string) error - SearchVsphereMachineConfig(ctx context.Context, name string, kubeconfigFile string, namespace string) ([]*v1alpha1.VSphereMachineConfig, error) - SearchVsphereDatacenterConfig(ctx context.Context, name string, kubeconfigFile string, namespace string) ([]*v1alpha1.VSphereDatacenterConfig, error) + SearchVsphereMachineConfig(ctx context.Context, name, kubeconfigFile, namespace string) ([]*v1alpha1.VSphereMachineConfig, error) + SearchVsphereDatacenterConfig(ctx context.Context, name, kubeconfigFile, namespace string) ([]*v1alpha1.VSphereDatacenterConfig, error) SetDaemonSetImage(ctx context.Context, kubeconfigFile, name, namespace, container, image string) error - DeleteEksaDatacenterConfig(ctx context.Context, vsphereDatacenterResourceType string, vsphereDatacenterConfigName string, kubeconfigFile string, namespace string) error - 
DeleteEksaMachineConfig(ctx context.Context, vsphereMachineResourceType string, vsphereMachineConfigName string, kubeconfigFile string, namespace string) error - ApplyTolerationsFromTaintsToDaemonSet(ctx context.Context, oldTaints []corev1.Taint, newTaints []corev1.Taint, dsName string, kubeconfigFile string) error + DeleteEksaDatacenterConfig(ctx context.Context, vsphereDatacenterResourceType, vsphereDatacenterConfigName, kubeconfigFile, namespace string) error + DeleteEksaMachineConfig(ctx context.Context, vsphereMachineResourceType, vsphereMachineConfigName, kubeconfigFile, namespace string) error + ApplyTolerationsFromTaintsToDaemonSet(ctx context.Context, oldTaints, newTaints []corev1.Taint, dsName, kubeconfigFile string) error } // IPValidator is an interface that defines methods to validate the control plane IP. @@ -376,7 +376,7 @@ func (p *vsphereProvider) SetupAndValidateCreateCluster(ctx context.Context, clu return nil } -func (p *vsphereProvider) SetupAndValidateUpgradeCluster(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec, _ *cluster.Spec) error { +func (p *vsphereProvider) SetupAndValidateUpgradeCluster(ctx context.Context, cluster *types.Cluster, clusterSpec, _ *cluster.Spec) error { if err := SetupEnvVars(clusterSpec.VSphereDatacenter); err != nil { return fmt.Errorf("failed setup and validations: %v", err) } @@ -545,7 +545,11 @@ func (p *vsphereProvider) validateDatastoreUsageForUpgrade(ctx context.Context, etcdMachineConfig := currentClusterSpec.etcdMachineConfig() if etcdMachineConfig != nil { - if err := p.calculateDatastoreUsage(ctx, etcdMachineConfig, cluster, usage, prevEksaCluster.Spec.ExternalEtcdConfiguration.Count, currentClusterSpec.Cluster.Spec.ExternalEtcdConfiguration.Count); err != nil { + prevCount := 0 + if prevEksaCluster.Spec.ExternalEtcdConfiguration != nil { + prevCount = prevEksaCluster.Spec.ExternalEtcdConfiguration.Count + } + if err := p.calculateDatastoreUsage(ctx, etcdMachineConfig, cluster, usage, 
prevCount, currentClusterSpec.Cluster.Spec.ExternalEtcdConfiguration.Count); err != nil { return fmt.Errorf("calculating datastore usage: %v", err) } } @@ -652,7 +656,7 @@ func (p *vsphereProvider) validateMemoryUsage(ctx context.Context, clusterSpec * } for resourcePool, remaniningMiB := range memoryUsage { if remaniningMiB != -1 && remaniningMiB < 0 { - return fmt.Errorf("not enough memory avaialable in resource pool %v for given memoryMiB and count for respective machine groups", resourcePool) + return fmt.Errorf("not enough memory available in resource pool %v for given memoryMiB and count for respective machine groups", resourcePool) } } logger.V(5).Info("Memory availability for machine configs in requested resource pool validated") @@ -737,7 +741,7 @@ func NeedsNewWorkloadTemplate(oldSpec, newSpec *cluster.Spec, oldVdc, newVdc *v1 return AnyImmutableFieldChanged(oldVdc, newVdc, oldVmc, newVmc) } -func NeedsNewKubeadmConfigTemplate(newWorkerNodeGroup *v1alpha1.WorkerNodeGroupConfiguration, oldWorkerNodeGroup *v1alpha1.WorkerNodeGroupConfiguration, oldWorkerNodeVmc *v1alpha1.VSphereMachineConfig, newWorkerNodeVmc *v1alpha1.VSphereMachineConfig) bool { +func NeedsNewKubeadmConfigTemplate(newWorkerNodeGroup, oldWorkerNodeGroup *v1alpha1.WorkerNodeGroupConfiguration, oldWorkerNodeVmc, newWorkerNodeVmc *v1alpha1.VSphereMachineConfig) bool { return !v1alpha1.TaintsSliceEqual(newWorkerNodeGroup.Taints, oldWorkerNodeGroup.Taints) || !v1alpha1.MapEqual(newWorkerNodeGroup.Labels, oldWorkerNodeGroup.Labels) || !v1alpha1.UsersSliceEqual(oldWorkerNodeVmc.Spec.Users, newWorkerNodeVmc.Spec.Users) } @@ -1125,7 +1129,7 @@ func (p *vsphereProvider) getWorkerNodeMachineConfigs(ctx context.Context, workl return nil, nil, nil } -func (p *vsphereProvider) needsNewMachineTemplate(currentSpec, newClusterSpec *cluster.Spec, workerNodeGroupConfiguration v1alpha1.WorkerNodeGroupConfiguration, vdc *v1alpha1.VSphereDatacenterConfig, prevWorkerNodeGroupConfigs 
map[string]v1alpha1.WorkerNodeGroupConfiguration, oldWorkerMachineConfig *v1alpha1.VSphereMachineConfig, newWorkerMachineConfig *v1alpha1.VSphereMachineConfig) (bool, error) { +func (p *vsphereProvider) needsNewMachineTemplate(currentSpec, newClusterSpec *cluster.Spec, workerNodeGroupConfiguration v1alpha1.WorkerNodeGroupConfiguration, vdc *v1alpha1.VSphereDatacenterConfig, prevWorkerNodeGroupConfigs map[string]v1alpha1.WorkerNodeGroupConfiguration, oldWorkerMachineConfig, newWorkerMachineConfig *v1alpha1.VSphereMachineConfig) (bool, error) { if prevWorkerNode, ok := prevWorkerNodeGroupConfigs[workerNodeGroupConfiguration.Name]; ok { needsNewWorkloadTemplate := NeedsNewWorkloadTemplate(currentSpec, newClusterSpec, vdc, newClusterSpec.VSphereDatacenter, oldWorkerMachineConfig, newWorkerMachineConfig, prevWorkerNode, workerNodeGroupConfiguration) return needsNewWorkloadTemplate, nil @@ -1133,7 +1137,7 @@ func (p *vsphereProvider) needsNewMachineTemplate(currentSpec, newClusterSpec *c return true, nil } -func (p *vsphereProvider) needsNewKubeadmConfigTemplate(workerNodeGroupConfiguration v1alpha1.WorkerNodeGroupConfiguration, prevWorkerNodeGroupConfigs map[string]v1alpha1.WorkerNodeGroupConfiguration, oldWorkerNodeVmc *v1alpha1.VSphereMachineConfig, newWorkerNodeVmc *v1alpha1.VSphereMachineConfig) (bool, error) { +func (p *vsphereProvider) needsNewKubeadmConfigTemplate(workerNodeGroupConfiguration v1alpha1.WorkerNodeGroupConfiguration, prevWorkerNodeGroupConfigs map[string]v1alpha1.WorkerNodeGroupConfiguration, oldWorkerNodeVmc, newWorkerNodeVmc *v1alpha1.VSphereMachineConfig) (bool, error) { if _, ok := prevWorkerNodeGroupConfigs[workerNodeGroupConfiguration.Name]; ok { existingWorkerNodeGroupConfig := prevWorkerNodeGroupConfigs[workerNodeGroupConfiguration.Name] return NeedsNewKubeadmConfigTemplate(&workerNodeGroupConfiguration, &existingWorkerNodeGroupConfig, oldWorkerNodeVmc, newWorkerNodeVmc), nil @@ -1210,7 +1214,7 @@ func (p *vsphereProvider) Version(components 
*cluster.ManagementComponents) stri return components.VSphere.Version } -func (p *vsphereProvider) RunPostControlPlaneUpgrade(_ context.Context, _ *cluster.Spec, _ *cluster.Spec, _ *types.Cluster, _ *types.Cluster) error { +func (p *vsphereProvider) RunPostControlPlaneUpgrade(_ context.Context, _, _ *cluster.Spec, _, _ *types.Cluster) error { return nil } diff --git a/pkg/providers/vsphere/vsphere_test.go b/pkg/providers/vsphere/vsphere_test.go index ac5779dbea11..6d4ad97fda74 100644 --- a/pkg/providers/vsphere/vsphere_test.go +++ b/pkg/providers/vsphere/vsphere_test.go @@ -41,25 +41,26 @@ import ( ) const ( - testClusterConfigMainFilename = "cluster_main.yaml" - testClusterConfigMain121Filename = "cluster_main_121.yaml" - testClusterConfigMain121CPOnlyFilename = "cluster_main_121_cp_only.yaml" - testClusterConfigWithCPUpgradeStrategy = "cluster_main_121_cp_upgrade_strategy.yaml" - testClusterConfigWithMDUpgradeStrategy = "cluster_main_121_md_upgrade_strategy.yaml" - testClusterConfigRedhatFilename = "cluster_redhat_external_etcd.yaml" - testDataDir = "testdata" - expectedVSphereName = "vsphere" - expectedVSphereUsername = "vsphere_username" - expectedVSpherePassword = "vsphere_password" - expectedVSphereServer = "vsphere_server" - expectedExpClusterResourceSet = "expClusterResourceSetKey" - eksd119Release = "kubernetes-1-19-eks-4" - eksd119ReleaseTag = "eksdRelease:kubernetes-1-19-eks-4" - eksd121ReleaseTag = "eksdRelease:kubernetes-1-21-eks-4" - eksd129ReleaseTag = "eksdRelease:kubernetes-1-29-eks-4" - ubuntuOSTag = "os:ubuntu" - bottlerocketOSTag = "os:bottlerocket" - testTemplate = "/SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6" + testClusterConfigMainFilename = "cluster_main.yaml" + testClusterConfigMainStackedEtcdFilename = "cluster_main_stacked_etcd.yaml" + testClusterConfigMain121Filename = "cluster_main_121.yaml" + testClusterConfigMain121CPOnlyFilename = "cluster_main_121_cp_only.yaml" + testClusterConfigWithCPUpgradeStrategy = 
"cluster_main_121_cp_upgrade_strategy.yaml" + testClusterConfigWithMDUpgradeStrategy = "cluster_main_121_md_upgrade_strategy.yaml" + testClusterConfigRedhatFilename = "cluster_redhat_external_etcd.yaml" + testDataDir = "testdata" + expectedVSphereName = "vsphere" + expectedVSphereUsername = "vsphere_username" + expectedVSpherePassword = "vsphere_password" + expectedVSphereServer = "vsphere_server" + expectedExpClusterResourceSet = "expClusterResourceSetKey" + eksd119Release = "kubernetes-1-19-eks-4" + eksd119ReleaseTag = "eksdRelease:kubernetes-1-19-eks-4" + eksd121ReleaseTag = "eksdRelease:kubernetes-1-21-eks-4" + eksd129ReleaseTag = "eksdRelease:kubernetes-1-29-eks-4" + ubuntuOSTag = "os:ubuntu" + bottlerocketOSTag = "os:bottlerocket" + testTemplate = "/SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6" ) type DummyProviderGovcClient struct { @@ -182,7 +183,7 @@ func (pc *DummyProviderGovcClient) CreateCategoryForVM(ctx context.Context, name return nil } -func (pc *DummyProviderGovcClient) AddUserToGroup(ctx context.Context, name string, username string) error { +func (pc *DummyProviderGovcClient) AddUserToGroup(ctx context.Context, name, username string) error { return nil } @@ -194,7 +195,7 @@ func (pc *DummyProviderGovcClient) CreateRole(ctx context.Context, name string, return nil } -func (pc *DummyProviderGovcClient) CreateUser(ctx context.Context, username string, password string) error { +func (pc *DummyProviderGovcClient) CreateUser(ctx context.Context, username, password string) error { return nil } @@ -210,7 +211,7 @@ func (pc *DummyProviderGovcClient) RoleExists(ctx context.Context, name string) return false, nil } -func (pc *DummyProviderGovcClient) SetGroupRoleOnObject(ctx context.Context, principal string, role string, object string, domain string) error { +func (pc *DummyProviderGovcClient) SetGroupRoleOnObject(ctx context.Context, principal, role, object, domain string) error { return nil } @@ -374,6 +375,40 @@ func newProviderTest(t 
*testing.T) *providerTest { return p } +func newStackedEtcdProviderTest(t *testing.T) *providerTest { + setupContext(t) + ctrl := gomock.NewController(t) + kubectl := mocks.NewMockProviderKubectlClient(ctrl) + govc := mocks.NewMockProviderGovcClient(ctrl) + vscb, _ := newMockVSphereClientBuilder(ctrl) + ipValidator := mocks.NewMockIPValidator(ctrl) + spec := givenClusterSpec(t, testClusterConfigMainStackedEtcdFilename) + p := &providerTest{ + t: t, + WithT: NewWithT(t), + ctx: context.Background(), + managementCluster: &types.Cluster{ + Name: "m-cluster", + KubeconfigFile: "kubeconfig-m.kubeconfig", + }, + workloadCluster: &types.Cluster{ + Name: "test", + KubeconfigFile: "kubeconfig-w.kubeconfig", + }, + cluster: spec.Cluster, + clusterSpec: spec, + datacenterConfig: spec.VSphereDatacenter, + machineConfigs: spec.VSphereMachineConfigs, + kubectl: kubectl, + govc: govc, + clientBuilder: vscb, + ipValidator: ipValidator, + } + p.buildNewProvider() + + return p +} + func (tt *providerTest) setExpectationsForDefaultDiskAndCloneModeGovcCalls() { for _, m := range tt.machineConfigs { tt.govc.EXPECT().GetVMDiskSizeInGB(tt.ctx, m.Spec.Template, tt.datacenterConfig.Spec.Datacenter).Return(25, nil) @@ -499,7 +534,7 @@ type mockVSphereClientBuilder struct { vsc *govmomi_mocks.MockVSphereClient } -func (mvscb *mockVSphereClientBuilder) Build(ctx context.Context, host string, username string, password string, insecure bool, datacenter string) (govmomi.VSphereClient, error) { +func (mvscb *mockVSphereClientBuilder) Build(ctx context.Context, host, username, password string, insecure bool, datacenter string) (govmomi.VSphereClient, error) { return mvscb.vsc, nil } @@ -1760,6 +1795,26 @@ func TestSetupAndValidateUpgradeClusterNoPassword(t *testing.T) { thenErrorExpected(t, "failed setup and validations: EKSA_VSPHERE_PASSWORD is not set or is empty", err) } +func TestSetupAndValidateUpgradeClusterDatastoreUsageSuccess(t *testing.T) { + ctx := context.Background() + clusterSpec := 
givenClusterSpec(t, testClusterConfigMainFilename) + cluster := &types.Cluster{} + provider := givenProvider(t) + mockCtrl := gomock.NewController(t) + kubectl := mocks.NewMockProviderKubectlClient(mockCtrl) + provider.providerKubectlClient = kubectl + setupContext(t) + + controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name + kubectl.EXPECT().GetEksaCluster(ctx, cluster, clusterSpec.Cluster.GetName()).Return(clusterSpec.Cluster.DeepCopy(), nil).AnyTimes() + kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, gomock.Any(), cluster.KubeconfigFile, clusterSpec.Cluster.GetNamespace()).Return(clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName], nil).AnyTimes() + + err := provider.SetupAndValidateUpgradeCluster(ctx, cluster, clusterSpec, clusterSpec) + if err != nil { + t.Fatalf("unexpected failure %v", err) + } +} + func TestSetupAndValidateUpgradeClusterDatastoreUsageError(t *testing.T) { ctx := context.Background() clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename) @@ -3447,6 +3502,44 @@ func TestValidateMachineConfigsDatastoreUsageCreateError(t *testing.T) { thenErrorExpected(t, fmt.Sprintf("not enough space in datastore %s for given diskGiB and count for respective machine groups", tt.clusterSpec.VSphereMachineConfigs[tt.clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name].Spec.Datastore), err) } +func TestValidateMachineConfigsDatastoreUsageUpgradeSuccess(t *testing.T) { + tt := newProviderTest(t) + cluster := &types.Cluster{ + Name: "test", + } + tt.kubectl.EXPECT().GetEksaCluster(tt.ctx, cluster, tt.clusterSpec.Cluster.GetName()).Return(tt.clusterSpec.Cluster.DeepCopy(), nil) + machineConfigs := tt.clusterSpec.VSphereMachineConfigs + for _, config := range machineConfigs { + tt.kubectl.EXPECT().GetEksaVSphereMachineConfig(tt.ctx, config.Name, cluster.KubeconfigFile, config.Namespace).AnyTimes() + tt.govc.EXPECT().GetWorkloadAvailableSpace(tt.ctx, 
config.Spec.Datastore).Return(300.0, nil)
+	}
+	vSpec := NewSpec(tt.clusterSpec)
+	vSpec.Cluster.Spec.ControlPlaneConfiguration.Count += 2
+	err := tt.provider.validateDatastoreUsageForUpgrade(tt.ctx, vSpec, cluster)
+	if err != nil {
+		t.Fatalf("unexpected failure %v", err)
+	}
+}
+
+func TestValidateMachineConfigsDatastoreUsageStackedToUnstackedUpgradeSuccess(t *testing.T) {
+	tt := newStackedEtcdProviderTest(t)
+	// Previous cluster uses stacked etcd: ExternalEtcdConfiguration is nil on the prior spec.
+	cluster := &types.Cluster{
+		Name: "test",
+	}
+	tt.kubectl.EXPECT().GetEksaCluster(tt.ctx, cluster, tt.clusterSpec.Cluster.GetName()).Return(tt.clusterSpec.Cluster.DeepCopy(), nil)
+	machineConfigs := tt.clusterSpec.VSphereMachineConfigs
+	for _, config := range machineConfigs {
+		tt.kubectl.EXPECT().GetEksaVSphereMachineConfig(tt.ctx, config.Name, cluster.KubeconfigFile, config.Namespace).Return(config, nil).AnyTimes()
+		tt.govc.EXPECT().GetWorkloadAvailableSpace(tt.ctx, config.Spec.Datastore).Return(300.0, nil).AnyTimes()
+	}
+	vSpec := NewSpec(givenClusterSpec(t, testClusterConfigMainFilename))
+	err := tt.provider.validateDatastoreUsageForUpgrade(tt.ctx, vSpec, cluster)
+	if err != nil {
+		t.Fatalf("unexpected failure %v", err)
+	}
+}
+
 func TestValidateMachineConfigsDatastoreUsageUpgradeError(t *testing.T) {
 	tt := newProviderTest(t)
 	cluster := &types.Cluster{
@@ -3456,9 +3549,10 @@ func TestValidateMachineConfigsDatastoreUsageUpgradeError(t *testing.T) {
 	machineConfigs := tt.clusterSpec.VSphereMachineConfigs
 	for _, config := range machineConfigs {
 		tt.kubectl.EXPECT().GetEksaVSphereMachineConfig(tt.ctx, config.Name, cluster.KubeconfigFile, config.Namespace).AnyTimes()
-		tt.govc.EXPECT().GetWorkloadAvailableSpace(tt.ctx, config.Spec.Datastore).Return(50.0, nil)
+		tt.govc.EXPECT().GetWorkloadAvailableSpace(tt.ctx, config.Spec.Datastore).Return(300.0, nil)
 	}
 	vSpec := NewSpec(tt.clusterSpec)
+	vSpec.Cluster.Spec.ControlPlaneConfiguration.Count += 5
 	err :=
tt.provider.validateDatastoreUsageForUpgrade(tt.ctx, vSpec, cluster) thenErrorExpected(t, fmt.Sprintf("not enough space in datastore %s for given diskGiB and count for respective machine groups", tt.clusterSpec.VSphereMachineConfigs[tt.clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name].Spec.Datastore), err) } @@ -3488,7 +3582,7 @@ func TestValidateMachineConfigsMemoryUsageCreateError(t *testing.T) { vSpec := NewSpec(tt.clusterSpec) err := tt.provider.validateMemoryUsage(tt.ctx, vSpec, nil) resourcePool := machineConfigs[tt.clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name].Spec.ResourcePool - thenErrorExpected(t, fmt.Sprintf("not enough memory avaialable in resource pool %v for given memoryMiB and count for respective machine groups", resourcePool), err) + thenErrorExpected(t, fmt.Sprintf("not enough memory available in resource pool %v for given memoryMiB and count for respective machine groups", resourcePool), err) } func TestSetupAndValidateCreateClusterMemoryUsageError(t *testing.T) { @@ -3553,7 +3647,7 @@ func TestValidateMachineConfigsMemoryUsageUpgradeError(t *testing.T) { vSpec.Cluster.Spec.ControlPlaneConfiguration.Count += 2 err := tt.provider.validateMemoryUsage(tt.ctx, vSpec, cluster) resourcePool := machineConfigs[tt.clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name].Spec.ResourcePool - thenErrorExpected(t, fmt.Sprintf("not enough memory avaialable in resource pool %v for given memoryMiB and count for respective machine groups", resourcePool), err) + thenErrorExpected(t, fmt.Sprintf("not enough memory available in resource pool %v for given memoryMiB and count for respective machine groups", resourcePool), err) } func TestSetupAndValidateUpgradeClusterMemoryUsageError(t *testing.T) {