Fix NPE in vSphere etcd machine datastore usage calculation (#8245)
abhay-krishna authored Jun 11, 2024
1 parent 28d6212 commit 9919e6c
Showing 3 changed files with 251 additions and 45 deletions.
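The panic fixed here occurs during upgrade validation when the previous cluster runs stacked etcd: in that case prevEksaCluster.Spec.ExternalEtcdConfiguration is nil, so reading .Count from it dereferences a nil pointer. The diff below guards that read. As a minimal, self-contained illustration of the failure mode and the guard, here is a Go sketch that uses simplified stand-in types rather than the real EKS-A v1alpha1 structs:

```go
package main

import "fmt"

// Simplified stand-ins for the EKS-A v1alpha1 types involved in the fix;
// field names mirror the diff below, but the real definitions live in the
// eks-anywhere API packages.
type ExternalEtcdConfiguration struct {
	Count int
}

type ClusterSpec struct {
	// Nil when the cluster runs stacked etcd (etcd colocated on control plane nodes).
	ExternalEtcdConfiguration *ExternalEtcdConfiguration
}

type Cluster struct {
	Spec ClusterSpec
}

func main() {
	prevEksaCluster := &Cluster{} // previous cluster uses stacked etcd

	// Before the fix: an unconditional dereference panics for stacked-etcd clusters.
	// prevCount := prevEksaCluster.Spec.ExternalEtcdConfiguration.Count // nil pointer dereference

	// After the fix: guard the dereference and default the previous count to zero.
	prevCount := 0
	if prevEksaCluster.Spec.ExternalEtcdConfiguration != nil {
		prevCount = prevEksaCluster.Spec.ExternalEtcdConfiguration.Count
	}
	fmt.Println("previous external etcd machine count:", prevCount)
}
```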
108 changes: 108 additions & 0 deletions pkg/providers/vsphere/testdata/cluster_main_stacked_etcd.yaml
@@ -0,0 +1,108 @@
apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: Cluster
metadata:
  name: test
  namespace: test-namespace
spec:
  controlPlaneConfiguration:
    count: 3
    endpoint:
      host: 1.2.3.4
    machineGroupRef:
      name: test-cp
      kind: VSphereMachineConfig
  kubernetesVersion: "1.19"
  workerNodeGroupConfigurations:
  - count: 3
    machineGroupRef:
      name: test-wn
      kind: VSphereMachineConfig
    name: md-0
  datacenterRef:
    kind: VSphereDatacenterConfig
    name: test
  clusterNetwork:
    cni: "cilium"
    pods:
      cidrBlocks:
        - 192.168.0.0/16
    services:
      cidrBlocks:
        - 10.96.0.0/12
    node:
      cidrMaskSize: 8
---
apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: VSphereMachineConfig
metadata:
  name: test-cp
  namespace: test-namespace
spec:
  diskGiB: 25
  cloneMode: linkedClone
  datastore: "/SDDC-Datacenter/datastore/WorkloadDatastore"
  folder: "/SDDC-Datacenter/vm"
  memoryMiB: 8192
  numCPUs: 2
  osFamily: ubuntu
  resourcePool: "*/Resources"
  storagePolicyName: "vSAN Default Storage Policy"
  template: "/SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6"
  users:
  - name: capv
    sshAuthorizedKeys:
    - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com"
---
apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: VSphereMachineConfig
metadata:
  name: test-wn
  namespace: test-namespace
spec:
  diskGiB: 25
  cloneMode: linkedClone
  datastore: "/SDDC-Datacenter/datastore/WorkloadDatastore"
  folder: "/SDDC-Datacenter/vm"
  memoryMiB: 4096
  numCPUs: 3
  osFamily: ubuntu
  resourcePool: "*/Resources"
  storagePolicyName: "vSAN Default Storage Policy"
  template: "/SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6"
  users:
  - name: capv
    sshAuthorizedKeys:
    - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com"
---
apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: VSphereMachineConfig
metadata:
  name: test-etcd
  namespace: test-namespace
spec:
  diskGiB: 25
  cloneMode: linkedClone
  datastore: "/SDDC-Datacenter/datastore/WorkloadDatastore"
  folder: "/SDDC-Datacenter/vm"
  memoryMiB: 4096
  numCPUs: 3
  osFamily: ubuntu
  resourcePool: "*/Resources"
  storagePolicyName: "vSAN Default Storage Policy"
  template: "/SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6"
  users:
  - name: capv
    sshAuthorizedKeys:
    - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com"
---
apiVersion: anywhere.eks.amazonaws.com/v1alpha1
kind: VSphereDatacenterConfig
metadata:
  name: test
  namespace: test-namespace
spec:
  datacenter: "SDDC-Datacenter"
  network: "/SDDC-Datacenter/network/sddc-cgw-network-1"
  server: "vsphere_server"
  thumbprint: "ABCDEFG"
  insecure: false
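This new testdata file describes a stacked-etcd cluster: a test-etcd VSphereMachineConfig is defined, but the Cluster spec contains no externalEtcdConfiguration, which is the shape that previously triggered the nil dereference during upgrade validation. As a rough, hypothetical illustration (not the repository's actual test code), the sketch below splits the multi-document manifest and confirms that the Cluster document omits externalEtcdConfiguration; it assumes gopkg.in/yaml.v3 is available and that it is run from the repository root:

```go
package main

import (
	"fmt"
	"os"
	"strings"

	"gopkg.in/yaml.v3"
)

// Minimal projection of the Cluster document: we only care about the kind and
// whether spec.externalEtcdConfiguration is present.
type clusterDoc struct {
	Kind string `yaml:"kind"`
	Spec struct {
		ExternalEtcdConfiguration *struct {
			Count int `yaml:"count"`
		} `yaml:"externalEtcdConfiguration"`
	} `yaml:"spec"`
}

func main() {
	// Path taken from the diff above; assumes the working directory is the repo root.
	data, err := os.ReadFile("pkg/providers/vsphere/testdata/cluster_main_stacked_etcd.yaml")
	if err != nil {
		panic(err)
	}

	// The testdata is a multi-document manifest; split on the document separator.
	for _, doc := range strings.Split(string(data), "\n---\n") {
		var c clusterDoc
		if err := yaml.Unmarshal([]byte(doc), &c); err != nil {
			panic(err)
		}
		if c.Kind == "Cluster" {
			// Stacked etcd: the Cluster spec carries no externalEtcdConfiguration.
			fmt.Println("externalEtcdConfiguration present:", c.Spec.ExternalEtcdConfiguration != nil)
		}
	}
}
```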
42 changes: 23 additions & 19 deletions pkg/providers/vsphere/vsphere.go
@@ -106,37 +106,37 @@ type ProviderGovcClient interface {
AddTag(ctx context.Context, path, tag string) error
ListCategories(ctx context.Context) ([]string, error)
CreateCategoryForVM(ctx context.Context, name string) error
CreateUser(ctx context.Context, username string, password string) error
CreateUser(ctx context.Context, username, password string) error
UserExists(ctx context.Context, username string) (bool, error)
CreateGroup(ctx context.Context, name string) error
GroupExists(ctx context.Context, name string) (bool, error)
AddUserToGroup(ctx context.Context, name string, username string) error
AddUserToGroup(ctx context.Context, name, username string) error
RoleExists(ctx context.Context, name string) (bool, error)
CreateRole(ctx context.Context, name string, privileges []string) error
SetGroupRoleOnObject(ctx context.Context, principal string, role string, object string, domain string) error
SetGroupRoleOnObject(ctx context.Context, principal, role, object, domain string) error
GetHardDiskSize(ctx context.Context, vm, datacenter string) (map[string]float64, error)
GetResourcePoolInfo(ctx context.Context, datacenter, resourcepool string, args ...string) (map[string]int, error)
}

type ProviderKubectlClient interface {
ApplyKubeSpecFromBytes(ctx context.Context, cluster *types.Cluster, data []byte) error
CreateNamespaceIfNotPresent(ctx context.Context, kubeconfig string, namespace string) error
LoadSecret(ctx context.Context, secretObject string, secretObjType string, secretObjectName string, kubeConfFile string) error
CreateNamespaceIfNotPresent(ctx context.Context, kubeconfig, namespace string) error
LoadSecret(ctx context.Context, secretObject, secretObjType, secretObjectName, kubeConfFile string) error
GetEksaCluster(ctx context.Context, cluster *types.Cluster, clusterName string) (*v1alpha1.Cluster, error)
GetEksaVSphereDatacenterConfig(ctx context.Context, vsphereDatacenterConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.VSphereDatacenterConfig, error)
GetEksaVSphereMachineConfig(ctx context.Context, vsphereMachineConfigName string, kubeconfigFile string, namespace string) (*v1alpha1.VSphereMachineConfig, error)
GetEksaVSphereDatacenterConfig(ctx context.Context, vsphereDatacenterConfigName, kubeconfigFile, namespace string) (*v1alpha1.VSphereDatacenterConfig, error)
GetEksaVSphereMachineConfig(ctx context.Context, vsphereMachineConfigName, kubeconfigFile, namespace string) (*v1alpha1.VSphereMachineConfig, error)
GetMachineDeployment(ctx context.Context, machineDeploymentName string, opts ...executables.KubectlOpt) (*clusterv1.MachineDeployment, error)
GetKubeadmControlPlane(ctx context.Context, cluster *types.Cluster, clusterName string, opts ...executables.KubectlOpt) (*controlplanev1.KubeadmControlPlane, error)
GetEtcdadmCluster(ctx context.Context, cluster *types.Cluster, clusterName string, opts ...executables.KubectlOpt) (*etcdv1.EtcdadmCluster, error)
GetSecretFromNamespace(ctx context.Context, kubeconfigFile, name, namespace string) (*corev1.Secret, error)
UpdateAnnotation(ctx context.Context, resourceType, objectName string, annotations map[string]string, opts ...executables.KubectlOpt) error
RemoveAnnotationInNamespace(ctx context.Context, resourceType, objectName, key string, cluster *types.Cluster, namespace string) error
SearchVsphereMachineConfig(ctx context.Context, name string, kubeconfigFile string, namespace string) ([]*v1alpha1.VSphereMachineConfig, error)
SearchVsphereDatacenterConfig(ctx context.Context, name string, kubeconfigFile string, namespace string) ([]*v1alpha1.VSphereDatacenterConfig, error)
SearchVsphereMachineConfig(ctx context.Context, name, kubeconfigFile, namespace string) ([]*v1alpha1.VSphereMachineConfig, error)
SearchVsphereDatacenterConfig(ctx context.Context, name, kubeconfigFile, namespace string) ([]*v1alpha1.VSphereDatacenterConfig, error)
SetDaemonSetImage(ctx context.Context, kubeconfigFile, name, namespace, container, image string) error
DeleteEksaDatacenterConfig(ctx context.Context, vsphereDatacenterResourceType string, vsphereDatacenterConfigName string, kubeconfigFile string, namespace string) error
DeleteEksaMachineConfig(ctx context.Context, vsphereMachineResourceType string, vsphereMachineConfigName string, kubeconfigFile string, namespace string) error
ApplyTolerationsFromTaintsToDaemonSet(ctx context.Context, oldTaints []corev1.Taint, newTaints []corev1.Taint, dsName string, kubeconfigFile string) error
DeleteEksaDatacenterConfig(ctx context.Context, vsphereDatacenterResourceType, vsphereDatacenterConfigName, kubeconfigFile, namespace string) error
DeleteEksaMachineConfig(ctx context.Context, vsphereMachineResourceType, vsphereMachineConfigName, kubeconfigFile, namespace string) error
ApplyTolerationsFromTaintsToDaemonSet(ctx context.Context, oldTaints, newTaints []corev1.Taint, dsName, kubeconfigFile string) error
}

// IPValidator is an interface that defines methods to validate the control plane IP.
@@ -376,7 +376,7 @@ func (p *vsphereProvider) SetupAndValidateCreateCluster(ctx context.Context, clu
return nil
}

func (p *vsphereProvider) SetupAndValidateUpgradeCluster(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec, _ *cluster.Spec) error {
func (p *vsphereProvider) SetupAndValidateUpgradeCluster(ctx context.Context, cluster *types.Cluster, clusterSpec, _ *cluster.Spec) error {

Check failure on line 379 in pkg/providers/vsphere/vsphere.go (GitHub Actions / lint): cyclomatic complexity 12 of func `(*vsphereProvider).SetupAndValidateUpgradeCluster` is high (> 10) (gocyclo)
if err := SetupEnvVars(clusterSpec.VSphereDatacenter); err != nil {
return fmt.Errorf("failed setup and validations: %v", err)
}
@@ -545,7 +545,11 @@ func (p *vsphereProvider) validateDatastoreUsageForUpgrade(ctx context.Context,

etcdMachineConfig := currentClusterSpec.etcdMachineConfig()
if etcdMachineConfig != nil {
if err := p.calculateDatastoreUsage(ctx, etcdMachineConfig, cluster, usage, prevEksaCluster.Spec.ExternalEtcdConfiguration.Count, currentClusterSpec.Cluster.Spec.ExternalEtcdConfiguration.Count); err != nil {
prevCount := 0
if prevEksaCluster.Spec.ExternalEtcdConfiguration != nil {
prevCount = prevEksaCluster.Spec.ExternalEtcdConfiguration.Count
}
if err := p.calculateDatastoreUsage(ctx, etcdMachineConfig, cluster, usage, prevCount, currentClusterSpec.Cluster.Spec.ExternalEtcdConfiguration.Count); err != nil {
return fmt.Errorf("calculating datastore usage: %v", err)
}
}
@@ -652,7 +656,7 @@ func (p *vsphereProvider) validateMemoryUsage(ctx context.Context, clusterSpec *
}
for resourcePool, remaniningMiB := range memoryUsage {
if remaniningMiB != -1 && remaniningMiB < 0 {
return fmt.Errorf("not enough memory avaialable in resource pool %v for given memoryMiB and count for respective machine groups", resourcePool)
return fmt.Errorf("not enough memory available in resource pool %v for given memoryMiB and count for respective machine groups", resourcePool)
}
}
logger.V(5).Info("Memory availability for machine configs in requested resource pool validated")
@@ -737,7 +741,7 @@ func NeedsNewWorkloadTemplate(oldSpec, newSpec *cluster.Spec, oldVdc, newVdc *v1
return AnyImmutableFieldChanged(oldVdc, newVdc, oldVmc, newVmc)
}

func NeedsNewKubeadmConfigTemplate(newWorkerNodeGroup *v1alpha1.WorkerNodeGroupConfiguration, oldWorkerNodeGroup *v1alpha1.WorkerNodeGroupConfiguration, oldWorkerNodeVmc *v1alpha1.VSphereMachineConfig, newWorkerNodeVmc *v1alpha1.VSphereMachineConfig) bool {
func NeedsNewKubeadmConfigTemplate(newWorkerNodeGroup, oldWorkerNodeGroup *v1alpha1.WorkerNodeGroupConfiguration, oldWorkerNodeVmc, newWorkerNodeVmc *v1alpha1.VSphereMachineConfig) bool {

Check failure on line 744 in pkg/providers/vsphere/vsphere.go (GitHub Actions / lint): exported: exported function NeedsNewKubeadmConfigTemplate should have comment or be unexported (revive)
return !v1alpha1.TaintsSliceEqual(newWorkerNodeGroup.Taints, oldWorkerNodeGroup.Taints) || !v1alpha1.MapEqual(newWorkerNodeGroup.Labels, oldWorkerNodeGroup.Labels) ||
!v1alpha1.UsersSliceEqual(oldWorkerNodeVmc.Spec.Users, newWorkerNodeVmc.Spec.Users)
}
@@ -1125,15 +1129,15 @@ func (p *vsphereProvider) getWorkerNodeMachineConfigs(ctx context.Context, workl
return nil, nil, nil
}

func (p *vsphereProvider) needsNewMachineTemplate(currentSpec, newClusterSpec *cluster.Spec, workerNodeGroupConfiguration v1alpha1.WorkerNodeGroupConfiguration, vdc *v1alpha1.VSphereDatacenterConfig, prevWorkerNodeGroupConfigs map[string]v1alpha1.WorkerNodeGroupConfiguration, oldWorkerMachineConfig *v1alpha1.VSphereMachineConfig, newWorkerMachineConfig *v1alpha1.VSphereMachineConfig) (bool, error) {
func (p *vsphereProvider) needsNewMachineTemplate(currentSpec, newClusterSpec *cluster.Spec, workerNodeGroupConfiguration v1alpha1.WorkerNodeGroupConfiguration, vdc *v1alpha1.VSphereDatacenterConfig, prevWorkerNodeGroupConfigs map[string]v1alpha1.WorkerNodeGroupConfiguration, oldWorkerMachineConfig, newWorkerMachineConfig *v1alpha1.VSphereMachineConfig) (bool, error) {
if prevWorkerNode, ok := prevWorkerNodeGroupConfigs[workerNodeGroupConfiguration.Name]; ok {
needsNewWorkloadTemplate := NeedsNewWorkloadTemplate(currentSpec, newClusterSpec, vdc, newClusterSpec.VSphereDatacenter, oldWorkerMachineConfig, newWorkerMachineConfig, prevWorkerNode, workerNodeGroupConfiguration)
return needsNewWorkloadTemplate, nil
}
return true, nil
}

func (p *vsphereProvider) needsNewKubeadmConfigTemplate(workerNodeGroupConfiguration v1alpha1.WorkerNodeGroupConfiguration, prevWorkerNodeGroupConfigs map[string]v1alpha1.WorkerNodeGroupConfiguration, oldWorkerNodeVmc *v1alpha1.VSphereMachineConfig, newWorkerNodeVmc *v1alpha1.VSphereMachineConfig) (bool, error) {
func (p *vsphereProvider) needsNewKubeadmConfigTemplate(workerNodeGroupConfiguration v1alpha1.WorkerNodeGroupConfiguration, prevWorkerNodeGroupConfigs map[string]v1alpha1.WorkerNodeGroupConfiguration, oldWorkerNodeVmc, newWorkerNodeVmc *v1alpha1.VSphereMachineConfig) (bool, error) {
if _, ok := prevWorkerNodeGroupConfigs[workerNodeGroupConfiguration.Name]; ok {
existingWorkerNodeGroupConfig := prevWorkerNodeGroupConfigs[workerNodeGroupConfiguration.Name]
return NeedsNewKubeadmConfigTemplate(&workerNodeGroupConfiguration, &existingWorkerNodeGroupConfig, oldWorkerNodeVmc, newWorkerNodeVmc), nil
@@ -1210,7 +1214,7 @@ func (p *vsphereProvider) Version(components *cluster.ManagementComponents) stri
return components.VSphere.Version
}

func (p *vsphereProvider) RunPostControlPlaneUpgrade(_ context.Context, _ *cluster.Spec, _ *cluster.Spec, _ *types.Cluster, _ *types.Cluster) error {
func (p *vsphereProvider) RunPostControlPlaneUpgrade(_ context.Context, _, _ *cluster.Spec, _, _ *types.Cluster) error {
return nil
}
