Commit 1bc8079

Fix rolling upgrade logic and separate eksa version upgrade logic

Signed-off-by: Rahul Ganesh <rahulgab@amazon.com>
Rahul Ganesh committed Oct 3, 2023
1 parent 78f9999 commit 1bc8079

Showing 3 changed files with 58 additions and 44 deletions.
81 changes: 48 additions & 33 deletions pkg/providers/tinkerbell/assert.go
@@ -300,10 +300,12 @@ func (v *ValidatableTinkerbellClusterSpec) WorkerNodeHardwareGroups() []WorkerNo
 	return workerNodeGroupConfigs
 }
 
+// ClusterK8sVersion retrieves the Kubernetes version set at the cluster level.
 func (v *ValidatableTinkerbellClusterSpec) ClusterK8sVersion() *v1alpha1.KubernetesVersion {
 	return &v.Cluster.Spec.KubernetesVersion
 }
 
+// WorkerNodeGroupK8sVersion returns each worker node group with its associated Kubernetes version.
 func (v *ValidatableTinkerbellClusterSpec) WorkerNodeGroupK8sVersion() map[string]*v1alpha1.KubernetesVersion {
 	return WorkerNodeGroupWithK8sVersion(v.ClusterSpec.Spec)
 }
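The two getters added above give the rolling-upgrade assertion a uniform, pointer-based view of the versions a cluster is currently running. A minimal sketch of the comparison they enable, assuming a ValidatableCluster named current and a desired *ClusterSpec named spec as in ExtraHardwareAvailableAssertionForRollingUpgrade below:

	// Sketch only: decide whether the control plane will roll.
	if spec.Cluster.Spec.KubernetesVersion != *current.ClusterK8sVersion() {
		// spare control plane hardware must be available before upgrading
	}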
@@ -332,10 +334,12 @@ func (v *ValidatableTinkerbellCAPI) WorkerNodeHardwareGroups() []WorkerNodeHardw
 	return workerNodeHardwareList
 }
 
+// ClusterK8sVersion returns the Kubernetes version in major.minor format for a ValidatableTinkerbellCAPI.
 func (v *ValidatableTinkerbellCAPI) ClusterK8sVersion() *v1alpha1.KubernetesVersion {
 	return v.extractK8sVersion(v.KubeadmControlPlane.Spec.Version)
 }
 
+// WorkerNodeGroupK8sVersion returns each worker node group mapped to Kubernetes version in major.minor format for a ValidatableTinkerbellCAPI.
 func (v *ValidatableTinkerbellCAPI) WorkerNodeGroupK8sVersion() map[string]*v1alpha1.KubernetesVersion {
 	WNGK8sversion := make(map[string]*v1alpha1.KubernetesVersion)
 	for _, wng := range v.WorkerGroups {
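For the CAPI-backed implementation the versions originate as KubeadmControlPlane version strings such as "v1.28.3", which extractK8sVersion (not part of this diff) reduces to major.minor. A standalone sketch of that kind of conversion; the helper name and behavior here are assumptions for illustration, not the repository's implementation:

	package main

	import (
		"fmt"
		"strings"
	)

	// toMajorMinor is a hypothetical stand-in for extractK8sVersion: "v1.28.3" -> "1.28".
	func toMajorMinor(capiVersion string) string {
		parts := strings.SplitN(strings.TrimPrefix(capiVersion, "v"), ".", 3)
		return parts[0] + "." + parts[1]
	}

	func main() {
		fmt.Println(toMajorMinor("v1.28.3")) // prints: 1.28
	}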
@@ -428,7 +432,7 @@ func AssertionsForScaleUpDown(catalogue *hardware.Catalogue, current Validatable
 
 // ExtraHardwareAvailableAssertionForRollingUpgrade asserts that catalogue has sufficient hardware to
 // support the ClusterSpec during a rolling upgrade workflow.
-func ExtraHardwareAvailableAssertionForRollingUpgrade(catalogue *hardware.Catalogue, current ValidatableCluster) ClusterSpecAssertion {
+func ExtraHardwareAvailableAssertionForRollingUpgrade(catalogue *hardware.Catalogue, current ValidatableCluster, eksaVersionUpgrade bool) ClusterSpecAssertion {
 	return func(spec *ClusterSpec) error {
 		// Without Hardware selectors we get undesirable behavior so ensure we have them for
 		// all MachineConfigs.
@@ -440,40 +444,14 @@ func ExtraHardwareAvailableAssertionForRollingUpgrade(catalogue *hardware.Catalo
 		// will account for the same selector being specified on different groups.
 		requirements := minimumHardwareRequirements{}
 
-		maxSurge := 1
-		if spec.Cluster.Spec.KubernetesVersion != *current.ClusterK8sVersion() {
-			if spec.Cluster.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy != nil {
-				maxSurge = spec.Cluster.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy.RollingUpdate.MaxSurge
-			}
-			err := requirements.Add(
-				spec.ControlPlaneMachineConfig().Spec.HardwareSelector,
-				maxSurge,
-			)
-			if err != nil {
-				return fmt.Errorf("for rolling upgrade, %v", err)
-			}
+		if spec.Cluster.Spec.KubernetesVersion != *current.ClusterK8sVersion() || eksaVersionUpgrade {
+			if err := ensureCPHardwareAvailability(spec, current, requirements); err != nil {
+				return err
+			}
 		}
-		// for WNGref, k8sversion := range current.WorkerNodeGroupK8sVersion() {
-		// 	wng := spec.MachineConfigs[WNGref]
-		// 	maxSurge = 1
-		// 	wng.UpgradeRolloutStrategy
-		// }
-		currentWNGK8sversion := current.WorkerNodeGroupK8sVersion()
-		desiredWNGK8sVersion := WorkerNodeGroupWithK8sVersion(spec.Spec)
-		for _, nodeGroup := range spec.WorkerNodeGroupConfigurations() {
-			maxSurge = 1
-			if currentWNGK8sversion[nodeGroup.Name] != desiredWNGK8sVersion[nodeGroup.Name] {
-				if nodeGroup.UpgradeRolloutStrategy != nil {
-					maxSurge = nodeGroup.UpgradeRolloutStrategy.RollingUpdate.MaxSurge
-				}
-				err := requirements.Add(
-					spec.WorkerNodeGroupMachineConfig(nodeGroup).Spec.HardwareSelector,
-					maxSurge,
-				)
-				if err != nil {
-					return fmt.Errorf("for rolling upgrade, %v", err)
-				}
-			}
-		}
+
+		if err := ensureWorkerHardwareAvailability(spec, current, requirements, eksaVersionUpgrade); err != nil {
+			return err
+		}
 
 		if spec.HasExternalEtcd() {
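Both triggers now funnel into the same accounting: every group that will roll, either because its Kubernetes version changes or because eksaVersionUpgrade forces a full roll, must have maxSurge spare machines matching its hardware selector, with 1 as the default. A standalone sketch of that tally using plain maps in place of the package's minimumHardwareRequirements type:

	package main

	import "fmt"

	func main() {
		// Tally the spare machines each hardware selector needs during a roll.
		spares := map[string]int{}
		spares["type=cp"]++        // control plane, default maxSurge of 1
		spares["type=worker"] += 2 // worker group with RollingUpdate.MaxSurge: 2
		fmt.Println(spares)        // map[type=cp:1 type=worker:2]
	}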
@@ -487,6 +465,43 @@ func ExtraHardwareAvailableAssertionForRollingUpgrade(catalogue *hardware.Catalo
 	}
 }
 
+func ensureCPHardwareAvailability(spec *ClusterSpec, current ValidatableCluster, hwReq minimumHardwareRequirements) error {
+	maxSurge := 1
+
+	if spec.Cluster.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy != nil {
+		maxSurge = spec.Cluster.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy.RollingUpdate.MaxSurge
+	}
+	err := hwReq.Add(
+		spec.ControlPlaneMachineConfig().Spec.HardwareSelector,
+		maxSurge,
+	)
+	if err != nil {
+		return fmt.Errorf("for rolling upgrade, %v", err)
+	}
+	return nil
+}
+
+func ensureWorkerHardwareAvailability(spec *ClusterSpec, current ValidatableCluster, hwReq minimumHardwareRequirements, eksaVersionUpgrade bool) error {
+	currentWNGK8sversion := current.WorkerNodeGroupK8sVersion()
+	desiredWNGK8sVersion := WorkerNodeGroupWithK8sVersion(spec.Spec)
+	for _, nodeGroup := range spec.WorkerNodeGroupConfigurations() {
+		maxSurge := 1
+		if currentWNGK8sversion[nodeGroup.Name] != desiredWNGK8sVersion[nodeGroup.Name] || eksaVersionUpgrade {
+			if nodeGroup.UpgradeRolloutStrategy != nil {
+				maxSurge = nodeGroup.UpgradeRolloutStrategy.RollingUpdate.MaxSurge
+			}
+			err := hwReq.Add(
+				spec.WorkerNodeGroupMachineConfig(nodeGroup).Spec.HardwareSelector,
+				maxSurge,
+			)
+			if err != nil {
+				return fmt.Errorf("for rolling upgrade, %v", err)
+			}
+		}
+	}
+	return nil
+}
+
 // ensureHardwareSelectorsSpecified ensures each machine config present in spec has a hardware
 // selector.
 func ensureHardwareSelectorsSpecified(spec *ClusterSpec) error {
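The two call sites in the files below register this same assertion and differ only in how they derive the new eksaVersionUpgrade argument. A sketch of direct registration, assuming the package's ClusterSpecValidator and a populated hardware catalogue and current cluster as used elsewhere in this package:

	// Sketch only: validate hardware for an upgrade that bumps the EKS-A bundles.
	validator := NewClusterSpecValidator(
		ExtraHardwareAvailableAssertionForRollingUpgrade(catalogue, currentCluster, true),
	)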
3 changes: 2 additions & 1 deletion pkg/providers/tinkerbell/reconciler/reconciler.go
@@ -371,7 +371,8 @@ func (r *Reconciler) ValidateHardware(ctx context.Context, log logr.Logger, tink
 		if err != nil {
 			return controller.Result{}, err
 		}
-		v.Register(tinkerbell.ExtraHardwareAvailableAssertionForRollingUpgrade(kubeReader.GetCatalogue(), validatableCAPI))
+		// eksa version upgrade cannot be triggered from controller, so set it to false.
+		v.Register(tinkerbell.ExtraHardwareAvailableAssertionForRollingUpgrade(kubeReader.GetCatalogue(), validatableCAPI, false))
 	case NewClusterOperation:
 		v.Register(tinkerbell.MinimumHardwareAvailableAssertionForCreate(kubeReader.GetCatalogue()))
 	case NoChange:
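Since the controller cannot initiate an EKS-A version upgrade, hard-coding false means reconciliation only demands spare hardware when a Kubernetes version actually changes. A hypothetical test-style sketch of that behavior, assuming package-internal fixtures; this is not a test from the commit:

	// With identical versions and eksaVersionUpgrade=false, the assertion
	// should succeed even if the catalogue holds no spare hardware.
	assertion := tinkerbell.ExtraHardwareAvailableAssertionForRollingUpgrade(kubeReader.GetCatalogue(), validatableCAPI, false)
	if err := assertion(spec); err != nil {
		t.Fatalf("expected no extra hardware requirement, got: %v", err)
	}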
18 changes: 8 additions & 10 deletions pkg/providers/tinkerbell/upgrade.go
@@ -145,18 +145,13 @@ func (p *Provider) validateAvailableHardwareForUpgrade(ctx context.Context, curr
 	clusterSpecValidator := NewClusterSpecValidator(
 		HardwareSatisfiesOnlyOneSelectorAssertion(p.catalogue),
 	)
-
-	// if currentSpec.Cluster.Spec.KubernetesVersion != newClusterSpec.Cluster.Spec.KubernetesVersion ||
-	// 	currentSpec.Bundles.Spec.Number != newClusterSpec.Bundles.Spec.Number {
-	// 	clusterSpecValidator.Register(ExtraHardwareAvailableAssertionForRollingUpgrade(p.catalogue))
-	// 	rollingUpgrade = true
-	// }
+	eksaVersionUpgrade := p.isEksaVersionUpgrade(currentSpec, newClusterSpec)
 
 	currentTinkerbellSpec := NewClusterSpec(currentSpec, currentSpec.TinkerbellMachineConfigs, currentSpec.TinkerbellDatacenter)
 	rollingUpgrade := p.isRollingUpgrade(currentSpec, newClusterSpec)
 	currentCluster := &ValidatableTinkerbellClusterSpec{currentTinkerbellSpec}
 	if rollingUpgrade {
-		clusterSpecValidator.Register(ExtraHardwareAvailableAssertionForRollingUpgrade(p.catalogue, currentCluster))
+		clusterSpecValidator.Register(ExtraHardwareAvailableAssertionForRollingUpgrade(p.catalogue, currentCluster, eksaVersionUpgrade))
 	}
 	clusterSpecValidator.Register(AssertionsForScaleUpDown(p.catalogue, currentCluster, rollingUpgrade))
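The two predicates are now computed independently: isEksaVersionUpgrade compares bundles numbers, while isRollingUpgrade (below) looks only at Kubernetes versions. A standalone illustration of the split with hypothetical values:

	package main

	import "fmt"

	func main() {
		currentBundles, newBundles := 58, 59 // EKS-A bundles numbers (hypothetical)
		currentK8s, newK8s := "1.27", "1.27" // cluster-level Kubernetes versions

		eksaVersionUpgrade := currentBundles != newBundles // what isEksaVersionUpgrade checks
		rollingUpgrade := currentK8s != newK8s             // isRollingUpgrade also scans worker groups

		fmt.Println(eksaVersionUpgrade, rollingUpgrade) // prints: true false
	}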

@@ -375,16 +370,19 @@ func (p *Provider) isScaleUpDown(oldCluster *v1alpha1.Cluster, newCluster *v1alp
 	return false
 }
 
+func (p *Provider) isEksaVersionUpgrade(currentSpec, newClusterSpec *cluster.Spec) bool {
+	return currentSpec.Bundles.Spec.Number != newClusterSpec.Bundles.Spec.Number
+}
+
 func (p *Provider) isRollingUpgrade(currentSpec, newClusterSpec *cluster.Spec) bool {
-	if currentSpec.Cluster.Spec.KubernetesVersion != newClusterSpec.Cluster.Spec.KubernetesVersion ||
-		currentSpec.Bundles.Spec.Number != newClusterSpec.Bundles.Spec.Number {
+	if currentSpec.Cluster.Spec.KubernetesVersion != newClusterSpec.Cluster.Spec.KubernetesVersion {
 		return true
 	}
 	currentWNGSwithK8sVersion := WorkerNodeGroupWithK8sVersion(currentSpec)
 	desiredWNGwithK8sVersion := WorkerNodeGroupWithK8sVersion(newClusterSpec)
 	for wngName, K8sVersion := range desiredWNGwithK8sVersion {
 		currentWngK8sVersion, ok := currentWNGSwithK8sVersion[wngName]
-		if ok && currentWngK8sVersion != K8sVersion {
+		if ok && (*currentWngK8sVersion != *K8sVersion) {
 			return true
 		}
 	}
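The parenthesized dereference above is the substantive fix: the worker-node-group maps hold *v1alpha1.KubernetesVersion values, so comparing the pointers themselves compares addresses, which differ whenever the two specs are built separately, even for identical versions. A standalone demonstration of the difference:

	package main

	import "fmt"

	// KubernetesVersion mimics v1alpha1.KubernetesVersion, a string type.
	type KubernetesVersion string

	func main() {
		a, b := KubernetesVersion("1.27"), KubernetesVersion("1.27")
		pa, pb := &a, &b
		fmt.Println(pa != pb)   // true: distinct addresses despite equal values
		fmt.Println(*pa != *pb) // false: equal values, so no rolling upgrade
	}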
