Commit c23414b: Add unit tests and fix edge case of rolling vs eksa version found during testing

Signed-off-by: Rahul Ganesh <rahulgab@amazon.com>
Rahul Ganesh committed Oct 4, 2023
1 parent 1bc8079 commit c23414b
Showing 7 changed files with 334 additions and 5 deletions.
7 changes: 7 additions & 0 deletions pkg/api/v1alpha1/tinkerbellmachineconfig_types_test.go
@@ -66,6 +66,13 @@ func TestTinkerbellMachineConfigValidateFail(t *testing.T) {
),
expectedErr: "HostOSConfiguration is invalid for TinkerbellMachineConfig tinkerbellmachineconfig: NTPConfiguration.Servers can not be empty",
},
{
name: "Invalid OS Image URL",
machineConfig: CreateTinkerbellMachineConfig(func(mc *TinkerbellMachineConfig) {
mc.Spec.OSImageURL = "test"
}),
expectedErr: "parsing osImageOverride: parse \"test\": invalid URI for request",
},
}

for _, tc := range tests {
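The expected error in the new test case is the message Go's net/url package produces when a bare string like "test" is parsed as a request URI. Below is a minimal sketch of that check, assuming a hypothetical validateOSImageURL helper that wraps url.ParseRequestURI; the wrapping text "parsing osImageOverride" is taken from the expected error above, not from the provider's source.

```go
package main

import (
	"fmt"
	"net/url"
)

// validateOSImageURL is a hypothetical helper illustrating why "test" fails:
// url.ParseRequestURI rejects anything that is not an absolute URI or path.
func validateOSImageURL(osImageURL string) error {
	if _, err := url.ParseRequestURI(osImageURL); err != nil {
		return fmt.Errorf("parsing osImageOverride: %v", err)
	}
	return nil
}

func main() {
	// Prints: parsing osImageOverride: parse "test": invalid URI for request
	fmt.Println(validateOSImageURL("test"))
}
```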
4 changes: 3 additions & 1 deletion pkg/providers/tinkerbell/assert.go
@@ -486,7 +486,9 @@ func ensureWorkerHardwareAvailability(spec *ClusterSpec, current ValidatableClus
desiredWNGK8sVersion := WorkerNodeGroupWithK8sVersion(spec.Spec)
for _, nodeGroup := range spec.WorkerNodeGroupConfigurations() {
maxSurge := 1
if currentWNGK8sversion[nodeGroup.Name] != desiredWNGK8sVersion[nodeGroup.Name] || eksaVersionUpgrade {
// As rolling upgrades and scale up/down are not permitted in a single operation, it's safe to access directly using the md name.
mdName := fmt.Sprintf("%s-%s", spec.Cluster.Name, nodeGroup.Name)
if *currentWNGK8sversion[mdName] != *desiredWNGK8sVersion[mdName] || eksaVersionUpgrade {
if nodeGroup.UpgradeRolloutStrategy != nil {
maxSurge = nodeGroup.UpgradeRolloutStrategy.RollingUpdate.MaxSurge
}
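The hunk above switches the map key from the node group name to the machine deployment name. A minimal sketch of that keying with assumed types (not the provider's actual ones), showing the "<cluster-name>-<node-group-name>" construction and the version comparison that decides whether rollover hardware is needed:

```go
package main

import "fmt"

type KubernetesVersion string

// machineDeploymentName mirrors the fmt.Sprintf in the hunk above:
// the MD name is "<cluster-name>-<node-group-name>".
func machineDeploymentName(clusterName, nodeGroupName string) string {
	return fmt.Sprintf("%s-%s", clusterName, nodeGroupName)
}

// needsExtraHardware reproduces the comparison: a node group needs rollover
// hardware when its current and desired versions differ, or when the eksa
// version itself is being upgraded.
func needsExtraHardware(current, desired map[string]*KubernetesVersion, mdName string, eksaVersionUpgrade bool) bool {
	return *current[mdName] != *desired[mdName] || eksaVersionUpgrade
}

func main() {
	kube124, kube125 := KubernetesVersion("1.24"), KubernetesVersion("1.25")
	mdName := machineDeploymentName("test-cluster", "md-0")
	current := map[string]*KubernetesVersion{mdName: &kube124}
	desired := map[string]*KubernetesVersion{mdName: &kube125}
	fmt.Println(needsExtraHardware(current, desired, mdName, false)) // true
}
```

Because a rolling upgrade cannot be combined with a scale up/down in one operation, the same MD name is present in both maps, which is what makes the direct dereference in the real code safe.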
173 changes: 173 additions & 0 deletions pkg/providers/tinkerbell/assert_test.go
@@ -2,6 +2,7 @@ package tinkerbell_test

import (
"errors"
"fmt"
"net"
"testing"
"time"
@@ -119,6 +120,42 @@ func TestAssertMachineConfigNamespaceMatchesDatacenterConfig_Different(t *testin
g.Expect(err).ToNot(gomega.Succeed())
}

func TestAssertMachineConfigOSImageURLOverlap_Error(t *testing.T) {
g := gomega.NewWithT(t)
builder := NewDefaultValidClusterSpecBuilder()
clusterSpec := builder.Build()
clusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
clusterSpec.DatacenterConfig.Spec.OSImageURL = "test-url"
clusterSpec.MachineConfigs[builder.ControlPlaneMachineName].Spec.OSImageURL = "test-url"
err := tinkerbell.AssertOSImageURLDontOverlap(clusterSpec)
g.Expect(err).ToNot(gomega.Succeed())
}

func TestAssertMachineConfigOSImageURLNotSpecified_Error(t *testing.T) {
g := gomega.NewWithT(t)
builder := NewDefaultValidClusterSpecBuilder()
clusterSpec := builder.Build()
clusterSpec.DatacenterConfig.Spec.OSImageURL = ""
// set OsImageURL at machineConfig level but not for all machine configs
clusterSpec.MachineConfigs[builder.ControlPlaneMachineName].Spec.OSImageURL = "test-url"
err := tinkerbell.AssertOSImageURLDontOverlap(clusterSpec)
g.Expect(err).ToNot(gomega.Succeed())
}

func TestAssertMachineConfigOSImageURLSpecified_Succeed(t *testing.T) {
g := gomega.NewWithT(t)
builder := NewDefaultValidClusterSpecBuilder()
clusterSpec := builder.Build()
clusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
clusterSpec.DatacenterConfig.Spec.OSImageURL = ""
// set OSImageURL at the machineConfig level for all machine configs
clusterSpec.MachineConfigs[builder.ControlPlaneMachineName].Spec.OSImageURL = "test-url"
clusterSpec.MachineConfigs[builder.ExternalEtcdMachineName].Spec.OSImageURL = "test-url"
clusterSpec.MachineConfigs[builder.WorkerNodeGroupMachineName].Spec.OSImageURL = "test-url"
err := tinkerbell.AssertOSImageURLDontOverlap(clusterSpec)
g.Expect(err).To(gomega.Succeed())
}

func TestAssertEtcdMachineRefExists_Exists(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
@@ -480,6 +517,27 @@ func TestValidatableClusterWorkerNodeGroupConfigs(t *testing.T) {
g.Expect(workerConfigs[0].Replicas).To(gomega.Equal(1))
}

func TestValidatableClusterClusterK8sVersion(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
clusterSpec.Cluster.Spec.KubernetesVersion = eksav1alpha1.Kube125
validatableCluster := &tinkerbell.ValidatableTinkerbellClusterSpec{clusterSpec}

g.Expect(*validatableCluster.ClusterK8sVersion()).To(gomega.Equal(eksav1alpha1.Kube125))
}

func TestValidatableClusterWorkerNodeGroupK8sVersion(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
kube125 := eksav1alpha1.Kube125
clusterSpec.WorkerNodeGroupConfigurations()[0].KubernetesVersion = &kube125
validatableCluster := &tinkerbell.ValidatableTinkerbellClusterSpec{clusterSpec}
wngK8sVersion := validatableCluster.WorkerNodeGroupK8sVersion()
mdName := fmt.Sprintf("%s-%s", clusterSpec.Cluster.Name, clusterSpec.WorkerNodeGroupConfigurations()[0].Name)

g.Expect(wngK8sVersion[mdName]).To(gomega.Equal(&kube125))
}

func TestValidatableTinkerbellCAPIControlPlaneReplicaCount(t *testing.T) {
g := gomega.NewWithT(t)

@@ -499,6 +557,24 @@ func TestValidatableTinkerbellCAPIWorkerNodeGroupConfigs(t *testing.T) {
g.Expect(workerConfigs[0].Replicas).To(gomega.Equal(1))
}

func TestValidateTinkerbellCAPIClusterK8sVersion(t *testing.T) {
g := gomega.NewWithT(t)
validatableCAPI := validatableTinkerbellCAPI()
validatableCAPI.KubeadmControlPlane.Spec.Version = "v1.27.5-eks-1-27-12"
k8sVersion := validatableCAPI.ClusterK8sVersion()
kube127 := eksav1alpha1.Kube127
g.Expect(*k8sVersion).To(gomega.Equal(kube127))
}

func TestValidateTinkerbellCAPIWorkerNodeK8sVersion(t *testing.T) {
g := gomega.NewWithT(t)
validatableCAPI := validatableTinkerbellCAPI()
wngK8sVersion := validatableCAPI.WorkerNodeGroupK8sVersion()
mdName := validatableCAPI.WorkerGroups[0].MachineDeployment.Name
kube121 := eksav1alpha1.Kube121
g.Expect(*wngK8sVersion[mdName]).To(gomega.Equal(kube121))
}

func TestAssertionsForScaleUpDown_Success(t *testing.T) {
g := gomega.NewWithT(t)

@@ -584,6 +660,103 @@ func TestAssertionsForScaleUpDown_AddWorkerSuccess(t *testing.T) {
g.Expect(assertion(newClusterSpec)).To(gomega.Succeed())
}

func TestAssertionsForRollingUpgrade_CPOnly(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
clusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
clusterSpec.Cluster.Spec.KubernetesVersion = eksav1alpha1.Kube124
catalogue := hardware.NewCatalogue()
_ = catalogue.InsertHardware(&v1alpha1.Hardware{ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"type": "cp"},
}})

kube124 := eksav1alpha1.Kube124
clusterSpec.WorkerNodeGroupConfigurations()[0].KubernetesVersion = &kube124
assertion := tinkerbell.ExtraHardwareAvailableAssertionForRollingUpgrade(catalogue, &tinkerbell.ValidatableTinkerbellClusterSpec{clusterSpec}, false)
newClusterSpec := NewDefaultValidClusterSpecBuilder().Build()
newClusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
newClusterSpec.WorkerNodeGroupConfigurations()[0].KubernetesVersion = &kube124
newClusterSpec.Cluster.Spec.KubernetesVersion = eksav1alpha1.Kube125
g.Expect(assertion(newClusterSpec)).To(gomega.Succeed())
}

func TestAssertionsForRollingUpgrade_WorkerOnly(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
clusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
kube124 := eksav1alpha1.Kube124
clusterSpec.Cluster.Spec.KubernetesVersion = kube124
catalogue := hardware.NewCatalogue()
_ = catalogue.InsertHardware(&v1alpha1.Hardware{ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"type": "worker"},
}})

kube125 := eksav1alpha1.Kube125
clusterSpec.WorkerNodeGroupConfigurations()[0].KubernetesVersion = &kube124
assertion := tinkerbell.ExtraHardwareAvailableAssertionForRollingUpgrade(catalogue, &tinkerbell.ValidatableTinkerbellClusterSpec{clusterSpec}, false)
newClusterSpec := NewDefaultValidClusterSpecBuilder().Build()
newClusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
newClusterSpec.Cluster.Spec.KubernetesVersion = kube124
newClusterSpec.WorkerNodeGroupConfigurations()[0].KubernetesVersion = &kube125
g.Expect(assertion(newClusterSpec)).To(gomega.Succeed())
}

func TestAssertionsForRollingUpgrade_BothCPWorker(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
clusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
kube124 := eksav1alpha1.Kube124
clusterSpec.Cluster.Spec.KubernetesVersion = kube124
catalogue := hardware.NewCatalogue()
_ = catalogue.InsertHardware(&v1alpha1.Hardware{ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"type": "cp"},
}})
_ = catalogue.InsertHardware(&v1alpha1.Hardware{ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"type": "worker"},
}})

assertion := tinkerbell.ExtraHardwareAvailableAssertionForRollingUpgrade(catalogue, &tinkerbell.ValidatableTinkerbellClusterSpec{clusterSpec}, false)
newClusterSpec := NewDefaultValidClusterSpecBuilder().Build()
kube125 := eksav1alpha1.Kube125
newClusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
newClusterSpec.Cluster.Spec.KubernetesVersion = kube125
g.Expect(assertion(newClusterSpec)).To(gomega.Succeed())
}

func TestAssertionsForRollingUpgrade_CPError(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
clusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
kube124 := eksav1alpha1.Kube124
clusterSpec.Cluster.Spec.KubernetesVersion = kube124
catalogue := hardware.NewCatalogue()

assertion := tinkerbell.ExtraHardwareAvailableAssertionForRollingUpgrade(catalogue, &tinkerbell.ValidatableTinkerbellClusterSpec{clusterSpec}, false)
newClusterSpec := NewDefaultValidClusterSpecBuilder().Build()
newClusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
newClusterSpec.WorkerNodeGroupConfigurations()[0].KubernetesVersion = &kube124
newClusterSpec.Cluster.Spec.KubernetesVersion = eksav1alpha1.Kube125
g.Expect(assertion(newClusterSpec)).To(gomega.MatchError(gomega.ContainSubstring("minimum hardware count not met for selector '{\"type\":\"cp\"}'")))
}

func TestAssertionsForRollingUpgrade_WorkerError(t *testing.T) {
g := gomega.NewWithT(t)
clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
clusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
kube124 := eksav1alpha1.Kube124
kube125 := eksav1alpha1.Kube125
clusterSpec.Cluster.Spec.KubernetesVersion = kube125
clusterSpec.WorkerNodeGroupConfigurations()[0].KubernetesVersion = &kube124
catalogue := hardware.NewCatalogue()

assertion := tinkerbell.ExtraHardwareAvailableAssertionForRollingUpgrade(catalogue, &tinkerbell.ValidatableTinkerbellClusterSpec{clusterSpec}, false)
newClusterSpec := NewDefaultValidClusterSpecBuilder().Build()
newClusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
newClusterSpec.WorkerNodeGroupConfigurations()[0].KubernetesVersion = &kube125
newClusterSpec.Cluster.Spec.KubernetesVersion = kube125
g.Expect(assertion(newClusterSpec)).To(gomega.MatchError(gomega.ContainSubstring("minimum hardware count not met for selector '{\"type\":\"worker\"}'")))
}

func TestAssertionsForScaleUpDown_ExternalEtcdErrorFails(t *testing.T) {
g := gomega.NewWithT(t)

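TestValidateTinkerbellCAPIClusterK8sVersion above asserts that the KubeadmControlPlane version "v1.27.5-eks-1-27-12" resolves to the EKS-A Kubernetes version 1.27. A minimal sketch of that mapping, assuming a simple major.minor truncation rather than the provider's actual helper:

```go
package main

import (
	"fmt"
	"strings"
)

// capiToEksaK8sVersion keeps only the major.minor portion of a CAPI version
// string, e.g. "v1.27.5-eks-1-27-12" -> "1.27".
func capiToEksaK8sVersion(capiVersion string) string {
	v := strings.TrimPrefix(capiVersion, "v")
	parts := strings.SplitN(v, ".", 3)
	if len(parts) < 2 {
		return ""
	}
	return parts[0] + "." + parts[1]
}

func main() {
	fmt.Println(capiToEksaK8sVersion("v1.27.5-eks-1-27-12")) // 1.27
}
```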
51 changes: 51 additions & 0 deletions pkg/providers/tinkerbell/template_test.go
@@ -56,3 +56,54 @@ func TestGenerateTemplateBuilder(t *testing.T) {
g.Expect(err).NotTo(HaveOccurred())
g.Expect(gotEtcdMachineSpec).To(Equal(expectedEtcdMachineSpec))
}

func TestGenerateTemplateBuilderForMachineConfigOsImageURL(t *testing.T) {
g := NewWithT(t)
testFile := "testdata/cluster_osimage_machine_config.yaml"
clusterSpec := test.NewFullClusterSpec(t, testFile)

expectedControlPlaneMachineSpec := &v1alpha1.TinkerbellMachineConfigSpec{
HardwareSelector: map[string]string{"type": "cp"},
TemplateRef: v1alpha1.Ref{
Kind: "TinkerbellTemplateConfig",
Name: "tink-test",
},
OSFamily: "ubuntu",
OSImageURL: "https://ubuntu.gz",
Users: []v1alpha1.UserConfiguration{
{
Name: "tink-user",
SshAuthorizedKeys: []string{"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ=="},
},
},
}
gotExpectedControlPlaneMachineSpec, err := getControlPlaneMachineSpec(clusterSpec)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(gotExpectedControlPlaneMachineSpec).To(Equal(expectedControlPlaneMachineSpec))

expectedWorkerNodeGroupMachineSpec := map[string]v1alpha1.TinkerbellMachineConfigSpec{
"test-md": {
HardwareSelector: map[string]string{"type": "worker"},
TemplateRef: v1alpha1.Ref{
Kind: "TinkerbellTemplateConfig",
Name: "tink-test",
},
OSFamily: "ubuntu",
OSImageURL: "https://ubuntu.gz",
Users: []v1alpha1.UserConfiguration{
{
Name: "tink-user",
SshAuthorizedKeys: []string{"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com"},
},
},
},
}
gotWorkerNodeGroupMachineSpec, err := getWorkerNodeGroupMachineSpec(clusterSpec)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(gotWorkerNodeGroupMachineSpec).To(Equal(expectedWorkerNodeGroupMachineSpec))

gotEtcdMachineSpec, err := getEtcdMachineSpec(clusterSpec)
var expectedEtcdMachineSpec *v1alpha1.TinkerbellMachineConfigSpec
g.Expect(err).NotTo(HaveOccurred())
g.Expect(gotEtcdMachineSpec).To(Equal(expectedEtcdMachineSpec))
}
5 changes: 3 additions & 2 deletions pkg/providers/tinkerbell/upgrade.go
@@ -150,10 +150,11 @@ func (p *Provider) validateAvailableHardwareForUpgrade(ctx context.Context, curr
currentTinkerbellSpec := NewClusterSpec(currentSpec, currentSpec.TinkerbellMachineConfigs, currentSpec.TinkerbellDatacenter)
rollingUpgrade := p.isRollingUpgrade(currentSpec, newClusterSpec)
currentCluster := &ValidatableTinkerbellClusterSpec{currentTinkerbellSpec}
if rollingUpgrade {
if rollingUpgrade || eksaVersionUpgrade {
clusterSpecValidator.Register(ExtraHardwareAvailableAssertionForRollingUpgrade(p.catalogue, currentCluster, eksaVersionUpgrade))
}
clusterSpecValidator.Register(AssertionsForScaleUpDown(p.catalogue, currentCluster, rollingUpgrade))
// Scale up/down should not be supported during either a rolling upgrade or an eksa version upgrade.
clusterSpecValidator.Register(AssertionsForScaleUpDown(p.catalogue, currentCluster, rollingUpgrade || eksaVersionUpgrade))

tinkerbellClusterSpec := NewClusterSpec(newClusterSpec, p.machineConfigs, p.datacenterConfig)

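The change above is the edge case named in the commit title: an eksa version upgrade alone (with no Kubernetes version change) still rolls machines, so it must trigger the same extra-hardware assertion and the same scale up/down restriction as a rolling upgrade. A minimal sketch of that registration flow, with assumed validator and assertion types standing in for the provider's:

```go
package main

import "fmt"

type assertion func() error

type clusterSpecValidator struct{ assertions []assertion }

func (v *clusterSpecValidator) Register(a assertion) { v.assertions = append(v.assertions, a) }

// registerUpgradeAssertions mirrors the control flow of the hunk above:
// the extra-hardware assertion runs for a rolling upgrade OR an eksa version
// upgrade, and scaling is treated as disallowed in either case.
func registerUpgradeAssertions(v *clusterSpecValidator, rollingUpgrade, eksaVersionUpgrade bool) {
	if rollingUpgrade || eksaVersionUpgrade {
		// stands in for ExtraHardwareAvailableAssertionForRollingUpgrade
		v.Register(func() error { return nil })
	}
	noScaling := rollingUpgrade || eksaVersionUpgrade
	// stands in for AssertionsForScaleUpDown
	v.Register(func() error {
		fmt.Println("scaling allowed:", !noScaling)
		return nil
	})
}

func main() {
	v := &clusterSpecValidator{}
	registerUpgradeAssertions(v, false, true) // eksa version upgrade only
	for _, a := range v.assertions {
		_ = a()
	}
}
```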