diff --git a/pkg/api/v1alpha1/tinkerbellmachineconfig_types_test.go b/pkg/api/v1alpha1/tinkerbellmachineconfig_types_test.go
index ef45b1ac5088c..206fc8a981fda 100644
--- a/pkg/api/v1alpha1/tinkerbellmachineconfig_types_test.go
+++ b/pkg/api/v1alpha1/tinkerbellmachineconfig_types_test.go
@@ -66,6 +66,13 @@ func TestTinkerbellMachineConfigValidateFail(t *testing.T) {
 			),
 			expectedErr: "HostOSConfiguration is invalid for TinkerbellMachineConfig tinkerbellmachineconfig: NTPConfiguration.Servers can not be empty",
 		},
+		{
+			name: "Invalid OS Image URL",
+			machineConfig: CreateTinkerbellMachineConfig(func(mc *TinkerbellMachineConfig) {
+				mc.Spec.OSImageURL = "test"
+			}),
+			expectedErr: "parsing osImageOverride: parse \"test\": invalid URI for request",
+		},
 	}
 
 	for _, tc := range tests {
diff --git a/pkg/providers/tinkerbell/assert.go b/pkg/providers/tinkerbell/assert.go
index 7e5879d7a7b93..96474d3fa5e38 100644
--- a/pkg/providers/tinkerbell/assert.go
+++ b/pkg/providers/tinkerbell/assert.go
@@ -486,7 +486,9 @@ func ensureWorkerHardwareAvailability(spec *ClusterSpec, current ValidatableClus
 	desiredWNGK8sVersion := WorkerNodeGroupWithK8sVersion(spec.Spec)
 	for _, nodeGroup := range spec.WorkerNodeGroupConfigurations() {
 		maxSurge := 1
-		if currentWNGK8sversion[nodeGroup.Name] != desiredWNGK8sVersion[nodeGroup.Name] || eksaVersionUpgrade {
+		// As rolling upgrades and scale up/down are not permitted in a single operation, it's safe to access the maps directly using the MachineDeployment name.
+		mdName := fmt.Sprintf("%s-%s", spec.Cluster.Name, nodeGroup.Name)
+		if *currentWNGK8sversion[mdName] != *desiredWNGK8sVersion[mdName] || eksaVersionUpgrade {
 			if nodeGroup.UpgradeRolloutStrategy != nil {
 				maxSurge = nodeGroup.UpgradeRolloutStrategy.RollingUpdate.MaxSurge
 			}
diff --git a/pkg/providers/tinkerbell/assert_test.go b/pkg/providers/tinkerbell/assert_test.go
index 92cf031a70e62..d165a539795c5 100644
--- a/pkg/providers/tinkerbell/assert_test.go
+++ b/pkg/providers/tinkerbell/assert_test.go
@@ -2,6 +2,7 @@ package tinkerbell_test
 
 import (
 	"errors"
+	"fmt"
 	"net"
 	"testing"
 	"time"
@@ -119,6 +120,42 @@ func TestAssertMachineConfigNamespaceMatchesDatacenterConfig_Different(t *testin
 	g.Expect(err).ToNot(gomega.Succeed())
 }
 
+func TestAssertMachineConfigOSImageURLOverlap_Error(t *testing.T) {
+	g := gomega.NewWithT(t)
+	builder := NewDefaultValidClusterSpecBuilder()
+	clusterSpec := builder.Build()
+	clusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
+	clusterSpec.DatacenterConfig.Spec.OSImageURL = "test-url"
+	clusterSpec.MachineConfigs[builder.ControlPlaneMachineName].Spec.OSImageURL = "test-url"
+	err := tinkerbell.AssertOSImageURLDontOverlap(clusterSpec)
+	g.Expect(err).ToNot(gomega.Succeed())
+}
+
+func TestAssertMachineConfigOSImageURLNotSpecified_Error(t *testing.T) {
+	g := gomega.NewWithT(t)
+	builder := NewDefaultValidClusterSpecBuilder()
+	clusterSpec := builder.Build()
+	clusterSpec.DatacenterConfig.Spec.OSImageURL = ""
+	// set OSImageURL at machineConfig level but not for all machine configs
+	clusterSpec.MachineConfigs[builder.ControlPlaneMachineName].Spec.OSImageURL = "test-url"
+	err := tinkerbell.AssertOSImageURLDontOverlap(clusterSpec)
+	g.Expect(err).ToNot(gomega.Succeed())
+}
+
+func TestAssertMachineConfigOSImageURLSpecified_Succeed(t *testing.T) {
+	g := gomega.NewWithT(t)
+	builder := NewDefaultValidClusterSpecBuilder()
+	clusterSpec := builder.Build()
+	clusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
+	clusterSpec.DatacenterConfig.Spec.OSImageURL = ""
+	// set OSImageURL at machineConfig level for all machine configs
+	clusterSpec.MachineConfigs[builder.ControlPlaneMachineName].Spec.OSImageURL = "test-url"
+	clusterSpec.MachineConfigs[builder.ExternalEtcdMachineName].Spec.OSImageURL = "test-url"
+	clusterSpec.MachineConfigs[builder.WorkerNodeGroupMachineName].Spec.OSImageURL = "test-url"
+	err := tinkerbell.AssertOSImageURLDontOverlap(clusterSpec)
+	g.Expect(err).To(gomega.Succeed())
+}
+
 func TestAssertEtcdMachineRefExists_Exists(t *testing.T) {
 	g := gomega.NewWithT(t)
 	clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
@@ -480,6 +517,27 @@ func TestValidatableClusterWorkerNodeGroupConfigs(t *testing.T) {
 	g.Expect(workerConfigs[0].Replicas).To(gomega.Equal(1))
 }
 
+func TestValidatableClusterClusterK8sVersion(t *testing.T) {
+	g := gomega.NewWithT(t)
+	clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
+	clusterSpec.Cluster.Spec.KubernetesVersion = eksav1alpha1.Kube125
+	validatableCluster := &tinkerbell.ValidatableTinkerbellClusterSpec{clusterSpec}
+
+	g.Expect(*validatableCluster.ClusterK8sVersion()).To(gomega.Equal(eksav1alpha1.Kube125))
+}
+
+func TestValidatableClusterWorkerNodeGroupK8sVersion(t *testing.T) {
+	g := gomega.NewWithT(t)
+	clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
+	kube125 := eksav1alpha1.Kube125
+	clusterSpec.WorkerNodeGroupConfigurations()[0].KubernetesVersion = &kube125
+	validatableCluster := &tinkerbell.ValidatableTinkerbellClusterSpec{clusterSpec}
+	wngK8sVersion := validatableCluster.WorkerNodeGroupK8sVersion()
+	mdName := fmt.Sprintf("%s-%s", clusterSpec.Cluster.Name, clusterSpec.WorkerNodeGroupConfigurations()[0].Name)
+
+	g.Expect(wngK8sVersion[mdName]).To(gomega.Equal(&kube125))
+}
+
 func TestValidatableTinkerbellCAPIControlPlaneReplicaCount(t *testing.T) {
 	g := gomega.NewWithT(t)
@@ -499,6 +557,24 @@ func TestValidatableTinkerbellCAPIWorkerNodeGroupConfigs(t *testing.T) {
 	g.Expect(workerConfigs[0].Replicas).To(gomega.Equal(1))
 }
 
+func TestValidateTinkerbellCAPIClusterK8sVersion(t *testing.T) {
+	g := gomega.NewWithT(t)
+	validatableCAPI := validatableTinkerbellCAPI()
+	validatableCAPI.KubeadmControlPlane.Spec.Version = "v1.27.5-eks-1-27-12"
+	k8sVersion := validatableCAPI.ClusterK8sVersion()
+	kube127 := eksav1alpha1.Kube127
+	g.Expect(*k8sVersion).To(gomega.Equal(kube127))
+}
+
+func TestValidateTinkerbellCAPIWorkerNodeK8sVersion(t *testing.T) {
+	g := gomega.NewWithT(t)
+	validatableCAPI := validatableTinkerbellCAPI()
+	wngK8sVersion := validatableCAPI.WorkerNodeGroupK8sVersion()
+	mdName := validatableCAPI.WorkerGroups[0].MachineDeployment.Name
+	kube121 := eksav1alpha1.Kube121
+	g.Expect(*wngK8sVersion[mdName]).To(gomega.Equal(kube121))
+}
+
 func TestAssertionsForScaleUpDown_Success(t *testing.T) {
 	g := gomega.NewWithT(t)
@@ -584,6 +660,103 @@ func TestAssertionsForScaleUpDown_AddWorkerSuccess(t *testing.T) {
 	g.Expect(assertion(newClusterSpec)).To(gomega.Succeed())
 }
 
+func TestAssertionsForRollingUpgrade_CPOnly(t *testing.T) {
+	g := gomega.NewWithT(t)
+	clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
+	clusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
+	clusterSpec.Cluster.Spec.KubernetesVersion = eksav1alpha1.Kube124
+	catalogue := hardware.NewCatalogue()
+	_ = catalogue.InsertHardware(&v1alpha1.Hardware{ObjectMeta: metav1.ObjectMeta{
+		Labels: map[string]string{"type": "cp"},
+	}})
+
+	kube124 := eksav1alpha1.Kube124
+	clusterSpec.WorkerNodeGroupConfigurations()[0].KubernetesVersion = &kube124
+	assertion := tinkerbell.ExtraHardwareAvailableAssertionForRollingUpgrade(catalogue, &tinkerbell.ValidatableTinkerbellClusterSpec{clusterSpec}, false)
+	newClusterSpec := NewDefaultValidClusterSpecBuilder().Build()
+	newClusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
+	newClusterSpec.WorkerNodeGroupConfigurations()[0].KubernetesVersion = &kube124
+	newClusterSpec.Cluster.Spec.KubernetesVersion = eksav1alpha1.Kube125
+	g.Expect(assertion(newClusterSpec)).To(gomega.Succeed())
+}
+
+func TestAssertionsForRollingUpgrade_WorkerOnly(t *testing.T) {
+	g := gomega.NewWithT(t)
+	clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
+	clusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
+	kube124 := eksav1alpha1.Kube124
+	clusterSpec.Cluster.Spec.KubernetesVersion = kube124
+	catalogue := hardware.NewCatalogue()
+	_ = catalogue.InsertHardware(&v1alpha1.Hardware{ObjectMeta: metav1.ObjectMeta{
+		Labels: map[string]string{"type": "worker"},
+	}})
+
+	kube125 := eksav1alpha1.Kube125
+	clusterSpec.WorkerNodeGroupConfigurations()[0].KubernetesVersion = &kube124
+	assertion := tinkerbell.ExtraHardwareAvailableAssertionForRollingUpgrade(catalogue, &tinkerbell.ValidatableTinkerbellClusterSpec{clusterSpec}, false)
+	newClusterSpec := NewDefaultValidClusterSpecBuilder().Build()
+	newClusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
+	newClusterSpec.Cluster.Spec.KubernetesVersion = kube124
+	newClusterSpec.WorkerNodeGroupConfigurations()[0].KubernetesVersion = &kube125
+	g.Expect(assertion(newClusterSpec)).To(gomega.Succeed())
+}
+
+func TestAssertionsForRollingUpgrade_BothCPWorker(t *testing.T) {
+	g := gomega.NewWithT(t)
+	clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
+	clusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
+	kube124 := eksav1alpha1.Kube124
+	clusterSpec.Cluster.Spec.KubernetesVersion = kube124
+	catalogue := hardware.NewCatalogue()
+	_ = catalogue.InsertHardware(&v1alpha1.Hardware{ObjectMeta: metav1.ObjectMeta{
+		Labels: map[string]string{"type": "cp"},
+	}})
+	_ = catalogue.InsertHardware(&v1alpha1.Hardware{ObjectMeta: metav1.ObjectMeta{
+		Labels: map[string]string{"type": "worker"},
+	}})
+
+	assertion := tinkerbell.ExtraHardwareAvailableAssertionForRollingUpgrade(catalogue, &tinkerbell.ValidatableTinkerbellClusterSpec{clusterSpec}, false)
+	newClusterSpec := NewDefaultValidClusterSpecBuilder().Build()
+	kube125 := eksav1alpha1.Kube125
+	newClusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
+	newClusterSpec.Cluster.Spec.KubernetesVersion = kube125
+	g.Expect(assertion(newClusterSpec)).To(gomega.Succeed())
+}
+
+func TestAssertionsForRollingUpgrade_CPError(t *testing.T) {
+	g := gomega.NewWithT(t)
+	clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
+	clusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
+	kube124 := eksav1alpha1.Kube124
+	clusterSpec.Cluster.Spec.KubernetesVersion = kube124
+	catalogue := hardware.NewCatalogue()
+
+	assertion := tinkerbell.ExtraHardwareAvailableAssertionForRollingUpgrade(catalogue, &tinkerbell.ValidatableTinkerbellClusterSpec{clusterSpec}, false)
+	newClusterSpec := NewDefaultValidClusterSpecBuilder().Build()
+	newClusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
+	newClusterSpec.WorkerNodeGroupConfigurations()[0].KubernetesVersion = &kube124
+	newClusterSpec.Cluster.Spec.KubernetesVersion = eksav1alpha1.Kube125
+	g.Expect(assertion(newClusterSpec)).To(gomega.MatchError(gomega.ContainSubstring("minimum hardware count not met for selector '{\"type\":\"cp\"}'")))
+}
+
+func 
TestAssertionsForRollingUpgrade_WorkerError(t *testing.T) {
+	g := gomega.NewWithT(t)
+	clusterSpec := NewDefaultValidClusterSpecBuilder().Build()
+	clusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
+	kube124 := eksav1alpha1.Kube124
+	kube125 := eksav1alpha1.Kube125
+	clusterSpec.Cluster.Spec.KubernetesVersion = kube125
+	clusterSpec.WorkerNodeGroupConfigurations()[0].KubernetesVersion = &kube124
+	catalogue := hardware.NewCatalogue()
+
+	assertion := tinkerbell.ExtraHardwareAvailableAssertionForRollingUpgrade(catalogue, &tinkerbell.ValidatableTinkerbellClusterSpec{clusterSpec}, false)
+	newClusterSpec := NewDefaultValidClusterSpecBuilder().Build()
+	newClusterSpec.Spec.Cluster.Spec.ExternalEtcdConfiguration = nil
+	newClusterSpec.WorkerNodeGroupConfigurations()[0].KubernetesVersion = &kube125
+	newClusterSpec.Cluster.Spec.KubernetesVersion = kube125
+	g.Expect(assertion(newClusterSpec)).To(gomega.MatchError(gomega.ContainSubstring("minimum hardware count not met for selector '{\"type\":\"worker\"}'")))
+}
+
 func TestAssertionsForScaleUpDown_ExternalEtcdErrorFails(t *testing.T) {
 	g := gomega.NewWithT(t)
diff --git a/pkg/providers/tinkerbell/template_test.go b/pkg/providers/tinkerbell/template_test.go
index 0229d72591701..f2408e0599c3c 100644
--- a/pkg/providers/tinkerbell/template_test.go
+++ b/pkg/providers/tinkerbell/template_test.go
@@ -56,3 +56,54 @@ func TestGenerateTemplateBuilder(t *testing.T) {
 	g.Expect(err).NotTo(HaveOccurred())
 	g.Expect(gotEtcdMachineSpec).To(Equal(expectedEtcdMachineSpec))
 }
+
+func TestGenerateTemplateBuilderForMachineConfigOsImageURL(t *testing.T) {
+	g := NewWithT(t)
+	testFile := "testdata/cluster_osimage_machine_config.yaml"
+	clusterSpec := test.NewFullClusterSpec(t, testFile)
+
+	expectedControlPlaneMachineSpec := &v1alpha1.TinkerbellMachineConfigSpec{
+		HardwareSelector: map[string]string{"type": "cp"},
+		TemplateRef: v1alpha1.Ref{
+			Kind: "TinkerbellTemplateConfig",
+			Name: "tink-test",
+		},
+		OSFamily:   "ubuntu",
+		OSImageURL: "https://ubuntu.gz",
+		Users: []v1alpha1.UserConfiguration{
+			{
+				Name:              "tink-user",
+				SshAuthorizedKeys: []string{"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ=="},
+			},
+		},
+	}
+	gotExpectedControlPlaneMachineSpec, err := getControlPlaneMachineSpec(clusterSpec)
+	g.Expect(err).NotTo(HaveOccurred())
+	g.Expect(gotExpectedControlPlaneMachineSpec).To(Equal(expectedControlPlaneMachineSpec))
+
+	expectedWorkerNodeGroupMachineSpec := map[string]v1alpha1.TinkerbellMachineConfigSpec{
+		"test-md": {
+			HardwareSelector: map[string]string{"type": "worker"},
+			TemplateRef: v1alpha1.Ref{
+				Kind: "TinkerbellTemplateConfig",
+				Name: "tink-test",
+			},
+			OSFamily:   "ubuntu",
+			OSImageURL: "https://ubuntu.gz",
+			Users: []v1alpha1.UserConfiguration{
+				{
+					Name:              "tink-user",
+					SshAuthorizedKeys: []string{"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com"},
+				},
+			},
+		},
+	}
+	gotWorkerNodeGroupMachineSpec, err := getWorkerNodeGroupMachineSpec(clusterSpec)
+	g.Expect(err).NotTo(HaveOccurred())
+	g.Expect(gotWorkerNodeGroupMachineSpec).To(Equal(expectedWorkerNodeGroupMachineSpec))
+
+	gotEtcdMachineSpec, err := getEtcdMachineSpec(clusterSpec)
+	var expectedEtcdMachineSpec *v1alpha1.TinkerbellMachineConfigSpec
+	g.Expect(err).NotTo(HaveOccurred())
+	g.Expect(gotEtcdMachineSpec).To(Equal(expectedEtcdMachineSpec))
+}
diff --git a/pkg/providers/tinkerbell/upgrade.go b/pkg/providers/tinkerbell/upgrade.go
index 7f59b93dc5ef3..4773e23a14fa6 100644
--- a/pkg/providers/tinkerbell/upgrade.go
+++ b/pkg/providers/tinkerbell/upgrade.go
@@ -150,10 +150,11 @@ func (p *Provider) validateAvailableHardwareForUpgrade(ctx context.Context, curr
 	currentTinkerbellSpec := NewClusterSpec(currentSpec, currentSpec.TinkerbellMachineConfigs, currentSpec.TinkerbellDatacenter)
 	rollingUpgrade := p.isRollingUpgrade(currentSpec, newClusterSpec)
 	currentCluster := &ValidatableTinkerbellClusterSpec{currentTinkerbellSpec}
-	if rollingUpgrade {
+	if rollingUpgrade || eksaVersionUpgrade {
 		clusterSpecValidator.Register(ExtraHardwareAvailableAssertionForRollingUpgrade(p.catalogue, currentCluster, eksaVersionUpgrade))
 	}
-	clusterSpecValidator.Register(AssertionsForScaleUpDown(p.catalogue, currentCluster, rollingUpgrade))
+	// Scale up/down is not supported during either a rolling upgrade or an EKS-A version upgrade.
+	clusterSpecValidator.Register(AssertionsForScaleUpDown(p.catalogue, currentCluster, rollingUpgrade || eksaVersionUpgrade))
 
 	tinkerbellClusterSpec := NewClusterSpec(newClusterSpec, p.machineConfigs, p.datacenterConfig)
diff --git a/pkg/providers/tinkerbell/upgrade_test.go b/pkg/providers/tinkerbell/upgrade_test.go
index c5e05f44c04ea..a76ba56378c70 100644
--- a/pkg/providers/tinkerbell/upgrade_test.go
+++ b/pkg/providers/tinkerbell/upgrade_test.go
@@ -21,6 +21,7 @@ import (
 	"github.com/aws/eks-anywhere/pkg/constants"
 	"github.com/aws/eks-anywhere/pkg/filewriter"
 	filewritermocks "github.com/aws/eks-anywhere/pkg/filewriter/mocks"
+	"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/hardware"
 	"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/mocks"
 	"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/rufiounreleased"
 	"github.com/aws/eks-anywhere/pkg/providers/tinkerbell/stack"
@@ -969,3 +970,97 @@ func TestProvider_ValidateNewSpec_NewWorkerNodeGroup(t *testing.T) {
 		t.Fatal(err)
 	}
 }
+
+func TestProviderValidateAvailableHardwareOnlyCPUpgradeSuccess(t *testing.T) {
+	clusterSpecManifest := "cluster_osimage_machine_config.yaml"
+	mockCtrl := gomock.NewController(t)
+	clusterSpec := givenClusterSpec(t, clusterSpecManifest)
+	datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
+	machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
+	docker := stackmocks.NewMockDocker(mockCtrl)
+	helm := stackmocks.NewMockHelm(mockCtrl)
+	kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
+	stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
+	writer := filewritermocks.NewMockFileWriter(mockCtrl)
+	ctx := context.Background()
+	provider := newTinkerbellProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl)
+	provider.stackInstaller = stackInstaller
+
+	clusterSpec.ManagementCluster = &types.Cluster{Name: "test", KubeconfigFile: "kubeconfig-file"}
+	clusterSpec.Cluster.Spec.ManagementCluster = v1alpha1.ManagementCluster{Name: "test-mgmt"}
+	catalogue := hardware.NewCatalogue()
+	newCluster := clusterSpec.DeepCopy()
+	newCluster.Cluster.Spec.KubernetesVersion = v1alpha1.Kube122
+	_ = catalogue.InsertHardware(&tinkv1.Hardware{ObjectMeta: metav1.ObjectMeta{
+		Labels: map[string]string{"type": "cp"},
+	}})
+	provider.catalogue = catalogue
+	err := provider.validateAvailableHardwareForUpgrade(ctx, clusterSpec, newCluster)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestProviderValidateAvailableHardwareOnlyWorkerUpgradeSuccess(t *testing.T) {
+	clusterSpecManifest := "cluster_osimage_machine_config.yaml"
+	mockCtrl := gomock.NewController(t)
+	clusterSpec := givenClusterSpec(t, clusterSpecManifest)
+	datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
+	machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
+	docker := stackmocks.NewMockDocker(mockCtrl)
+	helm := stackmocks.NewMockHelm(mockCtrl)
+	kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
+	stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
+	writer := filewritermocks.NewMockFileWriter(mockCtrl)
+	ctx := context.Background()
+	provider := newTinkerbellProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl)
+	provider.stackInstaller = stackInstaller
+
+	clusterSpec.ManagementCluster = &types.Cluster{Name: "test", KubeconfigFile: "kubeconfig-file"}
+	clusterSpec.Cluster.Spec.ManagementCluster = v1alpha1.ManagementCluster{Name: "test-mgmt"}
+	catalogue := hardware.NewCatalogue()
+	newCluster := clusterSpec.DeepCopy()
+	kube122 := v1alpha1.Kube122
+	newCluster.Cluster.Spec.WorkerNodeGroupConfigurations[0].KubernetesVersion = &kube122
+	_ = catalogue.InsertHardware(&tinkv1.Hardware{ObjectMeta: metav1.ObjectMeta{
+		Labels: map[string]string{"type": "worker"},
+	}})
+	provider.catalogue = catalogue
+	err := provider.validateAvailableHardwareForUpgrade(ctx, clusterSpec, newCluster)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestProviderValidateAvailableHardwareEksaVersionUpgradeSuccess(t *testing.T) {
+	clusterSpecManifest := "cluster_osimage_machine_config.yaml"
+	mockCtrl := gomock.NewController(t)
+	clusterSpec := givenClusterSpec(t, clusterSpecManifest)
+	datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
+	machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
+	docker := stackmocks.NewMockDocker(mockCtrl)
+	helm := stackmocks.NewMockHelm(mockCtrl)
+	kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
+	stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
+	writer := filewritermocks.NewMockFileWriter(mockCtrl)
+	ctx := context.Background()
+	provider := newTinkerbellProvider(datacenterConfig, machineConfigs, clusterSpec.Cluster, writer, docker, helm, kubectl)
+	provider.stackInstaller = stackInstaller
+
+	clusterSpec.ManagementCluster = &types.Cluster{Name: "test", KubeconfigFile: "kubeconfig-file"}
+	clusterSpec.Cluster.Spec.ManagementCluster = v1alpha1.ManagementCluster{Name: "test-mgmt"}
+	catalogue := hardware.NewCatalogue()
+	newCluster := clusterSpec.DeepCopy()
+	newCluster.Bundles.Spec.Number++
+	_ = catalogue.InsertHardware(&tinkv1.Hardware{ObjectMeta: metav1.ObjectMeta{
+		Labels: map[string]string{"type": "cp"},
+	}})
+	_ = catalogue.InsertHardware(&tinkv1.Hardware{ObjectMeta: metav1.ObjectMeta{
+		Labels: map[string]string{"type": "worker"},
+	}})
+	provider.catalogue = catalogue
+	err := provider.validateAvailableHardwareForUpgrade(ctx, clusterSpec, newCluster)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/pkg/providers/tinkerbell/validate.go b/pkg/providers/tinkerbell/validate.go
index e53b4eb436b27..a614cfc56ab71 100644
--- a/pkg/providers/tinkerbell/validate.go
+++ b/pkg/providers/tinkerbell/validate.go
@@ -37,10 +37,10 @@ func validateOsFamily(spec *ClusterSpec) error {
 
 func validateOSImageURLDontOverlap(spec *ClusterSpec) error {
 	var dcOSImageURLfound bool
-	if spec.TinkerbellDatacenter.Spec.OSImageURL != "" {
+	if spec.DatacenterConfig.Spec.OSImageURL != "" {
 		dcOSImageURLfound = true
 	}
-	return validateMachineCfgOSImageURL(spec.TinkerbellMachineConfigs, dcOSImageURLfound)
+	return validateMachineCfgOSImageURL(spec.MachineConfigs, dcOSImageURLfound)
 }
 
 func validateMachineCfgOSImageURL(machineConfigs map[string]*v1alpha1.TinkerbellMachineConfig, dataCenterOSImageURLfound bool) error {
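
Reviewer note (not part of the patch): the two invariants this change enforces are spread across several files, so here is a minimal, self-contained Go sketch of them. `checkOSImageURLs` and `mdName` are hypothetical helper names for illustration only; the first rule is inferred from the new `AssertOSImageURLDontOverlap` tests and the `parsing osImageOverride` error string in the API test, and the second mirrors the `fmt.Sprintf("%s-%s", ...)` keying added in `assert.go`. This is not the provider's actual implementation.

```go
package main

import (
	"fmt"
	"net/url"
)

// checkOSImageURLs sketches the overlap rule the new tests encode: the OS image
// URL may live on the datacenter config or on every machine config, but never
// on both, and never on only a subset of machine configs.
// (Hypothetical helper; simplified types.)
func checkOSImageURLs(datacenterURL string, machineConfigURLs map[string]string) error {
	dcSet := datacenterURL != ""
	for name, u := range machineConfigURLs {
		switch {
		case dcSet && u != "":
			return fmt.Errorf("OSImageURL is set on both the datacenter config and machine config %q", name)
		case !dcSet && u == "":
			return fmt.Errorf("machine config %q must set OSImageURL when the datacenter config does not", name)
		case u != "":
			// Mirrors the new API-level validation: the override must be a valid URI.
			if _, err := url.ParseRequestURI(u); err != nil {
				return fmt.Errorf("parsing osImageOverride: %v", err)
			}
		}
	}
	return nil
}

// mdName mirrors the keying change in assert.go: CAPI names a MachineDeployment
// "<cluster name>-<node group name>", so the current and desired worker
// K8s-version maps can be joined on that key instead of the bare node group name.
func mdName(clusterName, nodeGroupName string) string {
	return fmt.Sprintf("%s-%s", clusterName, nodeGroupName)
}

func main() {
	// Datacenter URL unset and only one of two machine configs set -> error,
	// as in TestAssertMachineConfigOSImageURLNotSpecified_Error.
	fmt.Println(checkOSImageURLs("", map[string]string{"cp": "https://ubuntu.gz", "worker": ""}))
	// "test" is not a valid URI -> parsing osImageOverride: parse "test": invalid URI for request.
	fmt.Println(checkOSImageURLs("", map[string]string{"cp": "test"}))
	fmt.Println(mdName("test", "md-0")) // "test-md-0"
}
```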