diff --git a/designs/handle-autoscaler-cluster-status-reconciliation.md b/designs/handle-autoscaler-cluster-status-reconciliation.md new file mode 100644 index 000000000000..963d87fd2f6e --- /dev/null +++ b/designs/handle-autoscaler-cluster-status-reconciliation.md @@ -0,0 +1,91 @@ +# Handle cluster status reconciliation in EKS Anywhere controller for Autoscaling Configuration + +## Problem Statement + +When a customer configures [autoscaling](https://anywhere.eks.amazonaws.com/docs/getting-started/optional/autoscaling/) for any of their worker node groups, the number of worker nodes created in the cluster is managed by the [cluster-autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), which ensures that all pods have a place to run and that there are no unneeded nodes. The cluster autoscaler also updates the replicas in the corresponding machine deployment object to match the actual number of machines provisioned. When the EKS-A controller reconciles the cluster status, it sees that the expected count of worker nodes does not match the observed count and marks the `WorkersReady` condition as `False` with a message such as `Scaling down worker nodes, 1 expected (10 actual)`. This happens because the controller takes the expected count from the worker node group count in the cluster spec, which is set by the customer during cluster creation or upgrade, whereas the actual replicas are managed by the autoscaler. This doc discusses various options to fix this issue in the EKS Anywhere controller cluster status reconciliation for worker node groups with autoscaling configured. + +## Overview of Solution + +#### Handling cluster status reconciliation + +Update the [totalExpected](https://github.com/aws/eks-anywhere/blob/a2a19920f4b7b54f6bc21f608ee5ecd5c6f0c45b/pkg/controller/clusters/status.go#L202) count of worker nodes to be equal to the count of worker nodes specified in the cluster spec *only* for worker node groups without autoscaling configured. For worker node groups which are configured with autoscaling, we add another validation for the `WorkersReady` condition which checks that the number of replicas lies between the `minCount` and `maxCount` specified in the autoscaling configuration. This validation runs only after all the existing validations for worker node groups without autoscaling configured have passed. + +#### Handling cluster spec updates + +When the cluster spec is applied during cluster create/upgrade, we will not set the replicas in the machine deployment (md) template for worker node groups which have autoscaling configured. For new md objects during cluster creation, replicas will default to the `minCount` specified in the autoscaling configuration, whereas for cluster upgrades they will keep the old md object’s replicas field value. + +**Pros:** + +* Removes the dependency on the worker node group count for cluster creation as well +* The worker node count is ignored, which is what we want because the autoscaler should handle it + +**Cons:** + +* The source of truth for the worker node count becomes the md replicas field, which does not come from an object that we own + +#### Testing + +An E2E test will be added that exercises worker node groups configured with the autoscaler during cluster upgrades. + +#### Documentation + +We need to explicitly document that `count` is ignored for all worker node group configurations which have autoscaling configured in the cluster spec, for both cluster creation and upgrade.
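+
+#### Illustrative sketch
+
+The following is a minimal sketch (not the final implementation) of the extra `WorkersReady` check described under *Handling cluster status reconciliation* above; the actual change lands in `pkg/controller/clusters/status.go` below. It assumes the existing EKS-A `Cluster` and Cluster API `MachineDeployment` types, and that machine deployment names match the worker node group names, as in the controller tests in this change. The helper name `checkAutoscaledWorkerNodeGroups` is hypothetical.
+
+```go
+package clusters
+
+import (
+	"fmt"
+
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+
+	anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
+)
+
+// checkAutoscaledWorkerNodeGroups returns an error if any worker node group with
+// autoscaling configured has a MachineDeployment whose observed replicas fall
+// outside the [minCount, maxCount] range from the cluster spec.
+func checkAutoscaledWorkerNodeGroups(cluster *anywherev1.Cluster, machineDeployments []clusterv1.MachineDeployment) error {
+	// Collect the autoscaling configurations keyed by worker node group name.
+	autoscaled := map[string]anywherev1.AutoScalingConfiguration{}
+	for _, wng := range cluster.Spec.WorkerNodeGroupConfigurations {
+		if wng.AutoScalingConfiguration != nil {
+			autoscaled[wng.Name] = *wng.AutoScalingConfiguration
+		}
+	}
+
+	// Only machine deployments belonging to autoscaled groups are checked here;
+	// fixed-size groups keep the existing expected-count validation.
+	for _, md := range machineDeployments {
+		cfg, ok := autoscaled[md.Name]
+		if !ok {
+			continue
+		}
+		replicas := int(md.Status.Replicas)
+		if replicas < cfg.MinCount || replicas > cfg.MaxCount {
+			return fmt.Errorf("worker nodes count for %s not between %d and %d yet (%d actual)", md.Name, cfg.MinCount, cfg.MaxCount, replicas)
+		}
+	}
+
+	return nil
+}
+```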
+ +## Alternate Solutions Considered + +Here, option number corresponds to options for cluster status reconciliation (2 options) +Here, option letter corresponds to options for cluster spec updates (3 options) + +### **Option 1a** + +#### Handling cluster status reconciliation + +For each worker node group, if the [count](https://anywhere.eks.amazonaws.com/docs/getting-started/vsphere/vsphere-spec/#workernodegroupconfigurationscount-required) in the worker node group configuration for the cluster object is not equal to the replicas field in the machine deployment object, update the count to match it to the number of md replicas. This will be implemented in the MachineDeploymentReconciler in the EKS Anywhere controller. + +#### Handling cluster spec updates + +When cluster spec is applied during cluster create/upgrade, we will not set the replicas in the md template for the worker node groups which have autoscaling configured. It will be defaulted to the minCount specified in the autoscaling configuration for new md objects during cluster creation whereas for cluster upgrades, it will be the same as the old md object’s replicas field value. + +### **Option 1b:** + +#### Handling cluster status reconciliation + +For each worker node group, if the [count](https://anywhere.eks.amazonaws.com/docs/getting-started/vsphere/vsphere-spec/#workernodegroupconfigurationscount-required) in the worker node group configuration for the cluster object is not equal to the replicas field in the machine deployment object, update the count to match it to the number of md replicas. This will be implemented in the MachineDeploymentReconciler in the EKS Anywhere controller. + +#### Handling cluster spec updates + +We will deny any updates to the worker node count in the webhook if the autoscaling configuration is set. This will ensure that the md object is not re-applied by the controller to avoid updating the replicas field which should be handled by the autoscaler only. + +### **Option 1c:** + +#### Handling cluster status reconciliation + +For each worker node group, if the [count](https://anywhere.eks.amazonaws.com/docs/getting-started/vsphere/vsphere-spec/#workernodegroupconfigurationscount-required) in the worker node group configuration for the cluster object is not equal to the replicas field in the machine deployment object, update the count to match it to the number of md replicas. This will be implemented in the _MachineDeploymentReconciler_ in the EKS Anywhere controller. + +#### Handling cluster spec updates + +For each worker node group, if the [count](https://anywhere.eks.amazonaws.com/docs/getting-started/vsphere/vsphere-spec/#workernodegroupconfigurationscount-required) in the worker node group configuration for the cluster object is not equal to the replicas field in the machine deployment object, update the count to match it to the number of md replicas. This will be implemented in the _ClusterReconciler_ in the EKS Anywhere controller. + +### Option 2a: + +#### Handling cluster status reconciliation + +Update the [totalExpected](https://github.com/aws/eks-anywhere/blob/a2a19920f4b7b54f6bc21f608ee5ecd5c6f0c45b/pkg/controller/clusters/status.go#L202) count of worker nodes to be equal to the count of worker nodes specified in the cluster spec *only* for worker node groups without autoscaling configured. 
For worker node groups which are configured with autoscaling, we include another validation for the `workersReady` condition which checks that the number of replicas lies between the minCount and maxCount specified in the autoscaler configuration. This validation will be done only after all the existing validations are done for worker node groups without autoscaling configured. + +#### Handling cluster spec updates + +We will deny any updates to the worker node count in the webhook if the autoscaling configuration is set. This will ensure that the md object is not re-applied by the controller to avoid updating the replicas field which should be handled by the autoscaler only. + +Proposed solution is better than this option because it does not force the user to remove their autoscaling configuration if they decide to make an update to the worker nodes count and not rely on the autoscaler anymore. + +### Option 2b: + +#### Handling cluster status reconciliation + +Update the [totalExpected](https://github.com/aws/eks-anywhere/blob/a2a19920f4b7b54f6bc21f608ee5ecd5c6f0c45b/pkg/controller/clusters/status.go#L202) count of worker nodes to be equal to the count of worker nodes specified in the cluster spec *only* for worker node groups without autoscaling configured. For worker node groups which are configured with autoscaling, we include another validation for the `workersReady` condition which checks that the number of replicas lies between the minCount and maxCount specified in the autoscaler configuration. This validation will be done only after all the existing validations are done for worker node groups without autoscaling configured. + +#### Handling cluster spec updates + +For each worker node group, if the [count](https://anywhere.eks.amazonaws.com/docs/getting-started/vsphere/vsphere-spec/#workernodegroupconfigurationscount-required) in the worker node group configuration for the cluster object is not equal to the replicas field in the machine deployment object, update the count to match it to the number of md replicas. This will be implemented in the _ClusterReconciler_ in the EKS Anywhere controller. + +Option 1c is better than this option because it can use the same function for updating the count in both MachineDeploymentReconciler as well as ClusterReconciler and also it does not have to change any logic for the cluster status reconciliation. diff --git a/docs/content/en/docs/clustermgmt/cluster-status.md b/docs/content/en/docs/clustermgmt/cluster-status.md index 4f89f269969f..9739cad8dabb 100755 --- a/docs/content/en/docs/clustermgmt/cluster-status.md +++ b/docs/content/en/docs/clustermgmt/cluster-status.md @@ -77,7 +77,10 @@ Conditions provide a high-level status report representing an assessment of clus * `DefaultCNIConfigured` - reports the configuration state of the default CNI specified in the cluster specifications. It will be marked as `True` once the default CNI has been successfully configured on the cluster. However, if the EKS Anywhere default cilium CNI has been [configured to skip upgrades]({{< relref "../getting-started/optional/cni/#use-a-custom-cni" >}}) in the cluster specification, then this condition will be marked as `False` with the reason `SkipUpgradesForDefaultCNIConfigured`. - * `WorkersReady` - reports that the condition of the current state of worker machines versus the desired state specified in the Cluster specification. 
This condition is marked `True` once the number of worker nodes in the cluster match the expected number of worker nodes as defined in the cluster specifications and all the worker nodes are up to date and ready. + * `WorkersReady` - reports the condition of the current state of worker machines versus the desired state specified in the Cluster specification. This condition is marked `True` once the following conditions are met: + * For worker node groups with [autoscaling]({{< relref "../getting-started/optional/autoscaling" >}}) configured, the number of worker nodes in that group lies between the `minCount` and `maxCount` defined in the cluster specification. + * For fixed worker node groups, the number of worker nodes in that group matches the expected number defined in the cluster specification. + * All the worker nodes are up to date and ready. * `Ready` - reports a summary of the following conditions: `ControlPlaneInitialized`, `ControlPlaneReady`, and `WorkersReady`. It indicates an overall operational state of the EKS Anywhere cluster. It will be marked `True` once the current state of the cluster has fully reached the desired state specified in the Cluster spec. diff --git a/docs/content/en/docs/getting-started/baremetal/bare-spec.md b/docs/content/en/docs/getting-started/baremetal/bare-spec.md index dd009c9e4c7b..a5a4ee9b08e0 100644 --- a/docs/content/en/docs/getting-started/baremetal/bare-spec.md +++ b/docs/content/en/docs/getting-started/baremetal/bare-spec.md @@ -184,7 +184,7 @@ You can omit `workerNodeGroupConfigurations` when creating Bare Metal clusters. >**_NOTE:_** Empty `workerNodeGroupConfigurations` is not supported when Kubernetes version <= 1.21. ### workerNodeGroupConfigurations[*].count (optional) -Number of worker nodes. (default: `1`) Optional if autoscalingConfiguration is used, in which case count will default to `autoscalingConfiguration.minCount`. +Number of worker nodes. (default: `1`) It will be ignored if the [cluster autoscaler curated package]({{< relref "../../packages/cluster-autoscaler/addclauto" >}}) is installed and `autoscalingConfiguration` is used to specify the desired range of replicas. Refers to [troubleshooting machine health check remediation not allowed]({{< relref "../../troubleshooting/troubleshooting/#machine-health-check-shows-remediation-is-not-allowed" >}}) and choose a sufficient number to allow machine health check remediation. diff --git a/docs/content/en/docs/getting-started/cloudstack/cloud-spec.md b/docs/content/en/docs/getting-started/cloudstack/cloud-spec.md index bec0ece419b8..150286d7db71 100644 --- a/docs/content/en/docs/getting-started/cloudstack/cloud-spec.md +++ b/docs/content/en/docs/getting-started/cloudstack/cloud-spec.md @@ -235,7 +235,7 @@ This takes in a list of node groups that you can define for your workers. You may define one or more worker node groups. ### workerNodeGroupConfigurations[*].count (optional) -Number of worker nodes. (default: `1`) Optional if autoscalingConfiguration is used, in which case count will default to `autoscalingConfiguration.minCount`. +Number of worker nodes. (default: `1`) It will be ignored if the [cluster autoscaler curated package]({{< relref "../../packages/cluster-autoscaler/addclauto" >}}) is installed and `autoscalingConfiguration` is used to specify the desired range of replicas.
Refers to [troubleshooting machine health check remediation not allowed]({{< relref "../../troubleshooting/troubleshooting/#machine-health-check-shows-remediation-is-not-allowed" >}}) and choose a sufficient number to allow machine health check remediation. diff --git a/docs/content/en/docs/getting-started/nutanix/nutanix-spec.md b/docs/content/en/docs/getting-started/nutanix/nutanix-spec.md index 21d34309454f..a40ada591d97 100644 --- a/docs/content/en/docs/getting-started/nutanix/nutanix-spec.md +++ b/docs/content/en/docs/getting-started/nutanix/nutanix-spec.md @@ -190,7 +190,7 @@ creation process are [here]({{< relref "./nutanix-prereq/#prepare-a-nutanix-envi This takes in a list of node groups that you can define for your workers. You may define one or more worker node groups. ### workerNodeGroupConfigurations[*].count (optional) -Number of worker nodes. (default: `1`) Optional if `autoscalingConfiguration` is used, in which case count will default to `autoscalingConfiguration.minCount`. +Number of worker nodes. (default: `1`) It will be ignored if the [cluster autoscaler curated package]({{< relref "../../packages/cluster-autoscaler/addclauto" >}}) is installed and `autoscalingConfiguration` is used to specify the desired range of replicas. Refers to [troubleshooting machine health check remediation not allowed]({{< relref "../../troubleshooting/troubleshooting/#machine-health-check-shows-remediation-is-not-allowed" >}}) and choose a sufficient number to allow machine health check remediation. diff --git a/docs/content/en/docs/getting-started/optional/autoscaling.md b/docs/content/en/docs/getting-started/optional/autoscaling.md index c9902483f95b..5da357a67f72 100644 --- a/docs/content/en/docs/getting-started/optional/autoscaling.md +++ b/docs/content/en/docs/getting-started/optional/autoscaling.md @@ -35,10 +35,9 @@ Configure an EKS Anywhere worker node group to be picked up by a Cluster Autosca machineGroupRef: kind: VSphereMachineConfig name: worker-machine-b - count: 1 ``` -Note that if no `count` is specified for the worker node group it will default to the `autoscalingConfiguration.minCount` value. +Note that if `count` is specified for the worker node group, its value will be ignored during cluster creation as well as cluster upgrade. If only one of `minCount` or `maxCount` is specified, then the other will have a default value of `0` and `count` will have a default value of `minCount`. EKS Anywhere automatically applies the following annotations to your `MachineDeployment` objects for worker node groups with autoscaling enabled. The Cluster Autoscaler component uses these annotations to identify which node groups to autoscale. If a node group is not autoscaling as expected, check for these annotations on the `MachineDeployment` to troubleshoot. ``` diff --git a/docs/content/en/docs/getting-started/snow/snow-spec.md b/docs/content/en/docs/getting-started/snow/snow-spec.md index 76e40e4e4c82..32fb4d176ca1 100644 --- a/docs/content/en/docs/getting-started/snow/snow-spec.md +++ b/docs/content/en/docs/getting-started/snow/snow-spec.md @@ -147,7 +147,7 @@ This takes in a list of node groups that you can define for your workers. You may define one or more worker node groups. ### workerNodeGroupConfigurations[*].count (optional) -Number of worker nodes. (default: `1`) Optional if autoscalingConfiguration is used, in which case count will default to `autoscalingConfiguration.minCount`. +Number of worker nodes.
(default: `1`) It will be ignored if the [cluster autoscaler curated package]({{< relref "../../packages/cluster-autoscaler/addclauto" >}}) is installed and `autoscalingConfiguration` is used to specify the desired range of replicas. Refers to [troubleshooting machine health check remediation not allowed]({{< relref "../../troubleshooting/troubleshooting/#machine-health-check-shows-remediation-is-not-allowed" >}}) and choose a sufficient number to allow machine health check remediation. diff --git a/docs/content/en/docs/getting-started/vsphere/vsphere-spec.md b/docs/content/en/docs/getting-started/vsphere/vsphere-spec.md index bf4fbddb63bb..a9fb727685e2 100644 --- a/docs/content/en/docs/getting-started/vsphere/vsphere-spec.md +++ b/docs/content/en/docs/getting-started/vsphere/vsphere-spec.md @@ -159,7 +159,7 @@ This takes in a list of node groups that you can define for your workers. You may define one or more worker node groups. ### workerNodeGroupConfigurations[*].count (optional) -Number of worker nodes. (default: `1`) Optional if the [cluster autoscaler curated package]({{< relref "../../packages/cluster-autoscaler/addclauto" >}}) is installed and autoscalingConfiguration is used, in which case count will default to `autoscalingConfiguration.minCount`. +Number of worker nodes. (default: `1`) It will be ignored if the [cluster autoscaler curated package]({{< relref "../../packages/cluster-autoscaler/addclauto" >}}) is installed and `autoscalingConfiguration` is used to specify the desired range of replicas. Refers to [troubleshooting machine health check remediation not allowed]({{< relref "../../troubleshooting/troubleshooting/#machine-health-check-shows-remediation-is-not-allowed" >}}) and choose a sufficient number to allow machine health check remediation. diff --git a/pkg/api/v1alpha1/condition_consts.go b/pkg/api/v1alpha1/condition_consts.go index 85fe677c2e65..93c2c9c16400 100644 --- a/pkg/api/v1alpha1/condition_consts.go +++ b/pkg/api/v1alpha1/condition_consts.go @@ -55,6 +55,9 @@ const ( // ExternalEtcdNotAvailable reports the Cluster status is waiting for Etcd to be available. ExternalEtcdNotAvailable = "ExternalEtcdNotAvailable" + + // AutoscalerConstraintNotMetReason reports the Cluster status is waiting for autoscaler constraint to be met. + AutoscalerConstraintNotMetReason = "AutoscalerConstraintNotMet" ) const ( diff --git a/pkg/clusterapi/autoscaler.go b/pkg/clusterapi/autoscaler.go index 9790b3600cc8..8a51fc24f496 100644 --- a/pkg/clusterapi/autoscaler.go +++ b/pkg/clusterapi/autoscaler.go @@ -8,9 +8,10 @@ import ( anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" ) +// Autoscaler annotation constants. 
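+// These are the upstream Cluster API cluster-autoscaler min/max size annotations. They are
+// exported so the cluster status reconciler and its tests can detect MachineDeployments that
+// belong to autoscaled worker node groups.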
const ( - nodeGroupMinSizeAnnotation = "cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size" - nodeGroupMaxSizeAnnotation = "cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size" + NodeGroupMinSizeAnnotation = "cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size" + NodeGroupMaxSizeAnnotation = "cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size" ) func ConfigureAutoscalingInMachineDeployment(md *clusterv1.MachineDeployment, autoscalingConfig *anywherev1.AutoScalingConfiguration) { @@ -22,6 +23,6 @@ func ConfigureAutoscalingInMachineDeployment(md *clusterv1.MachineDeployment, au md.ObjectMeta.Annotations = map[string]string{} } - md.ObjectMeta.Annotations[nodeGroupMinSizeAnnotation] = strconv.Itoa(autoscalingConfig.MinCount) - md.ObjectMeta.Annotations[nodeGroupMaxSizeAnnotation] = strconv.Itoa(autoscalingConfig.MaxCount) + md.ObjectMeta.Annotations[NodeGroupMinSizeAnnotation] = strconv.Itoa(autoscalingConfig.MinCount) + md.ObjectMeta.Annotations[NodeGroupMaxSizeAnnotation] = strconv.Itoa(autoscalingConfig.MaxCount) } diff --git a/pkg/controller/clusters/status.go b/pkg/controller/clusters/status.go index 53456ae1057b..af406be58037 100644 --- a/pkg/controller/clusters/status.go +++ b/pkg/controller/clusters/status.go @@ -12,6 +12,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" + "github.com/aws/eks-anywhere/pkg/clusterapi" "github.com/aws/eks-anywhere/pkg/controller" ) @@ -198,11 +199,17 @@ func updateWorkersReadyCondition(cluster *anywherev1.Cluster, machineDeployments } totalExpected := 0 + wngWithAutoScalingConfigurationMap := make(map[string]anywherev1.AutoScalingConfiguration) for _, wng := range cluster.Spec.WorkerNodeGroupConfigurations { - totalExpected += *wng.Count + // We want to consider only the worker node groups which don't have autoscaling configuration for expected worker nodes count. + if wng.AutoScalingConfiguration == nil { + totalExpected += *wng.Count + } else { + wngWithAutoScalingConfigurationMap[wng.Name] = *wng.AutoScalingConfiguration + } } - // First, we need aggregate the number of nodes across worker node groups to be able to assess the condition of the workers + // First, we need to aggregate the number of nodes across worker node groups to be able to assess the condition of the workers // as a whole. totalReadyReplicas := 0 totalUpdatedReplicas := 0 @@ -215,6 +222,13 @@ func updateWorkersReadyCondition(cluster *anywherev1.Cluster, machineDeployments return } + // Skip updating the replicas for the machine deployments which have autoscaling configuration annotation + if md.ObjectMeta.Annotations != nil { + if _, ok := md.ObjectMeta.Annotations[clusterapi.NodeGroupMinSizeAnnotation]; ok { + continue + } + } + totalReadyReplicas += int(md.Status.ReadyReplicas) totalUpdatedReplicas += int(md.Status.UpdatedReplicas) totalReplicas += int(md.Status.Replicas) @@ -253,6 +267,20 @@ func updateWorkersReadyCondition(cluster *anywherev1.Cluster, machineDeployments return } + // Iterating through the machine deployments which have autoscaling configured to check if the number of worker nodes replicas + // are between min count and max count specified in the cluster spec. 
+ for _, md := range machineDeployments { + if wng, exists := wngWithAutoScalingConfigurationMap[md.ObjectMeta.Name]; exists { + minCount := wng.MinCount + maxCount := wng.MaxCount + replicas := int(md.Status.Replicas) + if replicas < minCount || replicas > maxCount { + conditions.MarkFalse(cluster, anywherev1.WorkersReadyCondition, anywherev1.AutoscalerConstraintNotMetReason, clusterv1.ConditionSeverityInfo, "Worker nodes count for %s not between %d and %d yet (%d actual)", md.ObjectMeta.Name, minCount, maxCount, replicas) + return + } + } + } + conditions.MarkTrue(cluster, anywherev1.WorkersReadyCondition) } diff --git a/pkg/controller/clusters/status_test.go b/pkg/controller/clusters/status_test.go index 9184a8b953cd..122d0a4ade7b 100644 --- a/pkg/controller/clusters/status_test.go +++ b/pkg/controller/clusters/status_test.go @@ -20,6 +20,7 @@ import ( _ "github.com/aws/eks-anywhere/internal/test/envtest" anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" "github.com/aws/eks-anywhere/pkg/cluster" + "github.com/aws/eks-anywhere/pkg/clusterapi" "github.com/aws/eks-anywhere/pkg/constants" "github.com/aws/eks-anywhere/pkg/controller/clusters" "github.com/aws/eks-anywhere/pkg/utils/ptr" @@ -1009,7 +1010,6 @@ func TestUpdateClusterStatusForWorkers(t *testing.T) { Message: "Worker nodes not ready yet", }, }, - { name: "workers ready", workerNodeGroupConfigurations: []anywherev1.WorkerNodeGroupConfiguration{ @@ -1051,6 +1051,107 @@ func TestUpdateClusterStatusForWorkers(t *testing.T) { Status: "True", }, }, + { + name: "workers not ready, autoscaler constraint not met", + workerNodeGroupConfigurations: []anywherev1.WorkerNodeGroupConfiguration{ + { + Name: "md-0", + Count: ptr.Int(1), + }, + { + Name: "md-1", + AutoScalingConfiguration: &anywherev1.AutoScalingConfiguration{ + MinCount: 3, + MaxCount: 5, + }, + }, + }, + machineDeployments: []clusterv1.MachineDeployment{ + *test.MachineDeployment(func(md *clusterv1.MachineDeployment) { + md.ObjectMeta.Name = "md-0" + md.ObjectMeta.Labels = map[string]string{ + clusterv1.ClusterNameLabel: clusterName, + } + md.Status.Replicas = 1 + md.Status.ReadyReplicas = 1 + md.Status.UpdatedReplicas = 1 + }), + *test.MachineDeployment(func(md *clusterv1.MachineDeployment) { + md.ObjectMeta.Name = "md-1" + md.ObjectMeta.Labels = map[string]string{ + clusterv1.ClusterNameLabel: clusterName, + } + md.ObjectMeta.Annotations = map[string]string{ + clusterapi.NodeGroupMinSizeAnnotation: "3", + clusterapi.NodeGroupMaxSizeAnnotation: "5", + } + md.Status.Replicas = 1 + md.Status.ReadyReplicas = 1 + md.Status.UpdatedReplicas = 1 + }), + }, + conditions: []anywherev1.Condition{ + { + Type: anywherev1.ControlPlaneInitializedCondition, + Status: "True", + }, + }, + wantCondition: &anywherev1.Condition{ + Type: anywherev1.WorkersReadyCondition, + Status: "False", + Reason: anywherev1.AutoscalerConstraintNotMetReason, + Severity: clusterv1.ConditionSeverityInfo, + Message: "Worker nodes count for md-1 not between 3 and 5 yet (1 actual)", + }, + }, + { + name: "workers ready, autoscaler constraint met", + workerNodeGroupConfigurations: []anywherev1.WorkerNodeGroupConfiguration{ + { + Count: ptr.Int(1), + }, + { + AutoScalingConfiguration: &anywherev1.AutoScalingConfiguration{ + MinCount: 1, + MaxCount: 5, + }, + }, + }, + machineDeployments: []clusterv1.MachineDeployment{ + *test.MachineDeployment(func(md *clusterv1.MachineDeployment) { + md.ObjectMeta.Name = "md-0" + md.ObjectMeta.Labels = map[string]string{ + clusterv1.ClusterNameLabel: clusterName, + } + 
md.Status.Replicas = 1 + md.Status.ReadyReplicas = 1 + md.Status.UpdatedReplicas = 1 + }), + *test.MachineDeployment(func(md *clusterv1.MachineDeployment) { + md.ObjectMeta.Name = "md-1" + md.ObjectMeta.Labels = map[string]string{ + clusterv1.ClusterNameLabel: clusterName, + } + md.ObjectMeta.Annotations = map[string]string{ + clusterapi.NodeGroupMinSizeAnnotation: "1", + clusterapi.NodeGroupMaxSizeAnnotation: "5", + } + md.Status.Replicas = 1 + md.Status.ReadyReplicas = 1 + md.Status.UpdatedReplicas = 1 + }), + }, + conditions: []anywherev1.Condition{ + { + Type: anywherev1.ControlPlaneInitializedCondition, + Status: "True", + }, + }, + wantCondition: &anywherev1.Condition{ + Type: anywherev1.WorkersReadyCondition, + Status: "True", + }, + }, } for _, tt := range tests { diff --git a/pkg/providers/cloudstack/config/template-md.yaml b/pkg/providers/cloudstack/config/template-md.yaml index 81df17e3cc60..44af6f6d28b4 100644 --- a/pkg/providers/cloudstack/config/template-md.yaml +++ b/pkg/providers/cloudstack/config/template-md.yaml @@ -122,14 +122,16 @@ metadata: cluster.x-k8s.io/cluster-name: {{.clusterName}} name: {{.workerNodeGroupName}} namespace: {{.eksaSystemNamespace}} - {{- if .autoscalingConfig }} +{{- if .autoscalingConfig }} annotations: cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size: "{{ .autoscalingConfig.MinCount }}" cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size: "{{ .autoscalingConfig.MaxCount }}" {{- end }} spec: clusterName: {{.clusterName}} +{{- if not .autoscalingConfig }} replicas: {{.workerReplicas}} +{{- end }} selector: matchLabels: {} template: diff --git a/pkg/providers/cloudstack/testdata/expected_results_main_autoscaling_md.yaml b/pkg/providers/cloudstack/testdata/expected_results_main_autoscaling_md.yaml index a4c30a6cd046..a69af7639a2a 100644 --- a/pkg/providers/cloudstack/testdata/expected_results_main_autoscaling_md.yaml +++ b/pkg/providers/cloudstack/testdata/expected_results_main_autoscaling_md.yaml @@ -73,7 +73,6 @@ metadata: cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size: "5" spec: clusterName: test - replicas: 3 selector: matchLabels: {} template: diff --git a/pkg/providers/docker/config/template-md.yaml b/pkg/providers/docker/config/template-md.yaml index 94d8867e7437..c1f8c344072d 100644 --- a/pkg/providers/docker/config/template-md.yaml +++ b/pkg/providers/docker/config/template-md.yaml @@ -76,7 +76,9 @@ metadata: {{- end }} spec: clusterName: {{.clusterName}} +{{- if not .autoscalingConfig }} replicas: {{.workerReplicas}} +{{- end }} selector: matchLabels: null template: diff --git a/pkg/providers/docker/testdata/valid_autoscaler_deployment_md_expected.yaml b/pkg/providers/docker/testdata/valid_autoscaler_deployment_md_expected.yaml index 57092cdf7f8a..c0aa5ac7c170 100644 --- a/pkg/providers/docker/testdata/valid_autoscaler_deployment_md_expected.yaml +++ b/pkg/providers/docker/testdata/valid_autoscaler_deployment_md_expected.yaml @@ -25,7 +25,6 @@ metadata: cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size: "5" spec: clusterName: test-cluster - replicas: 3 selector: matchLabels: null template: diff --git a/pkg/providers/nutanix/config/md-template.yaml b/pkg/providers/nutanix/config/md-template.yaml index
d62afebeb444..5b12f8b5e46b 100644 --- a/pkg/providers/nutanix/config/md-template.yaml +++ b/pkg/providers/nutanix/config/md-template.yaml @@ -5,14 +5,16 @@ metadata: cluster.x-k8s.io/cluster-name: "{{.clusterName}}" name: "{{.workerNodeGroupName}}" namespace: "{{.eksaSystemNamespace}}" - {{- if .autoscalingConfig }} +{{- if .autoscalingConfig }} annotations: cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size: "{{ .autoscalingConfig.MinCount }}" cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size: "{{ .autoscalingConfig.MaxCount }}" - {{- end }} +{{- end }} spec: clusterName: "{{.clusterName}}" +{{- if not .autoscalingConfig }} replicas: {{.workerReplicas}} +{{- end }} selector: matchLabels: {} template: diff --git a/pkg/providers/nutanix/testdata/expected_results_autoscaling_md.yaml b/pkg/providers/nutanix/testdata/expected_results_autoscaling_md.yaml index 3a2a9fc1eba3..36350779f950 100644 --- a/pkg/providers/nutanix/testdata/expected_results_autoscaling_md.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_autoscaling_md.yaml @@ -10,7 +10,6 @@ metadata: cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size: "5" spec: clusterName: "eksa-unit-test" - replicas: 3 selector: matchLabels: {} template: diff --git a/pkg/providers/tinkerbell/config/template-md.yaml b/pkg/providers/tinkerbell/config/template-md.yaml index 83e39ed128df..726b7a4eea5b 100644 --- a/pkg/providers/tinkerbell/config/template-md.yaml +++ b/pkg/providers/tinkerbell/config/template-md.yaml @@ -13,7 +13,9 @@ metadata: {{- end }} spec: clusterName: {{.clusterName}} +{{- if not .autoscalingConfig }} replicas: {{.workerReplicas}} +{{- end }} selector: matchLabels: {} template: diff --git a/pkg/providers/tinkerbell/testdata/expected_results_cluster_tinkerbell_autoscaler_md.yaml b/pkg/providers/tinkerbell/testdata/expected_results_cluster_tinkerbell_autoscaler_md.yaml index 38be17858050..eab49bb2312b 100644 --- a/pkg/providers/tinkerbell/testdata/expected_results_cluster_tinkerbell_autoscaler_md.yaml +++ b/pkg/providers/tinkerbell/testdata/expected_results_cluster_tinkerbell_autoscaler_md.yaml @@ -11,7 +11,6 @@ metadata: cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size: "5" spec: clusterName: test - replicas: 1 selector: matchLabels: {} template: diff --git a/pkg/providers/vsphere/config/template-md.yaml b/pkg/providers/vsphere/config/template-md.yaml index 9427538701b4..5a568a312dc4 100644 --- a/pkg/providers/vsphere/config/template-md.yaml +++ b/pkg/providers/vsphere/config/template-md.yaml @@ -151,7 +151,9 @@ metadata: {{- end }} spec: clusterName: {{.clusterName}} +{{- if not .autoscalingConfig }} replicas: {{.workerReplicas}} +{{- end }} selector: matchLabels: {} template: diff --git a/pkg/providers/vsphere/testdata/expected_results_minimal_autoscaling_md.yaml b/pkg/providers/vsphere/testdata/expected_results_minimal_autoscaling_md.yaml index 0d8968d4bc8d..b9918819ddb0 100644 --- a/pkg/providers/vsphere/testdata/expected_results_minimal_autoscaling_md.yaml +++ b/pkg/providers/vsphere/testdata/expected_results_minimal_autoscaling_md.yaml @@ -41,7 +41,6 @@ metadata: cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size: "5" spec: clusterName: test - replicas: 3 selector: matchLabels: {} template: diff --git a/test/e2e/autoscaler.go b/test/e2e/autoscaler.go index cf62a65ad926..d31aab543cd9 100644 --- a/test/e2e/autoscaler.go +++ b/test/e2e/autoscaler.go @@ -29,3 +29,23 @@ func runAutoscalerWithMetricsServerTinkerbellSimpleFlow(test *framework.ClusterE test.DeleteCluster() 
test.ValidateHardwareDecommissioned() } + +func runAutoscalerUpgradeFlow(test *framework.MulticlusterE2ETest) { + test.CreateManagementClusterWithConfig() + test.RunInWorkloadClusters(func(e *framework.WorkloadCluster) { + e.GenerateClusterConfig() + e.CreateCluster() + autoscalerName := "cluster-autoscaler" + targetNamespace := "eksa-system" + mgmtCluster := withCluster(test.ManagementCluster) + workloadCluster := withCluster(e.ClusterE2ETest) + test.ManagementCluster.InstallAutoScaler(e.ClusterName, targetNamespace) + test.ManagementCluster.VerifyAutoScalerPackageInstalled(autoscalerName, targetNamespace, mgmtCluster) + e.T.Log("Cluster Autoscaler ready") + e.DeployTestWorkload(workloadCluster) + test.ManagementCluster.RestartClusterAutoscaler(targetNamespace) + e.VerifyWorkerNodesScaleUp(mgmtCluster) + e.DeleteCluster() + }) + test.DeleteManagementCluster() +} diff --git a/test/e2e/vsphere_test.go b/test/e2e/vsphere_test.go index 2b2beb519bb9..348edb253b67 100644 --- a/test/e2e/vsphere_test.go +++ b/test/e2e/vsphere_test.go @@ -830,6 +830,47 @@ func TestVSphereKubernetes125UbuntuCuratedPackagesPrometheusSimpleFlow(t *testin runCuratedPackagesPrometheusInstallSimpleFlow(test) } +func TestVSphereKubernetes129BottleRocketWorkloadClusterCuratedPackagesClusterAutoscalerUpgradeFlow(t *testing.T) { + minNodes := 1 + maxNodes := 2 + framework.CheckCuratedPackagesCredentials(t) + provider := framework.NewVSphere(t, framework.WithBottleRocket129()) + test := framework.NewMulticlusterE2ETest( + t, + framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller( + api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithControlPlaneCount(1), + api.WithWorkerNodeCount(1), + api.WithExternalEtcdTopology(1), + ), + ), + framework.NewClusterE2ETest( + t, + provider, + framework.WithClusterFiller( + api.WithKubernetesVersion(v1alpha1.Kube129), + api.WithControlPlaneCount(1), + api.WithWorkerNodeCount(1), + api.WithExternalEtcdTopology(1), + api.WithWorkerNodeAutoScalingConfig(minNodes, maxNodes), + ), + framework.WithPackageConfig( + t, + packageBundleURI(v1alpha1.Kube129), + EksaPackageControllerHelmChartName, + EksaPackageControllerHelmURI, + EksaPackageControllerHelmVersion, + EksaPackageControllerHelmValues, + nil, + ), + ), + ) + runAutoscalerUpgradeFlow(test) +} + func TestVSphereKubernetes126UbuntuCuratedPackagesPrometheusSimpleFlow(t *testing.T) { framework.CheckCuratedPackagesCredentials(t) test := framework.NewClusterE2ETest(t, diff --git a/test/framework/cluster.go b/test/framework/cluster.go index 76c9cd910249..84c00a0adb1b 100644 --- a/test/framework/cluster.go +++ b/test/framework/cluster.go @@ -1989,36 +1989,55 @@ func (e *ClusterE2ETest) InstallAutoScalerWithMetricServer(targetNamespace strin } } -// CombinedAutoScalerMetricServerTest verifies that new nodes are spun up after using a HPA to scale a deployment. -func (e *ClusterE2ETest) CombinedAutoScalerMetricServerTest(autoscalerName, metricServerName, targetNamespace string, mgmtCluster *types.Cluster) { +//go:embed testdata/autoscaler_package_workload_cluster.yaml +var autoscalerPackageWorkloadClusterDeploymentTemplate string + +// InstallAutoScaler installs autoscaler with a given target namespace. 
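+// It renders the embedded cluster-autoscaler Package manifest with the workload cluster name
+// and target namespace, then applies it to this cluster's package metadata namespace so the
+// autoscaler targets the workload cluster through its kubeconfig secret.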
+func (e *ClusterE2ETest) InstallAutoScaler(workloadClusterName, targetNamespace string) { ctx := context.Background() - machineDeploymentName := e.ClusterName + "-" + "md-0" - autoscalerDeploymentName := "cluster-autoscaler-clusterapi-cluster-autoscaler" + packageMetadataNamespace := fmt.Sprintf("%s-%s", constants.EksaPackagesName, e.ClusterName) + data := map[string]interface{}{ + "targetNamespace": targetNamespace, + "workloadClusterName": workloadClusterName, + } + + autoscalerPackageWorkloadClusterDeployment, err := templater.Execute(autoscalerPackageWorkloadClusterDeploymentTemplate, data) + if err != nil { + e.T.Fatalf("Failed creating autoscaler Package Deployment: %s", err) + } + err = e.KubectlClient.ApplyKubeSpecFromBytesWithNamespace(ctx, e.Cluster(), autoscalerPackageWorkloadClusterDeployment, + packageMetadataNamespace) + if err != nil { + e.T.Fatalf("Error installing cluster autoscaler package: %s", err) + } +} + +// CombinedAutoScalerMetricServerTest verifies that new nodes are spun up after using a HPA to scale a deployment. +func (e *ClusterE2ETest) CombinedAutoScalerMetricServerTest(autoscalerName, metricServerName, targetNamespace string, mgmtCluster *types.Cluster) { e.VerifyMetricServerPackageInstalled(metricServerName, targetNamespace, mgmtCluster) e.VerifyAutoScalerPackageInstalled(autoscalerName, targetNamespace, mgmtCluster) e.T.Log("Metrics Server and Cluster Autoscaler ready") + e.DeployTestWorkload(mgmtCluster) + e.VerifyWorkerNodesScaleUp(mgmtCluster) +} +// DeployTestWorkload deploys the test workload on the cluster. +func (e *ClusterE2ETest) DeployTestWorkload(cluster *types.Cluster) { e.T.Log("Deploying test workload") - err := e.KubectlClient.ApplyKubeSpecFromBytes(ctx, mgmtCluster, autoscalerLoad) + err := e.KubectlClient.ApplyKubeSpecFromBytes(context.Background(), cluster, autoscalerLoad) if err != nil { e.T.Fatalf("Failed to apply autoscaler load %s", err) } +} - // There is a bug in cluster autoscaler currently where it's not able to autoscale the cluster - // because of missing permissions on infrastructure machine template. - // Cluster Autoscaler does restart after ~10 min after which it starts functioning normally. - // We are force triggering a restart so the e2e doesn't have to wait 10 min for the restart. - // This can be removed once the following issue is resolve upstream. - // https://github.com/kubernetes/autoscaler/issues/6490 - _, err = e.KubectlClient.ExecuteCommand(ctx, "rollout", "restart", "deployment", "-n", targetNamespace, autoscalerDeploymentName, "--kubeconfig", e.KubeconfigFilePath()) - if err != nil { - e.T.Fatalf("Failed to rollout cluster autoscaler %s", err) - } - e.VerifyAutoScalerPackageInstalled(autoscalerName, targetNamespace, mgmtCluster) +// VerifyWorkerNodesScaleUp verifies that the worker nodes are scaled up after a test workload is deployed on a cluster with Autoscaler installed. 
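+// It waits for the cluster's md-0 MachineDeployment to reach the ScalingUp phase, then the
+// Running phase, and finally for the MachineDeployment to report ready.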
+func (e *ClusterE2ETest) VerifyWorkerNodesScaleUp(mgmtCluster *types.Cluster) { + ctx := context.Background() + machineDeploymentName := e.ClusterName + "-" + "md-0" e.T.Log("Waiting for machinedeployment to begin scaling up") - err = e.KubectlClient.WaitJSONPathLoop(ctx, mgmtCluster.KubeconfigFile, "10m", "status.phase", "ScalingUp", + err := e.KubectlClient.WaitJSONPathLoop(ctx, mgmtCluster.KubeconfigFile, "10m", "status.phase", "ScalingUp", fmt.Sprintf("machinedeployments.cluster.x-k8s.io/%s", machineDeploymentName), constants.EksaSystemNamespace) if err != nil { e.T.Fatalf("Failed to get ScalingUp phase for machinedeployment: %s", err) @@ -2031,8 +2050,7 @@ func (e *ClusterE2ETest) CombinedAutoScalerMetricServerTest(autoscalerName, metr e.T.Fatalf("Failed to get Running phase for machinedeployment: %s", err) } - err = e.KubectlClient.WaitForMachineDeploymentReady(ctx, mgmtCluster, "5m", - machineDeploymentName) + err = e.KubectlClient.WaitForMachineDeploymentReady(ctx, mgmtCluster, "5m", machineDeploymentName) if err != nil { e.T.Fatalf("Machine deployment stuck in scaling up: %s", err) } @@ -2040,6 +2058,21 @@ func (e *ClusterE2ETest) CombinedAutoScalerMetricServerTest(autoscalerName, metr e.T.Log("Finished scaling up machines") } +// RestartClusterAutoscaler restarts the cluster autoscaler deployment in the target namespace. +func (e *ClusterE2ETest) RestartClusterAutoscaler(targetNamespace string) { + // There is a bug in cluster autoscaler currently where it's not able to autoscale the cluster + // because of missing permissions on infrastructure machine template. + // Cluster Autoscaler does restart after ~10 min after which it starts functioning normally. + // We are force triggering a restart so the e2e doesn't have to wait 10 min for the restart. + // This can be removed once the following issue is resolve upstream. + // https://github.com/kubernetes/autoscaler/issues/6490 + autoscalerDeploymentName := "cluster-autoscaler-clusterapi-cluster-autoscaler" + _, err := e.KubectlClient.ExecuteCommand(context.Background(), "rollout", "restart", "deployment", "-n", targetNamespace, autoscalerDeploymentName, "--kubeconfig", e.KubeconfigFilePath()) + if err != nil { + e.T.Fatalf("Failed to rollout cluster autoscaler %s", err) + } +} + // ValidateClusterState runs a set of validations against the cluster to identify an invalid cluster state. func (e *ClusterE2ETest) ValidateClusterState() { validateClusterState(e.T.(*testing.T), e) diff --git a/test/framework/testdata/autoscaler_package_workload_cluster.yaml b/test/framework/testdata/autoscaler_package_workload_cluster.yaml new file mode 100644 index 000000000000..f91b46ea29e0 --- /dev/null +++ b/test/framework/testdata/autoscaler_package_workload_cluster.yaml @@ -0,0 +1,15 @@ +apiVersion: packages.eks.amazonaws.com/v1alpha1 +kind: Package +metadata: + name: cluster-autoscaler +spec: + packageName: cluster-autoscaler + targetNamespace: {{.targetNamespace}} + config: |- + cloudProvider: "clusterapi" + clusterAPIMode: "kubeconfig-incluster" + clusterAPIKubeconfigSecret: "{{.workloadClusterName}}-kubeconfig" + autoDiscovery: + clusterName: {{.workloadClusterName}} + +---