diff --git a/Makefile b/Makefile index 42f99a0be0e3f..2a2ef60674978 100644 --- a/Makefile +++ b/Makefile @@ -603,6 +603,7 @@ mocks: ## Generate mocks ${MOCKGEN} -destination=pkg/controller/clusters/mocks/ipvalidator.go -package=mocks -source "pkg/controller/clusters/ipvalidator.go" IPUniquenessValidator ${MOCKGEN} -destination=pkg/registry/mocks/storage.go -package=mocks -source "pkg/registry/storage.go" StorageClient ${MOCKGEN} -destination=pkg/registry/mocks/repository.go -package=mocks oras.land/oras-go/v2/registry Repository + ${MOCKGEN} -destination=controllers/mocks/nodeupgrade_controller.go -package=mocks -source "controllers/nodeupgrade_controller.go" RemoteClientRegistry .PHONY: verify-mocks verify-mocks: mocks ## Verify if mocks need to be updated diff --git a/controllers/mocks/nodeupgrade_controller.go b/controllers/mocks/nodeupgrade_controller.go new file mode 100644 index 0000000000000..f8d6dd178a4be --- /dev/null +++ b/controllers/mocks/nodeupgrade_controller.go @@ -0,0 +1,51 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: controllers/nodeupgrade_controller.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + client "sigs.k8s.io/controller-runtime/pkg/client" +) + +// MockRemoteClientRegistry is a mock of RemoteClientRegistry interface. +type MockRemoteClientRegistry struct { + ctrl *gomock.Controller + recorder *MockRemoteClientRegistryMockRecorder +} + +// MockRemoteClientRegistryMockRecorder is the mock recorder for MockRemoteClientRegistry. +type MockRemoteClientRegistryMockRecorder struct { + mock *MockRemoteClientRegistry +} + +// NewMockRemoteClientRegistry creates a new mock instance. 
+func NewMockRemoteClientRegistry(ctrl *gomock.Controller) *MockRemoteClientRegistry { + mock := &MockRemoteClientRegistry{ctrl: ctrl} + mock.recorder = &MockRemoteClientRegistryMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockRemoteClientRegistry) EXPECT() *MockRemoteClientRegistryMockRecorder { + return m.recorder +} + +// GetClient mocks base method. +func (m *MockRemoteClientRegistry) GetClient(ctx context.Context, cluster client.ObjectKey) (client.Client, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetClient", ctx, cluster) + ret0, _ := ret[0].(client.Client) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetClient indicates an expected call of GetClient. +func (mr *MockRemoteClientRegistryMockRecorder) GetClient(ctx, cluster interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClient", reflect.TypeOf((*MockRemoteClientRegistry)(nil).GetClient), ctx, cluster) +} diff --git a/controllers/nodeupgrade_controller.go b/controllers/nodeupgrade_controller.go index 30c336f735177..c23f5eee05671 100644 --- a/controllers/nodeupgrade_controller.go +++ b/controllers/nodeupgrade_controller.go @@ -25,12 +25,13 @@ import ( ) const ( - upgradeScript = "/foo/eksa-upgrades/scripts/upgrade.sh" + // TODO(in-place): Get this image from the bundle instead of using the hardcoded one. defaultUpgraderImage = "public.ecr.aws/t0n3a9y4/aws/upgrader:v1.28.3-eks-1-28-9" controlPlaneLabel = "node-role.kubernetes.io/control-plane" podDNEMessage = "Upgrader pod does not exist" - NodeUpgradeFinalizerName = "nodeupgrades.anywhere.eks.amazonaws.com/finalizer" + // nodeUpgradeFinalizerName is the finalizer added to NodeUpgrade objects to handle deletion. + nodeUpgradeFinalizerName = "nodeupgrades.anywhere.eks.amazonaws.com/finalizer" ) // RemoteClientRegistry defines methods for remote cluster controller clients. 
@@ -68,6 +69,7 @@ func (r *NodeUpgradeReconciler) SetupWithManager(mgr ctrl.Manager) error { //+kubebuilder:rbac:groups="cluster.x-k8s.io",resources=machines,verbs=list;watch;get;patch;update // Reconcile reconciles a NodeUpgrade object. +//nolint:gocyclo func (r *NodeUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, reterr error) { log := r.log.WithValues("NodeUpgrade", req.NamespacedName) @@ -135,7 +137,7 @@ func (r *NodeUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request) return r.reconcileDelete(ctx, log, nodeUpgrade, machineToBeUpgraded.Status.NodeRef.Name, rClient) } - controllerutil.AddFinalizer(nodeUpgrade, NodeUpgradeFinalizerName) + controllerutil.AddFinalizer(nodeUpgrade, nodeUpgradeFinalizerName) return r.reconcile(ctx, log, machineToBeUpgraded, nodeUpgrade, rClient) } @@ -215,7 +217,7 @@ func (r *NodeUpgradeReconciler) reconcileDelete(ctx context.Context, log logr.Lo } // Remove the finalizer from NodeUpgrade object - controllerutil.RemoveFinalizer(nodeUpgrade, NodeUpgradeFinalizerName) + controllerutil.RemoveFinalizer(nodeUpgrade, nodeUpgradeFinalizerName) return ctrl.Result{}, nil } @@ -240,7 +242,24 @@ func (r *NodeUpgradeReconciler) updateStatus(ctx context.Context, log logr.Logge } conditions.MarkTrue(nodeUpgrade, anywherev1.UpgraderPodCreated) + updateContainerConditions(pod, nodeUpgrade) + // Always update the readyCondition by summarizing the state of other conditions. 
+ conditions.SetSummary(nodeUpgrade, + conditions.WithConditions( + anywherev1.UpgraderPodCreated, + anywherev1.BinariesCopied, + anywherev1.ContainerdUpgraded, + anywherev1.CNIPluginsUpgraded, + anywherev1.KubeadmUpgraded, + anywherev1.KubeletUpgraded, + anywherev1.PostUpgradeCleanupCompleted, + ), + ) + return nil +} + +func updateContainerConditions(pod *corev1.Pod, nodeUpgrade *anywherev1.NodeUpgrade) { containersMap := []struct { name string condition clusterv1.ConditionType @@ -298,22 +317,7 @@ func (r *NodeUpgradeReconciler) updateStatus(ctx context.Context, log logr.Logge } } } - nodeUpgrade.Status.Completed = completed - - // Always update the readyCondition by summarizing the state of other conditions. - conditions.SetSummary(nodeUpgrade, - conditions.WithConditions( - anywherev1.UpgraderPodCreated, - anywherev1.BinariesCopied, - anywherev1.ContainerdUpgraded, - anywherev1.CNIPluginsUpgraded, - anywherev1.KubeadmUpgraded, - anywherev1.KubeletUpgraded, - anywherev1.PostUpgradeCleanupCompleted, - ), - ) - return nil } func getInitContainerStatus(pod *corev1.Pod, containerName string) (*corev1.ContainerStatus, error) { diff --git a/controllers/nodeupgrade_controller_test.go b/controllers/nodeupgrade_controller_test.go new file mode 100644 index 0000000000000..172be0ebdf661 --- /dev/null +++ b/controllers/nodeupgrade_controller_test.go @@ -0,0 +1,111 @@ +package controllers_test + +import ( + "context" + "testing" + + "github.com/golang/mock/gomock" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/aws/eks-anywhere/controllers" + "github.com/aws/eks-anywhere/controllers/mocks" + anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" + upgrader "github.com/aws/eks-anywhere/pkg/nodeupgrader" + "github.com/aws/eks-anywhere/pkg/utils/ptr" +) + +func TestNodeUpgradeReconciler(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + ctrl := gomock.NewController(t) + clientRegistry := mocks.NewMockRemoteClientRegistry(ctrl) + + cluster, machine, node, nodeUpgrade := getObjectsForNodeUpgradeTest() + client := fake.NewClientBuilder().WithRuntimeObjects(cluster, machine, node, nodeUpgrade).Build() + + clientRegistry.EXPECT().GetClient(ctx, types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}).Return(client, nil) + + r := controllers.NewNodeUpgradeReconciler(client, clientRegistry) + req := nodeUpgradeRequest(nodeUpgrade) + _, err := r.Reconcile(ctx, req) + g.Expect(err).To(BeNil()) + + pod := &corev1.Pod{} + err = client.Get(ctx, types.NamespacedName{Name: upgrader.PodName(node.Name), Namespace: "eksa-system"}, pod) + g.Expect(err).To(BeNil()) +} + +func getObjectsForNodeUpgradeTest() (*clusterv1.Cluster, *clusterv1.Machine, *corev1.Node, *anywherev1.NodeUpgrade) { + cluster := generateCluster() + node := generateNode() + machine := generateMachine(cluster, node) + nodeUpgrade := generateNodeUpgrade(machine) + return cluster, machine, node, nodeUpgrade +} + +func nodeUpgradeRequest(nodeUpgrade *anywherev1.NodeUpgrade) reconcile.Request { + return reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: nodeUpgrade.Name, + Namespace: nodeUpgrade.Namespace, + }, + } +} + +func generateNodeUpgrade(machine *clusterv1.Machine) 
*anywherev1.NodeUpgrade { + return &anywherev1.NodeUpgrade{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node-upgrade-request", + Namespace: "eksa-system", + }, + Spec: anywherev1.NodeUpgradeSpec{ + Machine: corev1.ObjectReference{ + Name: machine.Name, + Namespace: machine.Namespace, + }, + KubernetesVersion: "v1.28.1", + KubeletVersion: "v1.28.1", + }, + } +} + +func generateMachine(cluster *clusterv1.Cluster, node *corev1.Node) *clusterv1.Machine { + return &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine01", + Namespace: "eksa-system", + }, + Spec: clusterv1.MachineSpec{ + Version: ptr.String("v1.28.0"), + ClusterName: cluster.Name, + }, + Status: clusterv1.MachineStatus{ + NodeRef: &corev1.ObjectReference{ + Name: node.Name, + }, + }, + } +} + +func generateNode() *corev1.Node { + return &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node01", + }, + } +} + +func generateCluster() *clusterv1.Cluster { + return &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "eksa-system", + }, + } +} diff --git a/pkg/api/v1alpha1/nodeupgrade_types.go b/pkg/api/v1alpha1/nodeupgrade_types.go index da575048433c9..0480c34536cc2 100644 --- a/pkg/api/v1alpha1/nodeupgrade_types.go +++ b/pkg/api/v1alpha1/nodeupgrade_types.go @@ -10,19 +10,25 @@ const ( // NodeUpgradeKind stores the Kind for NodeUpgrade. NodeUpgradeKind = "NodeUpgrade" - UpgraderPodCreated = "UpgraderPodCreated" + // UpgraderPodCreated reports whether the upgrader pod has been created for the node upgrade. + UpgraderPodCreated ConditionType = "UpgraderPodCreated" - // + // BinariesCopied reports whether the binaries have been copied over by the component copier container. BinariesCopied ConditionType = "BinariesCopied" + // ContainerdUpgraded reports whether containerd has been upgraded. ContainerdUpgraded ConditionType = "ContainerdUpgraded" + // CNIPluginsUpgraded reports whether the CNI plugins have been upgraded. 
CNIPluginsUpgraded ConditionType = "CNIPluginsUpgraded" + // KubeadmUpgraded reports whether Kubeadm has been upgraded. KubeadmUpgraded ConditionType = "KubeadmUpgraded" + // KubeletUpgraded reports whether kubelet has been upgraded. KubeletUpgraded ConditionType = "KubeletUpgraded" + // PostUpgradeCleanupCompleted reports whether the post upgrade operations have been completed. PostUpgradeCleanupCompleted ConditionType = "PostUpgradeCleanupCompleted" ) @@ -72,10 +78,12 @@ func init() { SchemeBuilder.Register(&NodeUpgrade{}, &NodeUpgradeList{}) } +// GetConditions returns all the Conditions for the NodeUpgrade object. func (n *NodeUpgrade) GetConditions() clusterv1.Conditions { return n.Status.Conditions } +// SetConditions sets the Conditions on the NodeUpgrade object. func (n *NodeUpgrade) SetConditions(conditions clusterv1.Conditions) { n.Status.Conditions = conditions } diff --git a/pkg/nodeupgrader/upgrader.go b/pkg/nodeupgrader/upgrader.go index 4a8f4cc4349e6..2b18c256338b0 100644 --- a/pkg/nodeupgrader/upgrader.go +++ b/pkg/nodeupgrader/upgrader.go @@ -11,43 +11,50 @@ import ( ) const ( - upgradeScript = "/foo/eksa-upgrades/scripts/upgrade.sh" - defaultUpgraderImage = "public.ecr.aws/t0n3a9y4/aws/upgrader:v1.28.3-eks-1-28-9" - controlPlaneLabel = "node-role.kubernetes.io/control-plane" + upgradeScript = "/foo/eksa-upgrades/scripts/upgrade.sh" - CopierContainerName = "components-copier" + // CopierContainerName holds the name of the components copier container. + CopierContainerName = "components-copier" + + // ContainerdUpgraderContainerName holds the name of the containerd upgrader container. ContainerdUpgraderContainerName = "containerd-upgrader" + + // CNIPluginsUpgraderContainerName holds the name of the CNI plugins upgrader container. 
CNIPluginsUpgraderContainerName = "cni-plugins-upgrader" - KubeadmUpgraderContainerName = "kubeadm-upgrader" - KubeletUpgradeContainerName = "kubelet-kubectl-upgrader" - PostUpgradeContainerName = "post-upgrade-status" + + // KubeadmUpgraderContainerName holds the name of the kubeadm upgrader container. + KubeadmUpgraderContainerName = "kubeadm-upgrader" + + // KubeletUpgradeContainerName holds the name of the kubelet/kubectl upgrader container. + KubeletUpgradeContainerName = "kubelet-kubectl-upgrader" + + // PostUpgradeContainerName holds the name of the post upgrade cleanup/status report container. + PostUpgradeContainerName = "post-upgrade-status" ) -// PodName returns the name of the upgrader pod based on the nodeName +// PodName returns the name of the upgrader pod based on the nodeName. func PodName(nodeName string) string { return fmt.Sprintf("%s-node-upgrader", nodeName) } +// UpgradeFirstControlPlanePod returns an upgrader pod that should be deployed on the first control plane node. func UpgradeFirstControlPlanePod(nodeName, image, kubernetesVersion, etcdVersion string) *corev1.Pod { p := upgraderPod(nodeName, image) p.Spec.InitContainers = containersForUpgrade(image, nodeName, "kubeadm_in_first_cp", kubernetesVersion, etcdVersion) - p.Spec.Containers = []corev1.Container{printAndCleanupContainer(image)} - return p } +// UpgradeRestControlPlanePod returns an upgrader pod that can be deployed on the remaining control plane nodes. func UpgradeRestControlPlanePod(nodeName, image string) *corev1.Pod { p := upgraderPod(nodeName, image) p.Spec.InitContainers = containersForUpgrade(image, nodeName, "kubeadm_in_rest_cp") - p.Spec.Containers = []corev1.Container{printAndCleanupContainer(image)} - return p } +// UpgradeWorkerPod returns an upgrader pod that can be deployed on worker nodes. 
func UpgradeWorkerPod(nodeName, image string) *corev1.Pod { p := upgraderPod(nodeName, image) p.Spec.InitContainers = containersForUpgrade(image, nodeName, "kubeadm_in_worker") - p.Spec.Containers = []corev1.Container{printAndCleanupContainer(image)} return p } @@ -75,6 +82,16 @@ func upgraderPod(nodeName, image string) *corev1.Pod { }, }, }, + // TODO: currently, the pod requires at least one container. + // For the time being, I have added an nginx container but + // this should be replaced with something that makes more + // sense in an in-place context. + Containers: []corev1.Container{ + { + Name: "done", + Image: "nginx", + }, + }, }, } } @@ -114,22 +131,14 @@ func nsenterContainer(image, name string, extraArgs ...string) corev1.Container "--ipc", "--net", } - args = append(args, extraArgs...) return corev1.Container{ Name: name, Image: image, Command: []string{"nsenter"}, - Args: args, + Args: append(args, extraArgs...), SecurityContext: &corev1.SecurityContext{ Privileged: ptr.Bool(true), }, } } - -func printAndCleanupContainer(image string) corev1.Container { - return corev1.Container{ - Name: "done", - Image: "nginx", - } -}