Waiting for control plane to be fully upgraded #6764

Merged · 1 commit · Oct 4, 2023
1 change: 1 addition & 0 deletions pkg/clusterapi/constants.go
@@ -4,4 +4,5 @@ import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"

const (
	ControlPlaneReadyCondition clusterv1.ConditionType = "ControlPlaneReady"
	ReadyCondition             clusterv1.ConditionType = "Ready"
)
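Note: the new ReadyCondition constant is consumed later in this PR via cluster-api's conditions helpers. A minimal sketch of that usage, assuming the upstream sigs.k8s.io/cluster-api/util/conditions package; the kcpIsReady wrapper is purely illustrative:

```go
package example

import (
	kcpv1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/conditions"

	"github.com/aws/eks-anywhere/pkg/clusterapi"
)

// kcpIsReady reports whether the KubeadmControlPlane carries the Ready
// condition with status True, mirroring the check added in clusterapi.go below.
func kcpIsReady(kcp *kcpv1.KubeadmControlPlane) bool {
	return conditions.IsTrue(kcp, clusterapi.ReadyCondition)
}
```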
31 changes: 29 additions & 2 deletions pkg/controller/clusters/clusterapi.go
@@ -11,13 +11,40 @@
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/controller"
"github.com/aws/eks-anywhere/pkg/features"
)

// CheckControlPlaneReady is a controller helper to check whether a CAPI cluster CP for
// an eks-a cluster is ready or not. This is intended to be used from cluster reconcilers
// CheckControlPlaneReady is a controller helper to check whether the KCP object for
// the cluster is ready or not. This is intended to be used from cluster reconcilers
// due to its signature and the fact that it returns controller results with appropriate wait times whenever
// the cluster is not ready.
func CheckControlPlaneReady(ctx context.Context, client client.Client, log logr.Logger, cluster *anywherev1.Cluster) (controller.Result, error) {
	if features.IsActive(features.ExperimentalSelfManagedClusterUpgrade()) {
		kcp, err := controller.GetKubeadmControlPlane(ctx, client, cluster)
		if err != nil {
			return controller.Result{}, err
		}

		if kcp == nil {
			log.Info("KCP does not exist yet, requeuing")
			return controller.ResultWithRequeue(5 * time.Second), nil
		}

		// We make sure to check that the status is up to date before using it
		if kcp.Status.ObservedGeneration != kcp.ObjectMeta.Generation {
			log.Info("KCP information is outdated, requeuing")
			return controller.ResultWithRequeue(5 * time.Second), nil
		}

		if !conditions.IsTrue(kcp, clusterapi.ReadyCondition) {
			log.Info("KCP is not ready yet, requeuing")
			return controller.ResultWithRequeue(30 * time.Second), nil
		}

		log.Info("KCP is ready")
		return controller.Result{}, nil
	}

	capiCluster, err := controller.GetCAPICluster(ctx, client, cluster)
	if err != nil {
		return controller.Result{}, err
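For context, here is a minimal sketch of how a cluster reconciler might consume this helper. The waitForControlPlane wrapper and its result handling are illustrative; the CheckControlPlaneReady signature and the controller.Result struct with its embedded *ctrl.Result are taken from this PR's code and tests:

```go
package example

import (
	"context"

	"github.com/go-logr/logr"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
	"github.com/aws/eks-anywhere/pkg/controller/clusters"
)

// waitForControlPlane defers to CheckControlPlaneReady and converts its result
// into a controller-runtime result, requeueing until the control plane is ready.
func waitForControlPlane(ctx context.Context, c client.Client, log logr.Logger, cluster *anywherev1.Cluster) (ctrl.Result, error) {
	result, err := clusters.CheckControlPlaneReady(ctx, c, log, cluster)
	if err != nil {
		return ctrl.Result{}, err
	}
	// A non-nil embedded Result carries the requeue interval chosen by the helper:
	// 5s while the KCP is missing or its status is stale, 30s while it is not yet Ready.
	if result.Result != nil {
		return *result.Result, nil
	}
	// Control plane is ready; continue with the rest of the reconciliation.
	return ctrl.Result{}, nil
}
```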
114 changes: 112 additions & 2 deletions pkg/controller/clusters/clusterapi_test.go
@@ -2,6 +2,7 @@ package clusters_test

import (
	"context"
	"os"
	"testing"
	"time"

@@ -10,15 +11,18 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"

"github.com/aws/eks-anywhere/internal/test"
_ "github.com/aws/eks-anywhere/internal/test/envtest"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/controller"
"github.com/aws/eks-anywhere/pkg/controller/clusters"
"github.com/aws/eks-anywhere/pkg/features"
)

func TestCheckControlPlaneReadyItIsReady(t *testing.T) {
@@ -82,6 +86,92 @@ func TestCheckControlPlaneReadyErrorReading(t *testing.T) {
	g.Expect(err).To(MatchError(ContainSubstring("no kind is registered for the type")))
}

func TestCheckControlPlaneReadyItIsReadyWithKindlessUpgrade(t *testing.T) {
	features.ClearCache()
	os.Setenv(features.ExperimentalSelfManagedClusterUpgradeEnvVar, "true")

	g := NewWithT(t)
	ctx := context.Background()
	eksaCluster := eksaCluster()
	kcp := kcpObject(func(k *v1beta1.KubeadmControlPlane) {
		k.Status.Conditions = clusterv1.Conditions{
			{
				Type:   clusterapi.ReadyCondition,
				Status: corev1.ConditionTrue,
			},
		}
	})

	client := fake.NewClientBuilder().WithObjects(eksaCluster, kcp).Build()

	result, err := clusters.CheckControlPlaneReady(ctx, client, test.NewNullLogger(), eksaCluster)
	g.Expect(err).NotTo(HaveOccurred())
	g.Expect(result).To(Equal(controller.Result{}))
}

func TestCheckControlPlaneReadyNoKcpWithKindlessUpgrade(t *testing.T) {
	features.ClearCache()
	os.Setenv(features.ExperimentalSelfManagedClusterUpgradeEnvVar, "true")

	g := NewWithT(t)
	ctx := context.Background()
	eksaCluster := eksaCluster()
	client := fake.NewClientBuilder().WithObjects(eksaCluster).Build()

	result, err := clusters.CheckControlPlaneReady(ctx, client, test.NewNullLogger(), eksaCluster)
	g.Expect(err).NotTo(HaveOccurred())
	g.Expect(result).To(Equal(
		controller.Result{Result: &controllerruntime.Result{RequeueAfter: 5 * time.Second}}),
	)
}

func TestCheckControlPlaneNotReadyWithKindlessUpgrade(t *testing.T) {
	features.ClearCache()
	os.Setenv(features.ExperimentalSelfManagedClusterUpgradeEnvVar, "true")

	g := NewWithT(t)
	ctx := context.Background()
	eksaCluster := eksaCluster()
	kcp := kcpObject(func(k *v1beta1.KubeadmControlPlane) {
		k.Status = v1beta1.KubeadmControlPlaneStatus{
			ObservedGeneration: 2,
		}
	})

	client := fake.NewClientBuilder().WithObjects(eksaCluster, kcp).Build()

	result, err := clusters.CheckControlPlaneReady(ctx, client, test.NewNullLogger(), eksaCluster)
	g.Expect(err).NotTo(HaveOccurred())
	g.Expect(result).To(Equal(
		controller.Result{Result: &controllerruntime.Result{RequeueAfter: 5 * time.Second}}),
	)
}

func TestCheckControlPlaneStatusNotReadyWithKindlessUpgrade(t *testing.T) {
	features.ClearCache()
	os.Setenv(features.ExperimentalSelfManagedClusterUpgradeEnvVar, "true")

	g := NewWithT(t)
	ctx := context.Background()
	eksaCluster := eksaCluster()
	kcp := kcpObject(func(k *v1beta1.KubeadmControlPlane) {
		k.Status.Conditions = clusterv1.Conditions{
			{
				Type:   clusterapi.ReadyCondition,
				Status: corev1.ConditionFalse,
			},
		}
	})

	client := fake.NewClientBuilder().WithObjects(eksaCluster, kcp).Build()

	result, err := clusters.CheckControlPlaneReady(ctx, client, test.NewNullLogger(), eksaCluster)
	g.Expect(err).NotTo(HaveOccurred())
	g.Expect(result).To(Equal(
		controller.Result{Result: &controllerruntime.Result{RequeueAfter: 30 * time.Second}}),
	)
}

func eksaCluster() *anywherev1.Cluster {
	return &anywherev1.Cluster{
		TypeMeta: metav1.TypeMeta{
@@ -104,13 +194,33 @@ func capiCluster(opts ...capiClusterOpt) *clusterv1.Cluster {
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-cluster",
			Namespace: "eksa-system",
			Namespace: constants.EksaSystemNamespace,
		},
	}

	for _, opt := range opts {
		opt(c)
	}

	return c
}

type kcpObjectOpt func(*v1beta1.KubeadmControlPlane)

func kcpObject(opts ...kcpObjectOpt) *v1beta1.KubeadmControlPlane {
	k := &v1beta1.KubeadmControlPlane{
		TypeMeta: metav1.TypeMeta{
			Kind:       "KubeadmControlPlane",
			APIVersion: v1beta1.GroupVersion.String(),
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-cluster",
			Namespace: constants.EksaSystemNamespace,
		},
	}

	for _, opt := range opts {
		opt(k)
	}

	return k
}
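As a side note, the new tests all share the same gating pattern before exercising the kindless-upgrade path. A minimal, self-contained sketch of that pattern, assuming only the feature-gate identifiers used in the diff above (the example_test package, TestKindlessUpgradeGate name, and the t.Cleanup teardown are illustrative, not part of this PR):

```go
package example_test

import (
	"os"
	"testing"

	"github.com/aws/eks-anywhere/pkg/features"
)

// TestKindlessUpgradeGate illustrates the gating pattern shared by the new tests:
// clear the features cache, set the env var, then verify the gate is active.
func TestKindlessUpgradeGate(t *testing.T) {
	features.ClearCache()
	os.Setenv(features.ExperimentalSelfManagedClusterUpgradeEnvVar, "true")
	t.Cleanup(func() {
		os.Unsetenv(features.ExperimentalSelfManagedClusterUpgradeEnvVar)
		features.ClearCache()
	})

	if !features.IsActive(features.ExperimentalSelfManagedClusterUpgrade()) {
		t.Fatal("expected the self-managed cluster upgrade feature gate to be active")
	}
}
```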