diff --git a/api/v1/gardenercluster_types.go b/api/v1/gardenercluster_types.go
index a982c145..8bb14034 100644
--- a/api/v1/gardenercluster_types.go
+++ b/api/v1/gardenercluster_types.go
@@ -17,6 +17,9 @@ limitations under the License.
 package v1
 
 import (
+    "fmt"
+
+    "k8s.io/apimachinery/pkg/api/meta"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
@@ -67,10 +70,25 @@ type Secret struct {
 type State string
 
 const (
-    ReadyState State = "Ready"
-    ProcessingState State = "Processing"
-    ErrorState State = "Error"
-    DeletingState State = "Deleting"
+    ReadyState State = "Ready"
+    ErrorState State = "Error"
+)
+
+type ConditionReason string
+
+const (
+    ConditionReasonKubeconfigSecretCreated ConditionReason = "KubeconfigSecretCreated"
+    ConditionReasonKubeconfigSecretRotated ConditionReason = "KubeconfigSecretRotated"
+    ConditionReasonFailedToGetSecret ConditionReason = "FailedToCheckSecret"
+    ConditionReasonFailedToCreateSecret ConditionReason = "FailedToCreateSecret"
+    ConditionReasonFailedToUpdateSecret ConditionReason = "FailedToUpdateSecret"
+    ConditionReasonFailedToGetKubeconfig ConditionReason = "FailedToGetKubeconfig"
+)
+
+type ConditionType string
+
+const (
+    ConditionTypeKubeconfigManagement ConditionType = "KubeconfigManagement"
 )
 
 // GardenerClusterStatus defines the observed state of GardenerCluster
@@ -86,6 +104,54 @@ type GardenerClusterStatus struct {
     Conditions []metav1.Condition `json:"conditions,omitempty"`
 }
 
+func (cluster *GardenerCluster) UpdateConditionForReadyState(conditionType ConditionType, reason ConditionReason, conditionStatus metav1.ConditionStatus) {
+    cluster.Status.State = ReadyState
+
+    condition := metav1.Condition{
+        Type: string(conditionType),
+        Status: conditionStatus,
+        LastTransitionTime: metav1.Now(),
+        Reason: string(reason),
+        Message: getMessage(reason),
+    }
+    meta.RemoveStatusCondition(&cluster.Status.Conditions, condition.Type)
+    meta.SetStatusCondition(&cluster.Status.Conditions, condition)
+}
+
+func (cluster *GardenerCluster) UpdateConditionForErrorState(conditionType ConditionType, reason ConditionReason, conditionStatus metav1.ConditionStatus, error error) {
+    cluster.Status.State = ErrorState
+
+    condition := metav1.Condition{
+        Type: string(conditionType),
+        Status: conditionStatus,
+        LastTransitionTime: metav1.Now(),
+        Reason: string(reason),
+        Message: fmt.Sprintf("%s Error: %s", getMessage(reason), error.Error()),
+    }
+    meta.RemoveStatusCondition(&cluster.Status.Conditions, condition.Type)
+    meta.SetStatusCondition(&cluster.Status.Conditions, condition)
+}
+
+func getMessage(reason ConditionReason) string {
+    switch reason {
+    case ConditionReasonKubeconfigSecretCreated:
+        return "Secret created successfully."
+    case ConditionReasonKubeconfigSecretRotated:
+        return "Secret rotated successfully."
+    case ConditionReasonFailedToCreateSecret:
+        return "Failed to create secret."
+    case ConditionReasonFailedToUpdateSecret:
+        return "Failed to rotate secret."
+    case ConditionReasonFailedToGetSecret:
+        return "Failed to get secret."
+    case ConditionReasonFailedToGetKubeconfig:
+        return "Failed to get kubeconfig."
+
+    default:
+        return "Unknown condition"
+    }
+}
+
 func init() {
     SchemeBuilder.Register(&GardenerCluster{}, &GardenerClusterList{})
 }
diff --git a/cmd/main.go b/cmd/main.go
index 6004474b..9fd21517 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -40,6 +40,9 @@ import (
     "sigs.k8s.io/controller-runtime/pkg/log/zap"
 )
 
+// The ratio determines the minimal time that must pass before the certificate is rotated.
+const minimalRotationTimeRatio = 0.6
+
 var (
     scheme = runtime.NewScheme() //nolint:gochecknoglobals
     setupLog = ctrl.Log.WithName("setup") //nolint:gochecknoglobals
@@ -106,15 +109,15 @@ func main() {
     }
     gardenerNamespace := fmt.Sprintf("garden-%s", gardenerProjectName)
 
-    expirationInSeconds := int64(expirationTime.Seconds())
-    kubeconfigProvider, err := setupKubernetesKubeconfigProvider(gardenerKubeconfigPath, gardenerNamespace, expirationInSeconds)
+    kubeconfigProvider, err := setupKubernetesKubeconfigProvider(gardenerKubeconfigPath, gardenerNamespace, expirationTime)
     if err != nil {
         setupLog.Error(err, "unable to initialize kubeconfig provider", "controller", "GardenerCluster")
         os.Exit(1)
     }
 
-    if err = (controller.NewGardenerClusterController(mgr, kubeconfigProvider, logger)).SetupWithManager(mgr); err != nil {
+    rotationPeriod := time.Duration(minimalRotationTimeRatio*expirationTime.Minutes()) * time.Minute
+    if err = (controller.NewGardenerClusterController(mgr, kubeconfigProvider, logger, rotationPeriod)).SetupWithManager(mgr); err != nil {
         setupLog.Error(err, "unable to create controller", "controller", "GardenerCluster")
         os.Exit(1)
     }
@@ -129,14 +132,15 @@ func main() {
         os.Exit(1)
     }
 
-    setupLog.Info("starting manager")
+    setupLog.Info("Starting Manager", "kubeconfigExpirationTime", expirationTime, "kubeconfigRotationPeriod", rotationPeriod)
+
     if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
         setupLog.Error(err, "problem running manager")
         os.Exit(1)
     }
 }
 
-func setupKubernetesKubeconfigProvider(kubeconfigPath string, namespace string, expirationInSeconds int64) (gardener.KubeconfigProvider, error) {
+func setupKubernetesKubeconfigProvider(kubeconfigPath string, namespace string, expirationTime time.Duration) (gardener.KubeconfigProvider, error) {
     restConfig, err := gardener.NewRestConfigFromFile(kubeconfigPath)
     if err != nil {
         return gardener.KubeconfigProvider{}, err
@@ -163,5 +167,5 @@ func setupKubernetesKubeconfigProvider(kubeconfigPath string, namespace string,
     return gardener.NewKubeconfigProvider(shootClient,
         dynamicKubeconfigAPI,
         namespace,
-        expirationInSeconds), nil
+        int64(expirationTime.Seconds())), nil
 }
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index 0f9269f5..86cf5fbc 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -33,3 +33,9 @@ rules:
   - gardenerclusters/finalizers
   verbs:
   - update
+- apiGroups:
+  - infrastructuremanager.kyma-project.io
+  resources:
+  - gardenerclusters/status
+  verbs:
+  - update
diff --git a/config/samples/clusterinventory_v1_gardenercluster.yaml b/config/samples/infrastructuremanager_v1_gardenercluster.yaml
similarity index 100%
rename from config/samples/clusterinventory_v1_gardenercluster.yaml
rename to config/samples/infrastructuremanager_v1_gardenercluster.yaml
diff --git a/internal/controller/gardener_cluster_controller.go b/internal/controller/gardener_cluster_controller.go
index e9b8c96f..5ea16fff 100644
--- a/internal/controller/gardener_cluster_controller.go
+++ b/internal/controller/gardener_cluster_controller.go
@@ -22,21 +22,23 @@ import (
     "time"
 
     "github.com/go-logr/logr"
-    infrastructuremanagerv1 "github.com/kyma-project/infrastructure-manager/api/v1"
+    imv1 "github.com/kyma-project/infrastructure-manager/api/v1"
     "github.com/pkg/errors"
     corev1 "k8s.io/api/core/v1"
     k8serrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/runtime/schema"
+    "k8s.io/apimachinery/pkg/types"
     ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" ) const ( - lastKubeconfigSyncAnnotation = "operator.kyma-project.io/last-sync" - clusterCRNameLabel = "operator.kyma-project.io/cluster-name" - defaultRequeuInSeconds = 60 * 1000 + lastKubeconfigSyncAnnotation = "operator.kyma-project.io/last-sync" + forceKubeconfigRotationAnnotation = "operator.kyma-project.io/force-kubeconfig-rotation" + clusterCRNameLabel = "operator.kyma-project.io/cluster-name" ) // GardenerClusterController reconciles a GardenerCluster object @@ -45,14 +47,16 @@ type GardenerClusterController struct { Scheme *runtime.Scheme KubeconfigProvider KubeconfigProvider log logr.Logger + rotationPeriod time.Duration } -func NewGardenerClusterController(mgr ctrl.Manager, kubeconfigProvider KubeconfigProvider, logger logr.Logger) *GardenerClusterController { +func NewGardenerClusterController(mgr ctrl.Manager, kubeconfigProvider KubeconfigProvider, logger logr.Logger, rotationPeriod time.Duration) *GardenerClusterController { return &GardenerClusterController{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), KubeconfigProvider: kubeconfigProvider, log: logger, + rotationPeriod: rotationPeriod, } } @@ -64,6 +68,7 @@ type KubeconfigProvider interface { //+kubebuilder:rbac:groups=infrastructuremanager.kyma-project.io,resources=gardenerclusters,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;delete //+kubebuilder:rbac:groups=infrastructuremanager.kyma-project.io,resources=gardenerclusters/finalizers,verbs=update +//+kubebuilder:rbac:groups=infrastructuremanager.kyma-project.io,resources=gardenerclusters/status,verbs=update // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. @@ -73,53 +78,95 @@ type KubeconfigProvider interface { // // For more details, check Reconcile and its Result here: // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.15.0/pkg/reconcile -func (r *GardenerClusterController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { //nolint:revive - r.log.Info(fmt.Sprintf("Starting reconciliation loop for GardenerCluster resource: %v", req.NamespacedName)) +func (controller *GardenerClusterController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { //nolint:revive + controller.log.Info("Starting reconciliation.", loggingContext(req)...) - var cluster infrastructuremanagerv1.GardenerCluster + var cluster imv1.GardenerCluster - err := r.Client.Get(ctx, req.NamespacedName, &cluster) + err := controller.Client.Get(ctx, req.NamespacedName, &cluster) if err != nil { if k8serrors.IsNotFound(err) { - err = r.deleteSecret(req.NamespacedName.Name) - if err != nil { - r.log.Error(err, "failed to delete secret") - } + err = controller.deleteKubeconfigSecret(req.NamespacedName.Name) } - return ctrl.Result{ - Requeue: true, - RequeueAfter: defaultRequeuInSeconds, - }, err + if err == nil { + controller.log.Info("Secret has been deleted.", loggingContext(req)...) 
+        }
+
+        return controller.resultWithoutRequeue(), err
     }
 
-    secret, err := r.getSecret(cluster.Spec.Shoot.Name)
+    lastSyncTime := time.Now()
+    kubeconfigRotated, err := controller.createOrRotateKubeconfigSecret(ctx, &cluster, lastSyncTime)
     if err != nil {
-        if !k8serrors.IsNotFound(err) {
-            return ctrl.Result{
-                Requeue: true,
-                RequeueAfter: defaultRequeuInSeconds,
-            }, err
-        }
+        _ = controller.persistStatusChange(ctx, &cluster)
+
+        return controller.resultWithoutRequeue(), err
     }
 
-    if secret == nil {
-        err = r.createSecret(ctx, cluster)
+    err = controller.removeForceRotationAnnotation(ctx, &cluster)
+    if err != nil {
+        return controller.resultWithoutRequeue(), err
+    }
+
+    if kubeconfigRotated {
+        err = controller.persistStatusChange(ctx, &cluster)
         if err != nil {
-            return r.ResultWithoutRequeue(), err
+            return controller.resultWithoutRequeue(), err
         }
     }
 
-    return ctrl.Result{}, nil
+    return controller.resultWithRequeue(), nil
+}
+
+func loggingContextFromCluster(cluster *imv1.GardenerCluster) []any {
+    return []any{"GardenerCluster", cluster.Name, "Namespace", cluster.Namespace}
+}
+
+func loggingContext(req ctrl.Request) []any {
+    return []any{"GardenerCluster", req.Name, "Namespace", req.Namespace}
+}
+
+func (controller *GardenerClusterController) resultWithRequeue() ctrl.Result {
+    return ctrl.Result{
+        Requeue: true,
+        RequeueAfter: controller.rotationPeriod,
+    }
+}
+
+func (controller *GardenerClusterController) resultWithoutRequeue() ctrl.Result {
+    return ctrl.Result{}
+}
+
+func (controller *GardenerClusterController) persistStatusChange(ctx context.Context, cluster *imv1.GardenerCluster) error {
+    key := types.NamespacedName{
+        Name: cluster.Name,
+        Namespace: cluster.Namespace,
+    }
+    var clusterToUpdate imv1.GardenerCluster
+
+    err := controller.Client.Get(ctx, key, &clusterToUpdate)
+    if err != nil {
+        return err
+    }
+
+    clusterToUpdate.Status = cluster.Status
+
+    statusErr := controller.Client.Status().Update(ctx, &clusterToUpdate)
+    if statusErr != nil {
+        controller.log.Error(statusErr, "Failed to set state for GardenerCluster")
+    }
+
+    return statusErr
 }
 
-func (r *GardenerClusterController) deleteSecret(clusterCRName string) error {
+func (controller *GardenerClusterController) deleteKubeconfigSecret(clusterCRName string) error {
     selector := client.MatchingLabels(map[string]string{
         clusterCRNameLabel: clusterCRName,
     })
 
     var secretList corev1.SecretList
-    err := r.Client.List(context.TODO(), &secretList, selector)
+    err := controller.Client.List(context.TODO(), &secretList, selector)
     if err != nil {
         return err
     }
@@ -128,23 +175,17 @@ func (r *GardenerClusterController) deleteSecret(clusterCRName string) error {
         return errors.Errorf("unexpected numer of secrets found for cluster CR `%s`", clusterCRName)
     }
 
-    return r.Client.Delete(context.TODO(), &secretList.Items[0])
-}
-
-func (r *GardenerClusterController) ResultWithoutRequeue() ctrl.Result {
-    return ctrl.Result{
-        Requeue: false,
-    }
+    return controller.Client.Delete(context.TODO(), &secretList.Items[0])
 }
 
-func (r *GardenerClusterController) getSecret(shootName string) (*corev1.Secret, error) {
+func (controller *GardenerClusterController) getSecret(shootName string) (*corev1.Secret, error) {
     var secretList corev1.SecretList
 
     shootNameSelector := client.MatchingLabels(map[string]string{
         "kyma-project.io/shoot-name": shootName,
     })
 
-    err := r.Client.List(context.Background(), &secretList, shootNameSelector)
+    err := controller.Client.List(context.Background(), &secretList, shootNameSelector)
     if err != nil {
         return nil, err
     }
@@ -162,16 +203,146 @@ func (r *GardenerClusterController) getSecret(shootName string) (*corev1.Secret,
     return &secretList.Items[0], nil
 }
 
-func (r *GardenerClusterController) createSecret(ctx context.Context, cluster infrastructuremanagerv1.GardenerCluster) error {
-    secret, err := r.newSecret(cluster)
+func (controller *GardenerClusterController) createOrRotateKubeconfigSecret(ctx context.Context, cluster *imv1.GardenerCluster, lastSyncTime time.Time) (bool, error) {
+    existingSecret, err := controller.getSecret(cluster.Spec.Shoot.Name)
+    if err != nil && !k8serrors.IsNotFound(err) {
+        cluster.UpdateConditionForErrorState(imv1.ConditionTypeKubeconfigManagement, imv1.ConditionReasonFailedToGetSecret, metav1.ConditionTrue, err)
+        return true, err
+    }
+
+    if !secretNeedsToBeRotated(cluster, existingSecret, controller.rotationPeriod) {
+        message := fmt.Sprintf("Secret %s in namespace %s does not need to be rotated yet.", cluster.Spec.Kubeconfig.Secret.Name, cluster.Spec.Kubeconfig.Secret.Namespace)
+        controller.log.Info(message, loggingContextFromCluster(cluster)...)
+        return false, nil
+    }
+
+    if secretRotationForced(cluster) {
+        message := fmt.Sprintf("Rotation of secret %s in namespace %s forced.", cluster.Spec.Kubeconfig.Secret.Name, cluster.Spec.Kubeconfig.Secret.Namespace)
+        controller.log.Info(message, loggingContextFromCluster(cluster)...)
+    }
+
+    kubeconfig, err := controller.KubeconfigProvider.Fetch(cluster.Spec.Shoot.Name)
+    if err != nil {
+        cluster.UpdateConditionForErrorState(imv1.ConditionTypeKubeconfigManagement, imv1.ConditionReasonFailedToGetKubeconfig, metav1.ConditionTrue, err)
+        return true, err
+    }
+
+    if existingSecret != nil {
+        return true, controller.updateExistingSecret(ctx, kubeconfig, cluster, existingSecret, lastSyncTime)
+    }
+
+    return true, controller.createNewSecret(ctx, kubeconfig, cluster, lastSyncTime)
+}
+
+func secretNeedsToBeRotated(cluster *imv1.GardenerCluster, secret *corev1.Secret, rotationPeriod time.Duration) bool {
+    return secretRotationTimePassed(secret, rotationPeriod) || secretRotationForced(cluster)
+}
+
+func secretRotationTimePassed(secret *corev1.Secret, rotationPeriod time.Duration) bool {
+    const rotationPeriodRatio = 0.95
+
+    if secret == nil {
+        return true
+    }
+
+    annotations := secret.GetAnnotations()
+
+    _, found := annotations[lastKubeconfigSyncAnnotation]
+
+    if !found {
+        return true
+    }
+
+    lastSyncTimeString := annotations[lastKubeconfigSyncAnnotation]
+    lastSyncTime, err := time.Parse(time.RFC3339, lastSyncTimeString)
+    if err != nil {
+        return true
+    }
+    now := time.Now()
+    alreadyValidFor := now.Sub(lastSyncTime)
+
+    return alreadyValidFor.Minutes() >= rotationPeriodRatio*rotationPeriod.Minutes()
+}
+
+func secretRotationForced(cluster *imv1.GardenerCluster) bool {
+    annotations := cluster.GetAnnotations()
+    if annotations == nil {
+        return false
+    }
+
+    _, found := annotations[forceKubeconfigRotationAnnotation]
+
+    return found
+}
+
+func (controller *GardenerClusterController) createNewSecret(ctx context.Context, kubeconfig string, cluster *imv1.GardenerCluster, lastSyncTime time.Time) error {
+    newSecret := controller.newSecret(*cluster, kubeconfig, lastSyncTime)
+    err := controller.Client.Create(ctx, &newSecret)
+    if err != nil {
+        cluster.UpdateConditionForErrorState(imv1.ConditionTypeKubeconfigManagement, imv1.ConditionReasonFailedToCreateSecret, metav1.ConditionTrue, err)
+
+        return err
+    }
+
+    cluster.UpdateConditionForReadyState(imv1.ConditionTypeKubeconfigManagement, imv1.ConditionReasonKubeconfigSecretCreated, metav1.ConditionTrue)
+
+    message := fmt.Sprintf("Secret %s has been created in %s namespace.", newSecret.Name, newSecret.Namespace)
+    controller.log.Info(message, loggingContextFromCluster(cluster)...)
+
+    return nil
+}
+
+func (controller *GardenerClusterController) updateExistingSecret(ctx context.Context, kubeconfig string, cluster *imv1.GardenerCluster, existingSecret *corev1.Secret, lastSyncTime time.Time) error {
+    existingSecret.Data[cluster.Spec.Kubeconfig.Secret.Key] = []byte(kubeconfig)
+    annotations := existingSecret.GetAnnotations()
+    if annotations == nil {
+        annotations = map[string]string{}
+    }
+
+    annotations[lastKubeconfigSyncAnnotation] = lastSyncTime.UTC().Format(time.RFC3339)
+    existingSecret.SetAnnotations(annotations)
+
+    err := controller.Client.Update(ctx, existingSecret)
     if err != nil {
+        cluster.UpdateConditionForErrorState(imv1.ConditionTypeKubeconfigManagement, imv1.ConditionReasonFailedToUpdateSecret, metav1.ConditionTrue, err)
+
         return err
     }
 
-    return r.Client.Create(ctx, &secret)
+    cluster.UpdateConditionForReadyState(imv1.ConditionTypeKubeconfigManagement, imv1.ConditionReasonKubeconfigSecretRotated, metav1.ConditionTrue)
+
+    message := fmt.Sprintf("Secret %s has been updated in %s namespace.", existingSecret.Name, existingSecret.Namespace)
+    controller.log.Info(message, loggingContextFromCluster(cluster)...)
+
+    return nil
+}
+
+func (controller *GardenerClusterController) removeForceRotationAnnotation(ctx context.Context, cluster *imv1.GardenerCluster) error {
+    secretRotationForced := secretRotationForced(cluster)
+
+    if secretRotationForced {
+        key := types.NamespacedName{
+            Name: cluster.Name,
+            Namespace: cluster.Namespace,
+        }
+        var clusterToUpdate imv1.GardenerCluster
+
+        err := controller.Client.Get(ctx, key, &clusterToUpdate)
+        if err != nil {
+            return err
+        }
+
+        annotations := clusterToUpdate.GetAnnotations()
+        delete(annotations, forceKubeconfigRotationAnnotation)
+        clusterToUpdate.SetAnnotations(annotations)
+
+        return controller.Client.Update(ctx, &clusterToUpdate)
+    }
+
+    return nil
 }
 
-func (r *GardenerClusterController) newSecret(cluster infrastructuremanagerv1.GardenerCluster) (corev1.Secret, error) {
+func (controller *GardenerClusterController) newSecret(cluster imv1.GardenerCluster, kubeconfig string, lastSyncTime time.Time) corev1.Secret {
     labels := map[string]string{}
 
     for key, val := range cluster.Labels {
@@ -180,25 +351,20 @@ func (r *GardenerClusterController) newSecret(cluster infrastructuremanagerv1.Ga
     labels["operator.kyma-project.io/managed-by"] = "infrastructure-manager"
     labels[clusterCRNameLabel] = cluster.Name
 
-    kubeconfig, err := r.KubeconfigProvider.Fetch(cluster.Spec.Shoot.Name)
-    if err != nil {
-        return corev1.Secret{}, err
-    }
-
     return corev1.Secret{
         ObjectMeta: metav1.ObjectMeta{
             Name: cluster.Spec.Kubeconfig.Secret.Name,
             Namespace: cluster.Spec.Kubeconfig.Secret.Namespace,
             Labels: labels,
-            Annotations: map[string]string{lastKubeconfigSyncAnnotation: time.Now().UTC().String()},
+            Annotations: map[string]string{lastKubeconfigSyncAnnotation: lastSyncTime.UTC().Format(time.RFC3339)},
         },
         StringData: map[string]string{cluster.Spec.Kubeconfig.Secret.Key: kubeconfig},
-    }, nil
+    }
 }
 
 // SetupWithManager sets up the controller with the Manager.
-func (r *GardenerClusterController) SetupWithManager(mgr ctrl.Manager) error {
+func (controller *GardenerClusterController) SetupWithManager(mgr ctrl.Manager) error {
     return ctrl.NewControllerManagedBy(mgr).
-        For(&infrastructuremanagerv1.GardenerCluster{}).
-        Complete(r)
+        For(&imv1.GardenerCluster{}, builder.WithPredicates()).
+        Complete(controller)
 }
diff --git a/internal/controller/gardener_cluster_controller_test.go b/internal/controller/gardener_cluster_controller_test.go
index 9682f666..f60ae6fd 100644
--- a/internal/controller/gardener_cluster_controller_test.go
+++ b/internal/controller/gardener_cluster_controller_test.go
@@ -15,12 +15,12 @@ import (
 var _ = Describe("Gardener Cluster controller", func() {
     Context("Secret with kubeconfig doesn't exist", func() {
 
-        kymaName := "kymaname1"
-        secretName := "secret-name1"
-        shootName := "shootName1"
-        namespace := "default"
+        It("Should create secret, and set Ready status on CR", func() {
+            kymaName := "kymaname1"
+            secretName := "secret-name1"
+            shootName := "shootName1"
+            namespace := "default"
 
-        It("Create secret", func() {
             By("Create GardenerCluster CR")
 
             gardenerClusterCR := fixGardenerClusterCR(kymaName, namespace, shootName, secretName)
@@ -28,36 +28,182 @@ var _ = Describe("Gardener Cluster controller", func() {
             By("Wait for secret creation")
             var kubeconfigSecret corev1.Secret
-            key := types.NamespacedName{Name: secretName, Namespace: namespace}
+            secretKey := types.NamespacedName{Name: secretName, Namespace: namespace}
 
             Eventually(func() bool {
-                return k8sClient.Get(context.Background(), key, &kubeconfigSecret) == nil
+                return k8sClient.Get(context.Background(), secretKey, &kubeconfigSecret) == nil
+            }, time.Second*30, time.Second*3).Should(BeTrue())
+
+            gardenerClusterKey := types.NamespacedName{Name: gardenerClusterCR.Name, Namespace: gardenerClusterCR.Namespace}
+            var newGardenerCluster imv1.GardenerCluster
+            Eventually(func() bool {
+                err := k8sClient.Get(context.Background(), gardenerClusterKey, &newGardenerCluster)
+                if err != nil {
+                    return false
+                }
+
+                return newGardenerCluster.Status.State == imv1.ReadyState
             }, time.Second*30, time.Second*3).Should(BeTrue())
 
-            err := k8sClient.Get(context.Background(), key, &kubeconfigSecret)
+            err := k8sClient.Get(context.Background(), secretKey, &kubeconfigSecret)
             Expect(err).To(BeNil())
 
-            expectedSecret := fixNewSecret(secretName, namespace, kymaName, shootName, "kubeconfig1")
+            expectedSecret := fixNewSecret(secretName, namespace, kymaName, shootName, "kubeconfig1", "")
 
             Expect(kubeconfigSecret.Labels).To(Equal(expectedSecret.Labels))
             Expect(kubeconfigSecret.Data).To(Equal(expectedSecret.Data))
-            Expect(kubeconfigSecret.Annotations[lastKubeconfigSyncAnnotation]).To(Not(BeEmpty()))
+            lastSyncTime := kubeconfigSecret.Annotations[lastKubeconfigSyncAnnotation]
+            Expect(lastSyncTime).ToNot(BeEmpty())
+
+        })
+
+        It("Should delete secret", func() {
+            kymaName := "kymaname2"
+            secretName := "secret-name2"
+            shootName := "shootName2"
+            namespace := "default"
+
+            By("Create GardenerCluster CR")
+
+            gardenerClusterCR := fixGardenerClusterCR(kymaName, namespace, shootName, secretName)
+            Expect(k8sClient.Create(context.Background(), &gardenerClusterCR)).To(Succeed())
+
+            By("Wait for secret creation")
+            var kubeconfigSecret corev1.Secret
+            secretKey := types.NamespacedName{Name: secretName, Namespace: namespace}
+
+            Eventually(func() bool {
+                return k8sClient.Get(context.Background(), secretKey, &kubeconfigSecret) == nil
+            }, time.Second*30, time.Second*3).Should(BeTrue())
 
             By("Delete Cluster CR")
             Expect(k8sClient.Delete(context.Background(), &gardenerClusterCR)).To(Succeed())
 
             By("Wait for secret deletion")
             Eventually(func() bool {
-                err := k8sClient.Get(context.Background(), key, &kubeconfigSecret)
+                err := k8sClient.Get(context.Background(), secretKey, &kubeconfigSecret)
 
                 return err != nil && k8serrors.IsNotFound(err)
             }, time.Second*30, time.Second*3).Should(BeTrue())
         })
+
+        It("Should set Error status on CR if failed to fetch kubeconfig", func() {
+            kymaName := "kymaname3"
+            secretName := "secret-name3"
+            shootName := "shootName3"
+            namespace := "default"
+
+            gardenerClusterCR := fixGardenerClusterCR(kymaName, namespace, shootName, secretName)
+            Expect(k8sClient.Create(context.Background(), &gardenerClusterCR)).To(Succeed())
+
+            gardenerClusterKey := types.NamespacedName{Name: gardenerClusterCR.Name, Namespace: gardenerClusterCR.Namespace}
+            var newGardenerCluster imv1.GardenerCluster
+            Eventually(func() bool {
+                err := k8sClient.Get(context.Background(), gardenerClusterKey, &newGardenerCluster)
+                if err != nil {
+                    return false
+                }
+
+                return newGardenerCluster.Status.State == imv1.ErrorState
+            }, time.Second*30, time.Second*3).Should(BeTrue())
+        })
+    })
+
+    Context("Secret with kubeconfig exists", func() {
+        namespace := "default"
+
+        DescribeTable("Should update secret", func(gardenerClusterCR imv1.GardenerCluster, secret corev1.Secret, expectedKubeconfig string) {
+            By("Create kubeconfig secret")
+            Expect(k8sClient.Create(context.Background(), &secret)).To(Succeed())
+
+            previousTimestamp := secret.Annotations[lastKubeconfigSyncAnnotation]
+
+            By("Create Cluster CR")
+            Expect(k8sClient.Create(context.Background(), &gardenerClusterCR)).To(Succeed())
+
+            var kubeconfigSecret corev1.Secret
+            secretKey := types.NamespacedName{Name: secret.Name, Namespace: namespace}
+
+            Eventually(func() bool {
+                err := k8sClient.Get(context.Background(), secretKey, &kubeconfigSecret)
+                if err != nil {
+                    return false
+                }
+
+                timestampAnnotation := kubeconfigSecret.Annotations[lastKubeconfigSyncAnnotation]
+
+                return timestampAnnotation != previousTimestamp
+            }, time.Second*30, time.Second*3).Should(BeTrue())
+
+            gardenerClusterKey := types.NamespacedName{Name: gardenerClusterCR.Name, Namespace: gardenerClusterCR.Namespace}
+            var newGardenerCluster imv1.GardenerCluster
+
+            Eventually(func() bool {
+                err := k8sClient.Get(context.Background(), gardenerClusterKey, &newGardenerCluster)
+                if err != nil {
+                    return false
+                }
+
+                readyState := newGardenerCluster.Status.State == imv1.ReadyState
+                _, forceRotationAnnotationFound := newGardenerCluster.GetAnnotations()[forceKubeconfigRotationAnnotation]
+
+                return readyState && !forceRotationAnnotationFound
+            }, time.Second*45, time.Second*3).Should(BeTrue())
+
+            err := k8sClient.Get(context.Background(), secretKey, &kubeconfigSecret)
+            Expect(err).To(BeNil())
+            Expect(string(kubeconfigSecret.Data["config"])).To(Equal(expectedKubeconfig))
+            lastSyncTime := kubeconfigSecret.Annotations[lastKubeconfigSyncAnnotation]
+            Expect(lastSyncTime).ToNot(BeEmpty())
+
+        },
+            Entry("Rotate kubeconfig when rotation time passed",
+                fixGardenerClusterCR("kymaname4", namespace, "shootName4", "secret-name4"),
+                fixNewSecret("secret-name4", namespace, "kymaname4", "shootName4", "kubeconfig4", "2023-10-09T23:00:00Z"),
+                "kubeconfig4"),
+            Entry("Force rotation",
+                fixGardenerClusterCRWithForceRotationAnnotation("kymaname5", namespace, "shootName5", "secret-name5"),
+                fixNewSecret("secret-name5", namespace, "kymaname5", "shootName5", "kubeconfig5", time.Now().UTC().Format(time.RFC3339)),
+                "kubeconfig5"),
+        )
+
+        It("Should skip rotation", func() {
+            By("Create kubeconfig secret")
+            secret := fixNewSecret("secret-name6", namespace, "kymaname6", "shootName6", "kubeconfig6", time.Now().UTC().Format(time.RFC3339))
+            Expect(k8sClient.Create(context.Background(), &secret)).To(Succeed())
+
+            previousTimestamp := secret.Annotations[lastKubeconfigSyncAnnotation]
+
+            By("Create Cluster CR")
+            gardenerClusterCR := fixGardenerClusterCR("kymaname6", namespace, "shootName6", "secret-name6")
+            Expect(k8sClient.Create(context.Background(), &gardenerClusterCR)).To(Succeed())
+
+            var kubeconfigSecret corev1.Secret
+            secretKey := types.NamespacedName{Name: secret.Name, Namespace: namespace}
+
+            Consistently(func() bool {
+                err := k8sClient.Get(context.Background(), secretKey, &kubeconfigSecret)
+                if err != nil {
+                    return false
+                }
+
+                timestampAnnotation := kubeconfigSecret.Annotations[lastKubeconfigSyncAnnotation]
+
+                return timestampAnnotation == previousTimestamp
+            }, time.Second*45, time.Second*3).Should(BeTrue())
+        })
     })
 })
 
-func fixNewSecret(name, namespace, kymaName, shootName, data string) corev1.Secret {
+func fixNewSecret(name, namespace, kymaName, shootName, data string, lastSyncTime string) corev1.Secret {
     labels := fixSecretLabels(kymaName, shootName)
+    annotations := map[string]string{lastKubeconfigSyncAnnotation: lastSyncTime}
 
     builder := newTestSecret(name, namespace)
-    return builder.WithLabels(labels).WithData(data).ToSecret()
+    return builder.WithLabels(labels).WithAnnotations(annotations).WithData(data).ToSecret()
+}
+
+func (sb *TestSecret) WithAnnotations(annotations map[string]string) *TestSecret {
+    sb.secret.Annotations = annotations
+
+    return sb
 }
 
 func (sb *TestSecret) WithLabels(labels map[string]string) *TestSecret {
@@ -92,18 +238,27 @@ type TestSecret struct {
 }
 
 func fixSecretLabels(kymaName, shootName string) map[string]string {
-    labels := fixClusterInventoryLabels(kymaName, shootName)
+    labels := fixGardenerClusterLabels(kymaName, shootName)
     labels["operator.kyma-project.io/managed-by"] = "infrastructure-manager"
     labels["operator.kyma-project.io/cluster-name"] = kymaName
     return labels
 }
 
 func fixGardenerClusterCR(kymaName, namespace, shootName, secretName string) imv1.GardenerCluster {
-    return newTestInfrastructureManagerCR(kymaName, namespace, shootName, secretName).
-        WithLabels(fixClusterInventoryLabels(kymaName, shootName)).ToCluster()
+    return newTestGardenerClusterCR(kymaName, namespace, shootName, secretName).
+        WithLabels(fixGardenerClusterLabels(kymaName, shootName)).ToCluster()
+}
+
+func fixGardenerClusterCRWithForceRotationAnnotation(kymaName, namespace, shootName, secretName string) imv1.GardenerCluster {
+    annotations := map[string]string{forceKubeconfigRotationAnnotation: "true"}
+
+    return newTestGardenerClusterCR(kymaName, namespace, shootName, secretName).
+        WithLabels(fixGardenerClusterLabels(kymaName, shootName)).
+        WithAnnotations(annotations).
+        ToCluster()
 }
 
-func newTestInfrastructureManagerCR(name, namespace, shootName, secretName string) *TestGardenerClusterCR {
+func newTestGardenerClusterCR(name, namespace, shootName, secretName string) *TestGardenerClusterCR {
     return &TestGardenerClusterCR{
         gardenerCluster: imv1.GardenerCluster{
             ObjectMeta: metav1.ObjectMeta{
@@ -132,6 +287,12 @@ func (sb *TestGardenerClusterCR) WithLabels(labels map[string]string) *TestGarde
     return sb
 }
 
+func (sb *TestGardenerClusterCR) WithAnnotations(annotations map[string]string) *TestGardenerClusterCR {
+    sb.gardenerCluster.Annotations = annotations
+
+    return sb
+}
+
 func (sb *TestGardenerClusterCR) ToCluster() imv1.GardenerCluster {
     return sb.gardenerCluster
 }
@@ -140,7 +301,7 @@ type TestGardenerClusterCR struct {
     gardenerCluster imv1.GardenerCluster
 }
 
-func fixClusterInventoryLabels(kymaName, shootName string) map[string]string {
+func fixGardenerClusterLabels(kymaName, shootName string) map[string]string {
     labels := map[string]string{}
 
     labels["kyma-project.io/instance-id"] = "instanceID"
diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go
index 8fd56f39..15c7c8b3 100644
--- a/internal/controller/suite_test.go
+++ b/internal/controller/suite_test.go
@@ -20,11 +20,13 @@ import (
     "context"
     "path/filepath"
     "testing"
+    "time"
 
     infrastructuremanagerv1 "github.com/kyma-project/infrastructure-manager/api/v1"
     "github.com/kyma-project/infrastructure-manager/internal/controller/mocks"
     . "github.com/onsi/ginkgo/v2"
     . "github.com/onsi/gomega"
+    "github.com/pkg/errors"
     "k8s.io/client-go/kubernetes/scheme"
     "k8s.io/client-go/rest"
     ctrl "sigs.k8s.io/controller-runtime"
@@ -45,6 +47,8 @@ var (
     cancelSuiteCtx context.CancelFunc //nolint:gochecknoglobals
 )
 
+const TestKubeconfigValidityTime = 24 * time.Hour
+
 func TestControllers(t *testing.T) {
     RegisterFailHandler(Fail)
 
@@ -76,7 +80,7 @@ var _ = BeforeSuite(func() {
     kubeconfigProviderMock := &mocks.KubeconfigProvider{}
     setupKubeconfigProviderMock(kubeconfigProviderMock)
 
-    controller := NewGardenerClusterController(mgr, kubeconfigProviderMock, logger)
+    controller := NewGardenerClusterController(mgr, kubeconfigProviderMock, logger, TestKubeconfigValidityTime)
     Expect(controller).NotTo(BeNil())
 
     err = controller.SetupWithManager(mgr)
@@ -100,7 +104,10 @@ var _ = BeforeSuite(func() {
 func setupKubeconfigProviderMock(kpMock *mocks.KubeconfigProvider) {
     kpMock.On("Fetch", "shootName1").Return("kubeconfig1", nil)
     kpMock.On("Fetch", "shootName2").Return("kubeconfig2", nil)
-    kpMock.On("Fetch", "shootName3").Return("kubeconfig3", nil)
+    kpMock.On("Fetch", "shootName3").Return("", errors.New("failed to get kubeconfig"))
+    kpMock.On("Fetch", "shootName6").Return("kubeconfig6", nil)
+    kpMock.On("Fetch", "shootName4").Return("kubeconfig4", nil)
+    kpMock.On("Fetch", "shootName5").Return("kubeconfig5", nil)
 }
 
 var _ = AfterSuite(func() {
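Note on the rotation arithmetic introduced above: cmd/main.go derives the requeue period as minimalRotationTimeRatio (0.6) of the kubeconfig expiration time, and secretRotationTimePassed in the controller only rotates once the secret's last-sync annotation is older than rotationPeriodRatio (0.95) of that period. The standalone sketch below simply replays that arithmetic for an assumed 24h expiration time (the same value the test suite uses as TestKubeconfigValidityTime); the package layout, the example value, and the printed figures are illustrative only and are not part of the change set.

package main

import (
	"fmt"
	"time"
)

func main() {
	const minimalRotationTimeRatio = 0.6 // constant added in cmd/main.go
	const rotationPeriodRatio = 0.95     // constant used in secretRotationTimePassed

	expirationTime := 24 * time.Hour // assumed example kubeconfig validity

	// Requeue period used by resultWithRequeue: 0.6 * 24h = 14h24m.
	rotationPeriod := time.Duration(minimalRotationTimeRatio*expirationTime.Minutes()) * time.Minute

	// Secret age at which secretRotationTimePassed reports true: 0.95 * 14h24m, truncated to whole minutes.
	rotationThreshold := time.Duration(rotationPeriodRatio*rotationPeriod.Minutes()) * time.Minute

	fmt.Println("requeue period:    ", rotationPeriod)     // 14h24m0s
	fmt.Println("rotation threshold:", rotationThreshold)  // 13h40m0s
}

Under those assumed numbers the kubeconfig would normally be rotated at the first requeue after roughly 14h24m, well inside the 24h validity window, and the force-rotation annotation remains the escape hatch for rotating earlier.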