From ec1673fa0db4fd432fe9c37fd3f002e0c08d4f4d Mon Sep 17 00:00:00 2001 From: m00g3n Date: Fri, 2 Aug 2024 09:13:04 +0200 Subject: [PATCH] Add kubeconfig access handling --- api/v1/runtime_types.go | 1 + cmd/main.go | 2 + .../runtime_fsm_apply_clusterrolebindings.go | 189 ++++++++++++++++++ .../fsm/runtime_fsm_create_kubeconfig.go | 2 +- .../runtime/fsm/runtime_fsm_patch_shoot.go | 2 +- .../runtime/fsm/runtime_fsm_process_shoot.go | 20 -- ...runtime_fsm_waiting_for_shoot_reconcile.go | 2 +- 7 files changed, 195 insertions(+), 23 deletions(-) create mode 100644 internal/controller/runtime/fsm/runtime_fsm_apply_clusterrolebindings.go delete mode 100644 internal/controller/runtime/fsm/runtime_fsm_process_shoot.go diff --git a/api/v1/runtime_types.go b/api/v1/runtime_types.go index dca4c64f..8b48c95b 100644 --- a/api/v1/runtime_types.go +++ b/api/v1/runtime_types.go @@ -228,6 +228,7 @@ func (k *Runtime) UpdateStateDeletion(c RuntimeConditionType, r RuntimeCondition meta.SetStatusCondition(&k.Status.Conditions, condition) } +// FIXME: create update status for failed func (k *Runtime) UpdateStatePending(c RuntimeConditionType, r RuntimeConditionReason, status, msg string) { if status != "False" { k.Status.State = RuntimeStatePending diff --git a/cmd/main.go b/cmd/main.go index 4167ff23..5f45852f 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -35,6 +35,7 @@ import ( "github.com/kyma-project/infrastructure-manager/internal/gardener/kubeconfig" "github.com/kyma-project/infrastructure-manager/internal/gardener/shoot" "github.com/pkg/errors" + rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" @@ -54,6 +55,7 @@ var ( func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(infrastructuremanagerv1.AddToScheme(scheme)) + utilruntime.Must(rbacv1.AddToScheme(scheme)) //+kubebuilder:scaffold:scheme } diff --git 
a/internal/controller/runtime/fsm/runtime_fsm_apply_clusterrolebindings.go b/internal/controller/runtime/fsm/runtime_fsm_apply_clusterrolebindings.go new file mode 100644 index 00000000..09f7a9b8 --- /dev/null +++ b/internal/controller/runtime/fsm/runtime_fsm_apply_clusterrolebindings.go @@ -0,0 +1,189 @@ +package fsm + +import ( + "context" + "slices" + + authenticationv1alpha1 "github.com/gardener/gardener/pkg/apis/authentication/v1alpha1" + gardener_api "github.com/gardener/gardener/pkg/apis/core/v1beta1" + imv1 "github.com/kyma-project/infrastructure-manager/api/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + + "k8s.io/client-go/tools/clientcmd" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + labelsClusterRoleBindings = map[string]string{ + "app": "kyma", + "reconciler.kyma-project.io/managed-by": "kim", + } +) + +func getShootClientWithAdmin(ctx context.Context, + adminKubeconfigClient client.SubResourceClient, + shoot *gardener_api.Shoot) (client.Client, error) { + // request for admin kubeconfig with low expiration timeout + var req authenticationv1alpha1.AdminKubeconfigRequest + if err := adminKubeconfigClient.Create(ctx, shoot, &req); err != nil { + return nil, err + } + + restConfig, err := clientcmd.RESTConfigFromKubeConfig(req.Status.Kubeconfig) + if err != nil { + return nil, err + } + + shootClientWithAdmin, err := client.New(restConfig, client.Options{}) + if err != nil { + return nil, err + } + + return shootClientWithAdmin, nil +} + +func isRBACUserKind(s rbacv1.Subject) bool { + return s.Kind == rbacv1.UserKind && + s.APIGroup == rbacv1.GroupName +} + +func getRemoved(crbs []rbacv1.ClusterRoleBinding, admins []string) (removed []rbacv1.ClusterRoleBinding) { + // iterate over cluster role bindings to find out removed administrators + for _, crb := range crbs { + if 
!labels.Set(crb.Labels).AsSelector().Matches(labels.Set(labelsClusterRoleBindings)) { + // cluster role binding is not controlled by KIM + continue + } + + index := slices.IndexFunc(crb.Subjects, isRBACUserKind) + if index < 0 { + // cluster role binding does not contain user subject + continue + } + + subjectUserName := crb.Subjects[index].Name + if slices.Contains(admins, subjectUserName) { + continue + } + // administrator was removed + removed = append(removed, crb) + } + + return removed +} + +var newContainsAdmin = func(admin string) func(rbacv1.ClusterRoleBinding) bool { + return func(r rbacv1.ClusterRoleBinding) bool { + for _, subject := range r.Subjects { + if !isRBACUserKind(subject) || subject.Name != admin { + continue + } + // admin found + return true + } + // admin not found in the slice + return false + } +} + +func getMissing(crbs []rbacv1.ClusterRoleBinding, admins []string) (missing []rbacv1.ClusterRoleBinding) { + for _, admin := range admins { + containsAdmin := newContainsAdmin(admin) + if slices.ContainsFunc(crbs, containsAdmin) { + continue + } + crb := toAdminClusterRoleBinding(admin) + missing = append(missing, crb) + } + + return missing +} + +func toAdminClusterRoleBinding(name string) rbacv1.ClusterRoleBinding { + return rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "admin-", + Labels: labelsClusterRoleBindings, + }, + Subjects: []rbacv1.Subject{{ + Kind: rbacv1.UserKind, + Name: name, + APIGroup: rbacv1.GroupName, + }}, + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "ClusterRole", + Name: "cluster-admin", + }, + } +} + +var newDelCRBs = func(ctx context.Context, shootClient client.Client, crbs []rbacv1.ClusterRoleBinding) func() error { + return func() error { + for _, crb := range crbs { + if err := shootClient.Delete(ctx, &crb); err != nil { + return err + } + } + return nil + } +} + +var newAddCRBs = func(ctx context.Context, shootClient client.Client, crbs 
[]rbacv1.ClusterRoleBinding) func() error { + return func() error { + for _, crb := range crbs { + if err := shootClient.Create(ctx, &crb); err != nil { + return err + } + } + return nil + } +} + +func updateCRBApplyFailed(rt *imv1.Runtime) { + rt.UpdateStatePending( + imv1.ConditionTypeRuntimeConfigured, + imv1.ConditionReasonConfigurationErr, + string(metav1.ConditionFalse), + "failed to update kubeconfig admin access", + ) +} + +func sFnApplyClusterRoleBindings(ctx context.Context, m *fsm, s *systemState) (stateFn, *ctrl.Result, error) { + // prepare subresource client to request admin kubeconfig + srscClient := m.ShootClient.SubResource("adminkubeconfig") + shootAdminClient, err := getShootClientWithAdmin(ctx, srscClient, s.shoot) + if err != nil { + updateCRBApplyFailed(&s.instance) + return updateStatusAndStopWithError(err) + } + // list existing cluster role bindings + var crbList rbacv1.ClusterRoleBindingList + if err := shootAdminClient.List(ctx, &crbList); err != nil { + updateCRBApplyFailed(&s.instance) + return updateStatusAndStopWithError(err) + } + + removed := getRemoved(crbList.Items, s.instance.Spec.Security.Administrators) + missing := getMissing(crbList.Items, s.instance.Spec.Security.Administrators) + + // no administrator changes - nothing to apply, stop processing + if len(removed) == 0 && len(missing) == 0 { + return stop() + } + + for _, fn := range []func() error{ + newDelCRBs(ctx, shootAdminClient, removed), + newAddCRBs(ctx, shootAdminClient, missing), + } { + if err := fn(); err != nil { + updateCRBApplyFailed(&s.instance) + return updateStatusAndStopWithError(err) + } + } + + return updateStatusAndRequeue() +} diff --git a/internal/controller/runtime/fsm/runtime_fsm_create_kubeconfig.go b/internal/controller/runtime/fsm/runtime_fsm_create_kubeconfig.go index 608c1435..97ec0e79 100644 --- a/internal/controller/runtime/fsm/runtime_fsm_create_kubeconfig.go +++ b/internal/controller/runtime/fsm/runtime_fsm_create_kubeconfig.go @@ -54,7 +54,7 @@ func sFnCreateKubeconfig(ctx context.Context, m *fsm, s *systemState)
(stateFn, imv1.ConditionTypeRuntimeKubeconfigReady, imv1.ConditionReasonGardenerCRReady, "Gardener Cluster CR is ready.", - sFnProcessShoot) + sFnApplyClusterRoleBindings) } func makeGardenerClusterForRuntime(runtime imv1.Runtime, shoot *gardener.Shoot) *imv1.GardenerCluster { diff --git a/internal/controller/runtime/fsm/runtime_fsm_patch_shoot.go b/internal/controller/runtime/fsm/runtime_fsm_patch_shoot.go index 5fd72721..3d575966 100644 --- a/internal/controller/runtime/fsm/runtime_fsm_patch_shoot.go +++ b/internal/controller/runtime/fsm/runtime_fsm_patch_shoot.go @@ -35,7 +35,7 @@ func sFnPatchExistingShoot(ctx context.Context, m *fsm, s *systemState) (stateFn if updatedShoot.Generation == s.shoot.Generation { m.log.Info("Gardener shoot for runtime did not change after patch, moving to processing", "Name", s.shoot.Name, "Namespace", s.shoot.Namespace) - return switchState(sFnProcessShoot) + return switchState(sFnApplyClusterRoleBindings) } m.log.Info("Gardener shoot for runtime patched successfully", "Name", s.shoot.Name, "Namespace", s.shoot.Namespace) diff --git a/internal/controller/runtime/fsm/runtime_fsm_process_shoot.go b/internal/controller/runtime/fsm/runtime_fsm_process_shoot.go deleted file mode 100644 index 47956767..00000000 --- a/internal/controller/runtime/fsm/runtime_fsm_process_shoot.go +++ /dev/null @@ -1,20 +0,0 @@ -package fsm - -import ( - "context" - - imv1 "github.com/kyma-project/infrastructure-manager/api/v1" - ctrl "sigs.k8s.io/controller-runtime" -) - -func sFnProcessShoot(_ context.Context, m *fsm, s *systemState) (stateFn, *ctrl.Result, error) { - m.log.Info("Process cluster state - the last one") - - // process shoot get kubeconfig and create cluster role bindings - s.instance.UpdateStateReady( - imv1.ConditionTypeRuntimeProvisioned, - imv1.ConditionReasonConfigurationCompleted, - "Runtime processing completed successfully") - - return updateStatusAndStop() -} diff --git 
a/internal/controller/runtime/fsm/runtime_fsm_waiting_for_shoot_reconcile.go b/internal/controller/runtime/fsm/runtime_fsm_waiting_for_shoot_reconcile.go index ec79dcd6..f8c65e8a 100644 --- a/internal/controller/runtime/fsm/runtime_fsm_waiting_for_shoot_reconcile.go +++ b/internal/controller/runtime/fsm/runtime_fsm_waiting_for_shoot_reconcile.go @@ -44,7 +44,7 @@ func sFnWaitForShootReconcile(_ context.Context, m *fsm, s *systemState) (stateF case gardener.LastOperationStateSucceeded: m.log.Info(fmt.Sprintf("Shoot %s successfully updated, moving to processing", s.shoot.Name)) - return ensureStatusConditionIsSetAndContinue(&s.instance, imv1.ConditionTypeRuntimeProvisioned, imv1.ConditionReasonProcessing, "Shoot update is completed", sFnProcessShoot) + return ensureStatusConditionIsSetAndContinue(&s.instance, imv1.ConditionTypeRuntimeProvisioned, imv1.ConditionReasonProcessing, "Shoot update is completed", sFnApplyClusterRoleBindings) } m.log.Info("Update did not processed, exiting with no retry")