From 2354b901b4ef64dede8d0b16d87bae9a8953e847 Mon Sep 17 00:00:00 2001 From: maltemorgenstern <65773564+maltemorgenstern@users.noreply.github.com> Date: Thu, 27 Jun 2024 21:51:24 +0200 Subject: [PATCH 1/3] feat: Provide credentials in imagePullSecret without global access When running a cluster that contains images from a private registry, authentication needs to be configured. This is done with Kubernetes imagePullSecrets. By default the trivy-operator is able to read the secrets attached to a target workload and use them to access the container registry. While this is necessary when different registries are used inside the cluster, it comes with a security downside: the operator needs access to all secrets in the cluster. If all images are pulled from a single private registry, one ImagePullSecret can be used for all of them, and the most convenient place for it is the operator namespace. This change does not affect deployments running with default settings (global access enabled). If that access is disabled, however, the operator can instead read a single ImagePullSecret and use it for all images. --- pkg/kube/secrets.go | 27 ++++++++++----- .../controller/workload.go | 33 ++++++++++++++----- 2 files changed, 42 insertions(+), 18 deletions(-) diff --git a/pkg/kube/secrets.go b/pkg/kube/secrets.go index 99647cb76..9d302b2a2 100644 --- a/pkg/kube/secrets.go +++ b/pkg/kube/secrets.go @@ -124,7 +124,7 @@ const ( type SecretsReader interface { ListByLocalObjectReferences(ctx context.Context, refs []corev1.LocalObjectReference, ns string) ([]corev1.Secret, error) ListImagePullSecretsByPodSpec(ctx context.Context, spec corev1.PodSpec, ns string) ([]corev1.Secret, error) - CredentialsByServer(ctx context.Context, workload client.Object, secretsInfo map[string]string, multiSecretSupport bool) (map[string]docker.Auth, error) + CredentialsByServer(ctx context.Context, workload client.Object, secretsInfo map[string]string, multiSecretSupport bool, globalAccessEnabled bool) (map[string]docker.Auth, error) } // NewSecretsReader constructs a new SecretsReader which is using the client @@ -208,19 +208,28 @@ func (r *secretsReader) GetSecretsFromEnv(ctx context.Context, secretsInfo map[s return secretsFromEnv, nil } -func (r *secretsReader) CredentialsByServer(ctx context.Context, workload client.Object, secretsInfo map[string]string, multiSecretSupport bool) (map[string]docker.Auth, error) { - spec, err := GetPodSpec(workload) - if err != nil { - return nil, fmt.Errorf("getting Pod template: %w", err) - } - imagePullSecrets, err := r.ListImagePullSecretsByPodSpec(ctx, spec, workload.GetNamespace()) - if err != nil { - return nil, err +func (r *secretsReader) CredentialsByServer(ctx context.Context, workload client.Object, secretsInfo map[string]string, multiSecretSupport bool, globalAccessEnabled bool) (map[string]docker.Auth, error) { + var imagePullSecrets []corev1.Secret + + if globalAccessEnabled { + spec, err := GetPodSpec(workload) + if err != nil { + return nil, fmt.Errorf("getting Pod template: %w", err) + } + + imagePullSecretsFromSpec, err := r.ListImagePullSecretsByPodSpec(ctx, spec, workload.GetNamespace()) + if err != nil { + return nil, err + } + + imagePullSecrets = append(imagePullSecrets, imagePullSecretsFromSpec...) } + secretsFromEnv, err := r.GetSecretsFromEnv(ctx, secretsInfo) if err != nil { return nil, err } + imagePullSecrets = append(imagePullSecrets, secretsFromEnv...)
return MapDockerRegistryServersToAuths(imagePullSecrets, multiSecretSupport) diff --git a/pkg/vulnerabilityreport/controller/workload.go b/pkg/vulnerabilityreport/controller/workload.go index 919a23368..33eaefa12 100644 --- a/pkg/vulnerabilityreport/controller/workload.go +++ b/pkg/vulnerabilityreport/controller/workload.go @@ -252,19 +252,34 @@ func (r *WorkloadController) SubmitScanJob(ctx context.Context, owner client.Obj "name", owner.GetName(), "namespace", owner.GetNamespace()) var err error credentials := make(map[string]docker.Auth, 0) + + privateRegistrySecrets, err := r.Config.GetPrivateRegistryScanSecretsNames() + if err != nil { + return err + } + + pConfig, err := r.PluginContext.GetConfig() + if err != nil { + return err + } + + multiSecretSupport := trivy.MultiSecretSupport(trivy.Config{PluginConfig: pConfig}) + if r.AccessGlobalSecretsAndServiceAccount && len(reusedReports) == 0 { - privateRegistrySecrets, err := r.Config.GetPrivateRegistryScanSecretsNames() - if err != nil { - return err - } - pConfig, err := r.PluginContext.GetConfig() + // Global access is enabled - therefore imagePullSecrets from the podSpec can be used + credentials, err = r.CredentialsByServer(ctx, owner, privateRegistrySecrets, multiSecretSupport, true) if err != nil { return err } - multiSecretSupport := trivy.MultiSecretSupport(trivy.Config{PluginConfig: pConfig}) - credentials, err = r.CredentialsByServer(ctx, owner, privateRegistrySecrets, multiSecretSupport) - if err != nil { - return err + } else { + // Global access is disabled - check if privateRegistrySecrets references an imagePullSecret in the operator namespace + imagePullSecretName, ok := privateRegistrySecrets[r.Config.Namespace] + if ok { + // privateRegistrySecrets references an imagePullSecret in the operator namespace - use this for pulling private images + credentials, err = r.CredentialsByServer(ctx, owner, map[string]string{r.Config.Namespace: imagePullSecretName}, multiSecretSupport, false) + if err != nil { + return err + } } } From 72b9b9e8d0b7cdb234e80a632b32bece24cbde51 Mon Sep 17 00:00:00 2001 From: maltemorgenstern <65773564+maltemorgenstern@users.noreply.github.com> Date: Sun, 21 Jul 2024 16:53:06 +0200 Subject: [PATCH 2/3] test: Add tests for CredentialsByServer function The unit tests have been fixed and now cover three different scenarios: - No credentials have been configured at all - ImagePullSecret for workload exists but global access is disabled - ImagePullSecret for workload exists and global access is enabled Also a small typo in the CONTRIBUTING.md is being fixed. 
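Note (illustration only, not part of these patches): a minimal sketch of how the updated CredentialsByServer signature behaves with global access disabled. The fake client builder, scheme helper and import paths are assumed to match the ones used in the tests added below; with globalAccessEnabled set to false the workload's imagePullSecrets are ignored and only the secrets named in secretsInfo are resolved, so an empty map yields no credentials.

```
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"

	"github.com/aquasecurity/trivy-operator/pkg/kube"
	"github.com/aquasecurity/trivy-operator/pkg/trivyoperator"
)

func main() {
	// Fake cluster without any secrets or service accounts.
	client := fake.NewClientBuilder().WithScheme(trivyoperator.NewScheme()).Build()
	sr := kube.NewSecretsReader(client)

	pod := corev1.Pod{
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{{Image: "quay.io/nginx:1.16"}},
		},
	}

	auths, err := sr.CredentialsByServer(context.Background(), &pod,
		map[string]string{}, // secretsInfo: no registry secrets configured
		false,               // multiSecretSupport
		false,               // globalAccessEnabled: podSpec imagePullSecrets are skipped
	)
	fmt.Println(len(auths), err) // expected: 0 <nil>
}
```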
--- CONTRIBUTING.md | 2 +- pkg/kube/secrets_test.go | 75 ++++++++++++++++++- .../secret_same_domain_imagePullSecret.json | 15 ++++ 3 files changed, 88 insertions(+), 4 deletions(-) create mode 100644 pkg/kube/testdata/fixture/secret_same_domain_imagePullSecret.json diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 19cd0e4ea..ad42fd69b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -134,7 +134,7 @@ go tool cover -html=coverage.txt ### Run Operator envtest -The Operator envtest spin us partial k8s components (api-server, etcd) and test controllers for reousce, workload, ttl, rbac and more +The Operator envtest spins up partial k8s components (api-server, etcd) and test controllers for resource, workload, ttl, rbac and more ``` mage test:envtest diff --git a/pkg/kube/secrets_test.go b/pkg/kube/secrets_test.go index 94050ad24..15c734b23 100644 --- a/pkg/kube/secrets_test.go +++ b/pkg/kube/secrets_test.go @@ -17,6 +17,7 @@ import ( "github.com/aquasecurity/trivy-operator/pkg/docker" "github.com/aquasecurity/trivy-operator/pkg/kube" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func TestMapDockerRegistryServersToAuths(t *testing.T) { @@ -360,15 +361,37 @@ func loadResource(filePath string, resource interface{}) error { } func Test_secretsReader_CredentialsByServer(t *testing.T) { - t.Run("Test with service account and secret with same registry domain should map container images to Docker authentication credentials from Pod secret", func(t *testing.T) { + t.Run("Test with no secrets or serviceaccounts configured and globalAccess disabled should not map any credentials", func(t *testing.T) { + + client := fake.NewClientBuilder().WithScheme(trivyoperator.NewScheme()).Build() + sr := kube.NewSecretsReader(client) + pod := corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Image: "quay.io/nginx:1.16", + }, + }, + }, + } + + auths, err := sr.CredentialsByServer(context.Background(), &pod, map[string]string{}, false, false) + require.NoError(t, err) + assert.Equal(t, 0, len(auths)) + }) + + t.Run("Test with secrets configured but globalAccess disabled should map container images to Docker authentication credentials from serviceaccount secret", func(t *testing.T) { var secret corev1.Secret err := loadResource("./testdata/fixture/secret_same_domain.json", &secret) require.NoError(t, err) + var imagePullSecret corev1.Secret + err = loadResource("./testdata/fixture/secret_same_domain_imagePullSecret.json", &imagePullSecret) + require.NoError(t, err) var sa corev1.ServiceAccount err = loadResource("./testdata/fixture/sa_with_image_pull_secret_same_domain.json", &sa) require.NoError(t, err) - client := fake.NewClientBuilder().WithScheme(trivyoperator.NewScheme()).WithObjects(&secret).WithObjects(&sa).Build() + client := fake.NewClientBuilder().WithScheme(trivyoperator.NewScheme()).WithObjects(&secret).WithObjects(&sa).WithObjects(&imagePullSecret).Build() sr := kube.NewSecretsReader(client) pod := corev1.Pod{ Spec: corev1.PodSpec{ @@ -377,16 +400,62 @@ func Test_secretsReader_CredentialsByServer(t *testing.T) { Image: "quay.io/nginx:1.16", }, }, + ImagePullSecrets: []corev1.LocalObjectReference{ + { + Name: "private-regcred", + }, + }, }, } auths, err := sr.CredentialsByServer(context.Background(), &pod, map[string]string{ "default": "regcred", - }, false) + }, false, false) require.NoError(t, err) assert.Equal(t, 1, len(auths)) assert.Equal(t, map[string]docker.Auth{ "quay.io": {Auth: "dXNlcjpBZG1pbjEyMzQ1", Username: "user", Password: 
"Admin12345"}, }, auths) }) + + t.Run("Test with secrets configured and globalAccess enabled should map container images to Docker authentication credentials from serviceaccount and imagePullSecret", func(t *testing.T) { + + var secret corev1.Secret + err := loadResource("./testdata/fixture/secret_same_domain.json", &secret) + require.NoError(t, err) + var imagePullSecret corev1.Secret + err = loadResource("./testdata/fixture/secret_same_domain_imagePullSecret.json", &imagePullSecret) + require.NoError(t, err) + var sa corev1.ServiceAccount + err = loadResource("./testdata/fixture/sa_with_image_pull_secret_same_domain.json", &sa) + require.NoError(t, err) + client := fake.NewClientBuilder().WithScheme(trivyoperator.NewScheme()).WithObjects(&secret).WithObjects(&sa).WithObjects(&imagePullSecret).Build() + sr := kube.NewSecretsReader(client) + pod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Image: "quay.io/nginx:1.16", + }, + }, + ImagePullSecrets: []corev1.LocalObjectReference{ + { + Name: "private-regcred", + }, + }, + }, + } + + auths, err := sr.CredentialsByServer(context.Background(), &pod, map[string]string{ + "default": "regcred", + }, true, true) + require.NoError(t, err) + assert.Equal(t, 1, len(auths)) + assert.Equal(t, map[string]docker.Auth{ + "quay.io": {Auth: "", Username: "admin,user", Password: "Password12345,Admin12345"}, + }, auths) + }) } diff --git a/pkg/kube/testdata/fixture/secret_same_domain_imagePullSecret.json b/pkg/kube/testdata/fixture/secret_same_domain_imagePullSecret.json new file mode 100644 index 000000000..008470fd2 --- /dev/null +++ b/pkg/kube/testdata/fixture/secret_same_domain_imagePullSecret.json @@ -0,0 +1,15 @@ +{ + "apiVersion": "v1", + "data": { + ".dockerconfigjson": "eyJhdXRocyI6eyJxdWF5LmlvIjp7ImF1dGgiOiJZV1J0YVc0NlVHRnpjM2R2Y21ReE1qTTBOUT09In19fQ==" + }, + "kind": "Secret", + "metadata": { + "creationTimestamp": "2022-07-10T08:36:58Z", + "name": "private-regcred", + "namespace": "default", + "resourceVersion": "317629", + "uid": "ab0f9c59-ee6d-4c2d-98f6-a6fac0e9a35d" + }, + "type": "kubernetes.io/dockerconfigjson" +} From 46ef0f64aebe302b7967c16847adf373f4ff6df4 Mon Sep 17 00:00:00 2001 From: maltemorgenstern <65773564+maltemorgenstern@users.noreply.github.com> Date: Sun, 21 Jul 2024 19:45:39 +0200 Subject: [PATCH 3/3] fix: Allow trivy-operator to update secrets in the operator namespace In [1] the trivy-operator updates the temporary secrets to reflect the ownership by each scan job. This requires the 'update' persmission for secrets - otherwise the call with result in an error ("forbidden"). While the ClusterRole has this permission the Role does not. This needs to be added to run the trivy-operator without global access - while still using imagePullSecrets to configure authentication for private registries. 
[1]: https://github.com/aquasecurity/trivy-operator/blob/d5d7e3d25c5e98f92c6a596af639b1f8df721869/pkg/vulnerabilityreport/controller/workload.go#L380-L389 --- deploy/helm/templates/rbac/role.yaml | 1 + deploy/static/trivy-operator.yaml | 57 ++++++++++++++-------------- 2 files changed, 30 insertions(+), 28 deletions(-) diff --git a/deploy/helm/templates/rbac/role.yaml b/deploy/helm/templates/rbac/role.yaml index 6c896065f..9b835f9dd 100644 --- a/deploy/helm/templates/rbac/role.yaml +++ b/deploy/helm/templates/rbac/role.yaml @@ -23,4 +23,5 @@ rules: - create - get - delete + - update {{- end }} diff --git a/deploy/static/trivy-operator.yaml b/deploy/static/trivy-operator.yaml index 2519abc0b..d43683146 100644 --- a/deploy/static/trivy-operator.yaml +++ b/deploy/static/trivy-operator.yaml @@ -3169,6 +3169,25 @@ spec: app.kubernetes.io/instance: trivy-operator type: ClusterIP --- +# Source: trivy-operator/templates/rbac/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: trivy-operator + labels: + app.kubernetes.io/name: trivy-operator + app.kubernetes.io/instance: trivy-operator + app.kubernetes.io/version: "0.21.3" + app.kubernetes.io/managed-by: kubectl +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: trivy-operator +subjects: + - kind: ServiceAccount + name: trivy-operator + namespace: trivy-system +--- # Source: trivy-operator/templates/rbac/clusterrole.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -3537,11 +3556,12 @@ rules: verbs: - get --- -# Source: trivy-operator/templates/rbac/clusterrolebinding.yaml +# Source: trivy-operator/templates/rbac/leader-election-rolebinding.yaml apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding +kind: RoleBinding metadata: - name: trivy-operator + name: trivy-operator-leader-election + namespace: trivy-system labels: app.kubernetes.io/name: trivy-operator app.kubernetes.io/instance: trivy-operator @@ -3549,8 +3569,8 @@ metadata: app.kubernetes.io/managed-by: kubectl roleRef: apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: trivy-operator + kind: Role + name: trivy-operator-leader-election subjects: - kind: ServiceAccount name: trivy-operator @@ -3584,11 +3604,11 @@ rules: verbs: - create --- -# Source: trivy-operator/templates/rbac/leader-election-rolebinding.yaml +# Source: trivy-operator/templates/rbac/rolebinding.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: - name: trivy-operator-leader-election + name: trivy-operator namespace: trivy-system labels: app.kubernetes.io/name: trivy-operator @@ -3598,7 +3618,7 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: Role - name: trivy-operator-leader-election + name: trivy-operator subjects: - kind: ServiceAccount name: trivy-operator @@ -3633,26 +3653,7 @@ rules: - create - get - delete ---- -# Source: trivy-operator/templates/rbac/rolebinding.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: trivy-operator - namespace: trivy-system - labels: - app.kubernetes.io/name: trivy-operator - app.kubernetes.io/instance: trivy-operator - app.kubernetes.io/version: "0.21.3" - app.kubernetes.io/managed-by: kubectl -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: trivy-operator -subjects: - - kind: ServiceAccount - name: trivy-operator - namespace: trivy-system + - update --- # Source: trivy-operator/templates/rbac/view-configauditreports-clusterrole.yaml # permissions for end users to view 
configauditreports
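
For reference only (this snippet is not generated by the patches above): a sketch of the namespaced Role the operator ends up with when running without global access. The name and namespace follow the static manifest above, the resource is assumed to be secrets as described in the third commit message, and the verb list mirrors the Helm template change, including the new update verb.

```
package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// operatorSecretsRole sketches the namespaced Role needed so the operator can
// manage temporary secrets in its own namespace: create, get, delete and,
// after this patch series, also update.
func operatorSecretsRole() *rbacv1.Role {
	return &rbacv1.Role{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "trivy-operator",
			Namespace: "trivy-system",
		},
		Rules: []rbacv1.PolicyRule{{
			APIGroups: []string{""},
			Resources: []string{"secrets"},
			Verbs:     []string{"create", "get", "delete", "update"},
		}},
	}
}

func main() {
	fmt.Println(operatorSecretsRole().Rules[0].Verbs)
}
```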