Merge pull request #902 from equinor/master
Release radix-operator
nilsgstrabo authored Aug 7, 2023
2 parents 196c25a + 381e7b4 commit 3b0bf0a
Showing 9 changed files with 232 additions and 148 deletions.
4 changes: 2 additions & 2 deletions charts/radix-operator/Chart.yaml
@@ -1,7 +1,7 @@
apiVersion: v2
name: radix-operator
-version: 1.20.0
-appVersion: 1.40.0
+version: 1.20.2
+appVersion: 1.40.2
kubeVersion: ">=1.24.0"
description: Radix Operator
keywords:
2 changes: 2 additions & 0 deletions charts/radix-operator/templates/deployment.yaml
@@ -15,6 +15,8 @@ spec:
labels:
{{- include "radix-operator.selectorLabels" . | nindent 8 }}
spec:
strategy:
type: Recreate
serviceAccount: {{ include "radix-operator.serviceAccountName" . }}
securityContext:
runAsNonRoot: true
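The added Recreate strategy tears down the old operator pod before its replacement starts, so two operator instances never run side by side. For reference, the equivalent setting in the Kubernetes Go API — a minimal sketch, not code from this PR:

```go
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
)

func main() {
	// Recreate scales the old ReplicaSet to zero before the new one is
	// created, guaranteeing at most one running copy of the operator.
	strategy := appsv1.DeploymentStrategy{Type: appsv1.RecreateDeploymentStrategyType}
	fmt.Println(strategy.Type) // prints "Recreate"
}
```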
13 changes: 10 additions & 3 deletions pkg/apis/application/application_test.go
@@ -14,6 +14,7 @@ import (
radixclient "github.com/equinor/radix-operator/pkg/client/clientset/versioned"
fakeradix "github.com/equinor/radix-operator/pkg/client/clientset/versioned/fake"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -247,21 +248,27 @@ func TestOnSync_RegistrationCreated_AppNamespaceReconciled(t *testing.T) {
func TestOnSync_NoUserGroupDefined_DefaultUserGroupSet(t *testing.T) {
// Setup
tu, client, kubeUtil, radixClient := setupTest()
+defaultRole := "9876-54321-09876"
defer os.Clearenv()
-os.Setenv(defaults.OperatorDefaultUserGroupEnvironmentVariable, "9876-54321-09876")
+os.Setenv(defaults.OperatorDefaultUserGroupEnvironmentVariable, defaultRole)

// Test
applyRegistrationWithSync(tu, client, kubeUtil, radixClient, utils.ARadixRegistration().
WithName("any-app").
-WithAdGroups([]string{}))
+WithAdGroups([]string{}).
+WithReaderAdGroups([]string{}))

rolebindings, _ := client.RbacV1().RoleBindings("any-app-app").List(context.TODO(), metav1.ListOptions{})
assert.Equal(t, 5, len(rolebindings.Items))
assert.True(t, roleBindingByNameExists(defaults.AppAdminRoleName, rolebindings))
assert.True(t, roleBindingByNameExists(defaults.PipelineAppRoleName, rolebindings))
assert.True(t, roleBindingByNameExists(defaults.RadixTektonAppRoleName, rolebindings))
assert.Equal(t, "9876-54321-09876", getRoleBindingByName(defaults.AppAdminRoleName, rolebindings).Subjects[0].Name)
assert.Equal(t, defaultRole, getRoleBindingByName(defaults.AppAdminRoleName, rolebindings).Subjects[0].Name)

+clusterRoleBindings, _ := client.RbacV1().ClusterRoleBindings().List(context.Background(), metav1.ListOptions{})
+require.Len(t, getClusterRoleBindingByName("radix-platform-user-rr-any-app", clusterRoleBindings).Subjects, 1)
+assert.Equal(t, defaultRole, getClusterRoleBindingByName("radix-platform-user-rr-any-app", clusterRoleBindings).Subjects[0].Name)
+assert.Len(t, getClusterRoleBindingByName("radix-platform-user-rr-reader-any-app", clusterRoleBindings).Subjects, 0)
}

func TestOnSync_LimitsDefined_LimitsSet(t *testing.T) {
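The new assertions rely on getClusterRoleBindingByName, a test helper defined elsewhere in the package and not shown in this hunk. A minimal sketch of what such a helper plausibly looks like; the actual implementation may differ:

```go
package application

import rbacv1 "k8s.io/api/rbac/v1"

// getClusterRoleBindingByName returns the binding with the given name,
// or nil when the list contains no such binding.
func getClusterRoleBindingByName(name string, bindings *rbacv1.ClusterRoleBindingList) *rbacv1.ClusterRoleBinding {
	for i := range bindings.Items {
		if bindings.Items[i].Name == name {
			return &bindings.Items[i]
		}
	}
	return nil
}
```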
29 changes: 18 additions & 11 deletions pkg/apis/application/rolebinding.go
@@ -2,6 +2,7 @@ package application

import (
"fmt"

"github.com/equinor/radix-operator/pkg/apis/defaults"
"github.com/equinor/radix-operator/pkg/apis/defaults/k8s"
"github.com/equinor/radix-operator/pkg/apis/kube"
@@ -46,31 +47,34 @@ func (app Application) applyRbacAppNamespace() error {

// ApplyRbacRadixRegistration Grants access to radix registration
func (app Application) applyRbacRadixRegistration() error {
-k := app.kubeutil

rr := app.registration
appName := rr.Name

+// Admin RBAC
clusterRoleName := fmt.Sprintf("radix-platform-user-rr-%s", appName)
-clusterRoleReaderName := fmt.Sprintf("radix-platform-user-rr-reader-%s", appName)

adminClusterRole := app.rrClusterRole(clusterRoleName, []string{"get", "list", "watch", "update", "patch", "delete"})
-appAdminSubjects := getAppAdminSubjects(rr)
+appAdminSubjects, err := getAppAdminSubjects(rr)
+if err != nil {
+return err
+}
adminClusterRoleBinding := app.rrClusterroleBinding(adminClusterRole, appAdminSubjects)

+// Reader RBAC
+clusterRoleReaderName := fmt.Sprintf("radix-platform-user-rr-reader-%s", appName)
readerClusterRole := app.rrClusterRole(clusterRoleReaderName, []string{"get", "list", "watch"})
appReaderSubjects := kube.GetRoleBindingGroups(rr.Spec.ReaderAdGroups)
readerClusterRoleBinding := app.rrClusterroleBinding(readerClusterRole, appReaderSubjects)

+// Apply roles and bindings
for _, clusterRole := range []*auth.ClusterRole{adminClusterRole, readerClusterRole} {
-err := k.ApplyClusterRole(clusterRole)
+err := app.kubeutil.ApplyClusterRole(clusterRole)
if err != nil {
return err
}
}

for _, clusterRoleBindings := range []*auth.ClusterRoleBinding{adminClusterRoleBinding, readerClusterRoleBinding} {
-err := k.ApplyClusterRoleBinding(clusterRoleBindings)
+err := app.kubeutil.ApplyClusterRoleBinding(clusterRoleBindings)
if err != nil {
return err
}
@@ -79,17 +83,20 @@ func (app Application) applyRbacRadixRegistration() error {
return nil
}

-func getAppAdminSubjects(rr *v1.RadixRegistration) []auth.Subject {
-subjects := kube.GetRoleBindingGroups(rr.Spec.AdGroups)
-
+func getAppAdminSubjects(rr *v1.RadixRegistration) ([]auth.Subject, error) {
+adGroups, err := utils.GetAdGroups(rr)
+if err != nil {
+return nil, err
+}
+subjects := kube.GetRoleBindingGroups(adGroups)
if rr.Spec.MachineUser {
subjects = append(subjects, auth.Subject{
Kind: "ServiceAccount",
Name: defaults.GetMachineUserRoleName(rr.Name),
Namespace: utils.GetAppNamespace(rr.Name),
})
}
-return subjects
+return subjects, nil
}

// ApplyRbacOnPipelineRunner Grants access to radix pipeline
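getAppAdminSubjects now delegates to utils.GetAdGroups, which — judging from the test above, where a registration with empty AdGroups resolves to the operator's default user group — appears to fall back to a configured default when the registration defines no groups. A sketch of that behaviour under this assumption; the function name and signature here are illustrative, not the actual radix-operator API:

```go
package main

import (
	"errors"
	"fmt"
)

// resolveAdGroups mimics the assumed fallback: use the groups from the
// RadixRegistration when present, otherwise the cluster-wide default group.
func resolveAdGroups(adGroups []string, defaultGroup string) ([]string, error) {
	if len(adGroups) > 0 {
		return adGroups, nil
	}
	if defaultGroup == "" {
		return nil, errors.New("no AD groups on registration and no default user group configured")
	}
	return []string{defaultGroup}, nil
}

func main() {
	groups, _ := resolveAdGroups(nil, "9876-54321-09876")
	fmt.Println(groups) // [9876-54321-09876]
}
```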
135 changes: 134 additions & 1 deletion pkg/apis/deployment/deployment_test.go
@@ -5,15 +5,17 @@ import (
"context"
"errors"
"fmt"
-autoscalingv2 "k8s.io/api/autoscaling/v2"
"os"
"strconv"
"strings"
"testing"
"time"

+autoscalingv2 "k8s.io/api/autoscaling/v2"

radixutils "github.com/equinor/radix-common/utils"
radixmaps "github.com/equinor/radix-common/utils/maps"
"github.com/equinor/radix-common/utils/pointers"
"github.com/equinor/radix-common/utils/slice"
"github.com/equinor/radix-operator/pkg/apis/defaults"
"github.com/equinor/radix-operator/pkg/apis/kube"
@@ -40,6 +42,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes"
kubefake "k8s.io/client-go/kubernetes/fake"
secretProvider "sigs.k8s.io/secrets-store-csi-driver/pkg/client/clientset/versioned"
@@ -314,6 +317,17 @@ func TestObjectSynced_MultiComponent_ContainsAllElements(t *testing.T) {
deploy := getDeploymentByName(componentName, deployments)
assert.Equal(t, componentName, deploy.Spec.Template.Labels[kube.RadixComponentLabel], "invalid/missing value for label component-name")
}

expectedStrategy := appsv1.DeploymentStrategy{
Type: appsv1.RollingUpdateDeploymentStrategyType,
RollingUpdate: &appsv1.RollingUpdateDeployment{
MaxUnavailable: &intstr.IntOrString{Type: intstr.String, StrVal: "25%"},
MaxSurge: &intstr.IntOrString{Type: intstr.String, StrVal: "25%"},
},
}
for _, deployment := range deployments {
assert.Equal(t, expectedStrategy, deployment.Spec.Strategy)
}
})

t.Run(fmt.Sprintf("%s: validate hpa", testScenario), func(t *testing.T) {
Expand Down Expand Up @@ -1491,6 +1505,125 @@ func TestObjectUpdated_ZeroReplicasExistsAndNotSpecifiedReplicas_SetsDefaultRepl
assert.Equal(t, int32(1), *deployments.Items[0].Spec.Replicas)
}

func TestObjectSynced_DeploymentReplicasSetAccordingToSpec(t *testing.T) {
tu, client, kubeUtil, radixclient, prometheusclient, _ := setupTest()
defer teardownTest()
envNamespace := utils.GetEnvironmentNamespace("anyapp", "test")

// Test
applyDeploymentWithSync(tu, client, kubeUtil, radixclient, prometheusclient, utils.ARadixDeployment().
WithDeploymentName("a_deployment_name").
WithAppName("anyapp").
WithEnvironment("test").
WithComponents(
utils.NewDeployComponentBuilder().WithName("comp1"),
utils.NewDeployComponentBuilder().WithName("comp2").WithReplicas(pointers.Ptr(2)),
utils.NewDeployComponentBuilder().WithName("comp3").WithReplicas(pointers.Ptr(4)).WithHorizontalScaling(pointers.Ptr(int32(5)), int32(10), nil, nil),
utils.NewDeployComponentBuilder().WithName("comp4").WithReplicas(pointers.Ptr(6)).WithHorizontalScaling(pointers.Ptr(int32(5)), int32(10), nil, nil),
utils.NewDeployComponentBuilder().WithName("comp5").WithReplicas(pointers.Ptr(11)).WithHorizontalScaling(pointers.Ptr(int32(5)), int32(10), nil, nil),
utils.NewDeployComponentBuilder().WithName("comp6").WithReplicas(pointers.Ptr(0)).WithHorizontalScaling(pointers.Ptr(int32(5)), int32(10), nil, nil),
utils.NewDeployComponentBuilder().WithName("comp7").WithHorizontalScaling(pointers.Ptr(int32(5)), int32(10), nil, nil),
))

comp1, _ := client.AppsV1().Deployments(envNamespace).Get(context.TODO(), "comp1", metav1.GetOptions{})
assert.Equal(t, int32(1), *comp1.Spec.Replicas)
comp2, _ := client.AppsV1().Deployments(envNamespace).Get(context.TODO(), "comp2", metav1.GetOptions{})
assert.Equal(t, int32(2), *comp2.Spec.Replicas)
comp3, _ := client.AppsV1().Deployments(envNamespace).Get(context.TODO(), "comp3", metav1.GetOptions{})
assert.Equal(t, int32(5), *comp3.Spec.Replicas)
comp4, _ := client.AppsV1().Deployments(envNamespace).Get(context.TODO(), "comp4", metav1.GetOptions{})
assert.Equal(t, int32(6), *comp4.Spec.Replicas)
comp5, _ := client.AppsV1().Deployments(envNamespace).Get(context.TODO(), "comp5", metav1.GetOptions{})
assert.Equal(t, int32(10), *comp5.Spec.Replicas)
comp6, _ := client.AppsV1().Deployments(envNamespace).Get(context.TODO(), "comp6", metav1.GetOptions{})
assert.Equal(t, int32(0), *comp6.Spec.Replicas)
comp7, _ := client.AppsV1().Deployments(envNamespace).Get(context.TODO(), "comp7", metav1.GetOptions{})
assert.Equal(t, int32(5), *comp7.Spec.Replicas)
}
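Taken together, these assertions encode a replica-resolution rule: a component stopped with replicas 0 stays at 0; without horizontal scaling the spec value (default 1) wins; with horizontal scaling the spec value is clamped to the HPA's [min, max] range. A sketch of that rule as implied by the test, not the operator's actual function:

```go
// effectiveReplicas resolves the replica count a first-time sync would set,
// as implied by the comp1..comp7 assertions above. Illustrative only.
func effectiveReplicas(specReplicas, hpaMin, hpaMax *int32) int32 {
	replicas := int32(1) // comp1: nil spec defaults to 1
	if specReplicas != nil {
		replicas = *specReplicas
	}
	if replicas == 0 || hpaMax == nil {
		return replicas // comp6 stays stopped; comp2 has no HPA
	}
	minReplicas := int32(1)
	if hpaMin != nil {
		minReplicas = *hpaMin
	}
	if replicas < minReplicas {
		replicas = minReplicas // comp3: 4 -> 5; comp7: default 1 -> 5
	}
	if replicas > *hpaMax {
		replicas = *hpaMax // comp5: 11 -> 10
	}
	return replicas
}
```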

func TestObjectSynced_DeploymentReplicasFromCurrentDeploymentWhenHPAEnabled(t *testing.T) {
tu, client, kubeUtil, radixclient, prometheusclient, _ := setupTest()
defer teardownTest()
envNamespace := utils.GetEnvironmentNamespace("anyapp", "test")

// Initial sync creating deployments should use replicas from spec
_, err := applyDeploymentWithSync(tu, client, kubeUtil, radixclient, prometheusclient, utils.ARadixDeployment().
WithDeploymentName("deployment1").
WithAppName("anyapp").
WithEnvironment("test").
WithComponents(
utils.NewDeployComponentBuilder().WithName("comp1").WithReplicas(pointers.Ptr(1)).WithHorizontalScaling(pointers.Ptr(int32(1)), int32(4), nil, nil),
))
require.NoError(t, err)

comp1, _ := client.AppsV1().Deployments(envNamespace).Get(context.TODO(), "comp1", metav1.GetOptions{})
assert.Equal(t, int32(1), *comp1.Spec.Replicas)

// Simulate HPA scaling up comp1 to 3 replicas
comp1.Spec.Replicas = pointers.Ptr[int32](3)
client.AppsV1().Deployments(envNamespace).Update(context.Background(), comp1, metav1.UpdateOptions{})

// Resync existing RD should use replicas from current deployment for HPA enabled component
err = applyDeploymentUpdateWithSync(tu, client, kubeUtil, radixclient, prometheusclient, utils.ARadixDeployment().
WithDeploymentName("deployment1").
WithAppName("anyapp").
WithEnvironment("test").
WithComponents(
utils.NewDeployComponentBuilder().WithName("comp1").WithReplicas(pointers.Ptr(1)).WithHorizontalScaling(pointers.Ptr(int32(1)), int32(4), nil, nil),
))
require.NoError(t, err)

comp1, _ = client.AppsV1().Deployments(envNamespace).Get(context.TODO(), "comp1", metav1.GetOptions{})
assert.Equal(t, int32(3), *comp1.Spec.Replicas)

// Resync new RD should use replicas from current deployment for HPA enabled component
_, err = applyDeploymentWithSync(tu, client, kubeUtil, radixclient, prometheusclient, utils.ARadixDeployment().
WithDeploymentName("deployment2").
WithAppName("anyapp").
WithEnvironment("test").
WithComponents(
utils.NewDeployComponentBuilder().WithName("comp1").WithReplicas(pointers.Ptr(1)).WithHorizontalScaling(pointers.Ptr(int32(1)), int32(4), nil, nil),
))
require.NoError(t, err)

comp1, _ = client.AppsV1().Deployments(envNamespace).Get(context.TODO(), "comp1", metav1.GetOptions{})
assert.Equal(t, int32(3), *comp1.Spec.Replicas)

// Resync new RD with HPA removed should use replicas from RD spec
_, err = applyDeploymentWithSync(tu, client, kubeUtil, radixclient, prometheusclient, utils.ARadixDeployment().
WithDeploymentName("deployment3").
WithAppName("anyapp").
WithEnvironment("test").
WithComponents(
utils.NewDeployComponentBuilder().WithName("comp1").WithReplicas(pointers.Ptr(1)),
))
require.NoError(t, err)

comp1, _ = client.AppsV1().Deployments(envNamespace).Get(context.TODO(), "comp1", metav1.GetOptions{})
assert.Equal(t, int32(1), *comp1.Spec.Replicas)
}
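In short, the rule this test pins down: on resync — whether of the same RadixDeployment or a new one — a component with HPA enabled keeps the live Deployment's replica count, which the HPA may have changed, and only a deployment without HPA reverts to the spec value. A sketch of that decision, assuming the usual appsv1 import; the names are illustrative, not the operator's actual code:

```go
// desiredReplicasOnSync picks the replica count a resync writes, per the
// behaviour asserted above.
func desiredReplicasOnSync(current *appsv1.Deployment, specReplicas int32, hpaEnabled bool) int32 {
	if hpaEnabled && current != nil && current.Spec.Replicas != nil {
		return *current.Spec.Replicas // keep what the HPA scaled to (3 above)
	}
	return specReplicas // HPA removed: back to the RD spec value (1 above)
}
```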

func TestObjectSynced_DeploymentRevisionHistoryLimit(t *testing.T) {
tu, client, kubeUtil, radixclient, prometheusclient, _ := setupTest()
defer teardownTest()
envNamespace := utils.GetEnvironmentNamespace("anyapp", "test")

// Test
applyDeploymentWithSync(tu, client, kubeUtil, radixclient, prometheusclient, utils.ARadixDeployment().
WithDeploymentName("a_deployment_name").
WithAppName("anyapp").
WithEnvironment("test").
WithComponents(
utils.NewDeployComponentBuilder().WithName("comp1"),
utils.NewDeployComponentBuilder().WithName("comp2").WithSecretRefs(v1.RadixSecretRefs{AzureKeyVaults: []v1.RadixAzureKeyVault{{}}}),
))

comp1, _ := client.AppsV1().Deployments(envNamespace).Get(context.TODO(), "comp1", metav1.GetOptions{})
assert.Nil(t, comp1.Spec.RevisionHistoryLimit)
comp2, _ := client.AppsV1().Deployments(envNamespace).Get(context.TODO(), "comp2", metav1.GetOptions{})
assert.Equal(t, pointers.Ptr(int32(0)), comp2.Spec.RevisionHistoryLimit)
}
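The asserted rule: a component referencing Azure Key Vault secrets gets RevisionHistoryLimit 0, while others keep the Kubernetes default (nil). Presumably this prevents old ReplicaSets from lingering with references to stale secret versions, though the PR itself does not state the motivation. A minimal sketch:

```go
// revisionHistoryLimit returns 0 for components with Azure Key Vault secret
// refs and nil (the Kubernetes default) otherwise, per the assertions above.
func revisionHistoryLimit(hasAzureKeyVaultRefs bool) *int32 {
	if hasAzureKeyVaultRefs {
		zero := int32(0)
		return &zero
	}
	return nil
}
```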

func TestObjectUpdated_MultipleReplicasExistsAndNotSpecifiedReplicas_SetsDefaultReplicaCount(t *testing.T) {
tu, client, kubeUtil, radixclient, prometheusclient, _ := setupTest()
defer teardownTest()
6 changes: 2 additions & 4 deletions pkg/apis/deployment/hpa.go
@@ -3,6 +3,7 @@ package deployment
import (
"context"
"fmt"

"github.com/equinor/radix-common/utils/numbers"
"github.com/equinor/radix-operator/pkg/apis/kube"
v1 "github.com/equinor/radix-operator/pkg/apis/radix/v1"
@@ -18,7 +19,6 @@ const targetCPUUtilizationPercentage int32 = 80
func (deploy *Deployment) createOrUpdateHPA(deployComponent v1.RadixCommonDeployComponent) error {
namespace := deploy.radixDeployment.Namespace
componentName := deployComponent.GetName()
-replicas := deployComponent.GetReplicas()
horizontalScaling := deployComponent.GetHorizontalScaling()

// Check if hpa config exists
@@ -27,9 +27,7 @@ func (deploy *Deployment) createOrUpdateHPA(deployComponent v1.RadixCommonDeployComponent) error {
return nil
}

-// Check if replicas == 0
-if replicas != nil && *replicas == 0 {
-log.Debugf("Skip creating HorizontalPodAutoscaler %s in namespace %s: replicas is 0", componentName, namespace)
+if isComponentStopped(deployComponent) {
return nil
}

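isComponentStopped is defined elsewhere in the deployment package; this change centralizes the "replicas explicitly set to 0" check that the deleted lines performed inline. A plausible sketch, inferred from the code it replaces — the real function may consider more than replicas:

```go
// isComponentStopped reports whether a component is stopped, i.e. its
// replica count is explicitly set to zero.
func isComponentStopped(deployComponent v1.RadixCommonDeployComponent) bool {
	replicas := deployComponent.GetReplicas()
	return replicas != nil && *replicas == 0
}
```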
(Diffs for the remaining 3 of the 9 changed files are not shown.)
