diff --git a/docs/checks/reliability.md b/docs/checks/reliability.md index b551e1f69..8452fea90 100644 --- a/docs/checks/reliability.md +++ b/docs/checks/reliability.md @@ -21,6 +21,7 @@ key | default | description `topologySpreadConstraint` | `warning` | Fails when there is no topology spread constraint on the pod `hpaMaxAvailability` | `warning` | Fails when `maxAvailable` lesser or equal than `minAvailable` (if defined) for a HorizontalPodAutoscaler `hpaMinAvailability` | `warning` | Fails when `minAvailable` (if defined) lesser or equal to one for a HorizontalPodAutoscaler +`pdbMinAvailableGreaterThanHPAMinReplicas` | `warning` | Fails when PDB `minAvailable` is greater than HPA `minReplicas` ## Background diff --git a/pkg/config/checks.go b/pkg/config/checks.go index 7d1cb334d..9707d8457 100644 --- a/pkg/config/checks.go +++ b/pkg/config/checks.go @@ -69,6 +69,7 @@ var ( "rolebindingClusterAdminRole", "hpaMaxAvailability", "hpaMinAvailability", + "pdbMinAvailableGreaterThanHPAMinReplicas", } // BuiltInChecks contains the checks that come pre-installed w/ Polaris diff --git a/pkg/config/checks/missingPodDisruptionBudget.yaml b/pkg/config/checks/missingPodDisruptionBudget.yaml index 1ff42ad85..434299be4 100644 --- a/pkg/config/checks/missingPodDisruptionBudget.yaml +++ b/pkg/config/checks/missingPodDisruptionBudget.yaml @@ -8,27 +8,23 @@ controllers: schema: "$schema": http://json-schema.org/draft-07/schema# type: object + required: [spec] properties: spec: type: object + required: [template] properties: template: type: object + required: [metadata] properties: metadata: type: object + required: [labels] properties: labels: type: object minProperties: 1 - required: - - labels - required: - - metadata - required: - - template - required: - - spec additionalSchemaStrings: policy/PodDisruptionBudget: | type: object diff --git a/pkg/config/checks/pdbMinAvailableGreaterThanHPAMinReplicas.yaml b/pkg/config/checks/pdbMinAvailableGreaterThanHPAMinReplicas.yaml new file mode 100644 index 000000000..cff63c7c1 --- /dev/null +++ b/pkg/config/checks/pdbMinAvailableGreaterThanHPAMinReplicas.yaml @@ -0,0 +1,7 @@ +successMessage: PDB and HPA are correctly configured +failureMessage: PDB minAvailable is greater than HPA minReplicas +category: Reliability +target: Controller +controllers: + include: + - Deployment diff --git a/pkg/config/default.yaml b/pkg/config/default.yaml index 8796fac90..9fc5266ea 100644 --- a/pkg/config/default.yaml +++ b/pkg/config/default.yaml @@ -12,6 +12,7 @@ checks: topologySpreadConstraint: warning hpaMaxAvailability: warning hpaMinAvailability: warning + pdbMinAvailableGreaterThanHPAMinReplicas: warning # efficiency cpuRequestsMissing: warning diff --git a/pkg/config/examples/config-full.yaml b/pkg/config/examples/config-full.yaml index 2f9598641..aa5247c5a 100644 --- a/pkg/config/examples/config-full.yaml +++ b/pkg/config/examples/config-full.yaml @@ -12,6 +12,7 @@ checks: metadataAndInstanceMismatched: warning hpaMaxAvailability: warning hpaMinAvailability: warning + pdbMinAvailableGreaterThanHPAMinReplicas: warning # efficiency cpuRequestsMissing: warning diff --git a/pkg/kube/resource.go b/pkg/kube/resource.go index 95d4eb29c..55c47951e 100644 --- a/pkg/kube/resource.go +++ b/pkg/kube/resource.go @@ -181,7 +181,7 @@ func resolveControllerFromPod(ctx context.Context, podResource kubeAPICoreV1.Pod err = cacheAllObjectsOfKind(ctx, firstOwner.APIVersion, firstOwner.Kind, dynamicClient, restMapper, objectCache) } if err != nil { - logrus.Warnf("Error caching objects of Kind 
%s %v", firstOwner.Kind, err) + logrus.Warnf("error caching objects of Kind %s %v", firstOwner.Kind, err) break } abstractObject, ok = objectCache[key] @@ -193,7 +193,7 @@ func resolveControllerFromPod(ctx context.Context, podResource kubeAPICoreV1.Pod objMeta, err := meta.Accessor(&abstractObject) if err != nil { - logrus.Warnf("Error retrieving parent metadata %s of API %s and Kind %s because of error: %v ", firstOwner.Name, firstOwner.APIVersion, firstOwner.Kind, err) + logrus.Warnf("error retrieving parent metadata %s of API %s and Kind %s because of error: %v ", firstOwner.Name, firstOwner.APIVersion, firstOwner.Kind, err) return GenericResource{}, err } podSpec := GetPodSpec(abstractObject.Object) @@ -221,7 +221,7 @@ func cacheSingleObject(ctx context.Context, apiVersion, kind, namespace, name st logrus.Debugf("Caching a single %s", kind) object, err := getObject(ctx, namespace, kind, apiVersion, name, dynamicClient, restMapper) if err != nil { - logrus.Warnf("Error retrieving object %s/%s/%s/%s because of error: %v", kind, apiVersion, namespace, name, err) + logrus.Warnf("error retrieving object %s/%s/%s/%s because of error: %v", kind, apiVersion, namespace, name, err) return err } key := fmt.Sprintf("%s/%s/%s", object.GetKind(), object.GetNamespace(), object.GetName()) @@ -235,13 +235,13 @@ func cacheAllObjectsOfKind(ctx context.Context, apiVersion, kind string, dynamic fqKind := schema.FromAPIVersionAndKind(apiVersion, kind) mapping, err := restMapper.RESTMapping(fqKind.GroupKind(), fqKind.Version) if err != nil { - logrus.Warnf("Error retrieving mapping of API %s and Kind %s because of error: %v", apiVersion, kind, err) + logrus.Warnf("error retrieving mapping of API %s and Kind %s because of error: %v", apiVersion, kind, err) return err } objects, err := dynamicClient.Resource(mapping.Resource).Namespace("").List(ctx, kubeAPIMetaV1.ListOptions{}) if err != nil { - logrus.Warnf("Error retrieving parent object API %s and Kind %s because of error: %v", mapping.Resource.Version, mapping.Resource.Resource, err) + logrus.Warnf("error retrieving parent object API %s and Kind %s because of error: %v", mapping.Resource.Version, mapping.Resource.Resource, err) return err } for idx, object := range objects.Items { diff --git a/pkg/kube/resources.go b/pkg/kube/resources.go index 3ae7de083..5fe419606 100644 --- a/pkg/kube/resources.go +++ b/pkg/kube/resources.go @@ -206,7 +206,7 @@ func CreateResourceProviderFromPath(directory string) (*ResourceProvider, error) } err = resources.addResourcesFromYaml(string(contents)) if err != nil { - logrus.Warnf("Skipping %s: cannot add resource from YAML: %v", path, err) + logrus.Warnf("skipping %s: cannot add resource from YAML: %v", path, err) } return nil } @@ -340,7 +340,7 @@ func CreateResourceProviderFromAPI(ctx context.Context, kube kubernetes.Interfac groupKind := parseGroupKind(maybeTransformKindIntoGroupKind(string(kind))) mapping, err := restMapper.RESTMapping(groupKind) if err != nil { - logrus.Warnf("Error retrieving mapping of Kind %s because of error: %v", kind, err) + logrus.Warnf("error retrieving mapping of Kind %s because of error: %v", kind, err) return nil, err } if c.Namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameNamespace { @@ -351,7 +351,7 @@ func CreateResourceProviderFromAPI(ctx context.Context, kube kubernetes.Interfac logrus.Info("Loading " + kind) objects, err := dynamic.Resource(mapping.Resource).Namespace(c.Namespace).List(ctx, metav1.ListOptions{}) if err != nil { - logrus.Warnf("Error retrieving parent object 
API %s and Kind %s because of error: %v", mapping.Resource.Version, mapping.Resource.Resource, err) + logrus.Warnf("error retrieving parent object API %s and Kind %s because of error: %v", mapping.Resource.Version, mapping.Resource.Resource, err) return nil, err } for _, obj := range objects.Items { diff --git a/pkg/validator/custom.go b/pkg/validator/custom.go new file mode 100644 index 000000000..417a5183a --- /dev/null +++ b/pkg/validator/custom.go @@ -0,0 +1,19 @@ +package validator + +import ( + "sync" + + "github.com/qri-io/jsonschema" +) + +type validatorFunction func(test schemaTestCase) (bool, []jsonschema.ValError, error) + +var validatorMapper = map[string]validatorFunction{} +var lock = &sync.Mutex{} + +func registerCustomChecks(name string, check validatorFunction) { + lock.Lock() + defer lock.Unlock() + + validatorMapper[name] = check +} diff --git a/pkg/validator/pdb_hpa_validator.go b/pkg/validator/pdb_hpa_validator.go new file mode 100644 index 000000000..e613df000 --- /dev/null +++ b/pkg/validator/pdb_hpa_validator.go @@ -0,0 +1,150 @@ +package validator + +import ( + "fmt" + "strconv" + "strings" + + "github.com/fairwindsops/polaris/pkg/kube" + "github.com/qri-io/jsonschema" + "github.com/sirupsen/logrus" + appsv1 "k8s.io/api/apps/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" + policyv1 "k8s.io/api/policy/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func init() { + registerCustomChecks("pdbMinAvailableGreaterThanHPAMinReplicas", pdbMinAvailableGreaterThanHPAMinReplicas) +} + +func pdbMinAvailableGreaterThanHPAMinReplicas(test schemaTestCase) (bool, []jsonschema.ValError, error) { + if test.ResourceProvider == nil { + logrus.Debug("ResourceProvider is nil") + return true, nil, nil + } + + deployment := &appsv1.Deployment{} + err := runtime.DefaultUnstructuredConverter.FromUnstructured(test.Resource.Resource.Object, deployment) + if err != nil { + logrus.Warnf("error converting unstructured to Deployment: %v", err) + return true, nil, nil + } + + attachedPDB, err := hasPDBAttached(*deployment, test.ResourceProvider.Resources["policy/PodDisruptionBudget"]) + if err != nil { + logrus.Warnf("error getting PodDisruptionBudget: %v", err) + return true, nil, nil + } + + attachedHPA, err := hasHPAAttached(*deployment, test.ResourceProvider.Resources["autoscaling/HorizontalPodAutoscaler"]) + if err != nil { + logrus.Warnf("error getting HorizontalPodAutoscaler: %v", err) + return true, nil, nil + } + + if attachedPDB != nil && attachedHPA != nil { + logrus.Debugf("both PDB and HPA are attached to deployment %s", deployment.Name) + + pdbMinAvailable, isPercent, err := getIntOrPercentValueSafely(attachedPDB.Spec.MinAvailable) + if err != nil { + logrus.Warnf("error getting getIntOrPercentValueSafely: %v", err) + return true, nil, nil + } + + if isPercent { + // if the value is a percentage, we need to calculate the actual value + if attachedHPA.Spec.MinReplicas == nil { + logrus.Debug("attachedHPA.Spec.MinReplicas is nil") + return true, nil, nil + } + + pdbMinAvailable, err = intstr.GetScaledValueFromIntOrPercent(attachedPDB.Spec.MinAvailable, int(*attachedHPA.Spec.MinReplicas), true) + if err != nil { + logrus.Warnf("error getting minAvailable value from PodDisruptionBudget: %v", err) + return true, nil, nil + } + } + + if attachedHPA.Spec.MinReplicas != nil && pdbMinAvailable >= int(*attachedHPA.Spec.MinReplicas) { + return false, []jsonschema.ValError{ + { + PropertyPath: "spec.minAvailable", + InvalidValue: pdbMinAvailable, + Message: 
fmt.Sprintf("The minAvailable value in the PodDisruptionBudget(%s) is %d, which is greater or equal than the minReplicas value in the HorizontalPodAutoscaler(%s) (%d)", attachedPDB.Name, pdbMinAvailable, attachedHPA.Name, *attachedHPA.Spec.MinReplicas), + }, + }, nil + } + } + + return true, nil, nil +} + +func hasPDBAttached(deployment appsv1.Deployment, pdbs []kube.GenericResource) (*policyv1.PodDisruptionBudget, error) { + for _, generic := range pdbs { + pdb := &policyv1.PodDisruptionBudget{} + err := runtime.DefaultUnstructuredConverter.FromUnstructured(generic.Resource.Object, pdb) + if err != nil { + return nil, fmt.Errorf("error converting unstructured to PodDisruptionBudget: %v", err) + } + + if pdb.Spec.Selector == nil { + logrus.Debug("pdb.Spec.Selector is nil") + continue + } + + if matchesPDBForDeployment(deployment.Spec.Template.Labels, pdb.Spec.Selector.MatchLabels) { + return pdb, nil + } + } + return nil, nil +} + +// matchesPDBForDeployment checks if the labels of the deployment match the labels of the PDB +func matchesPDBForDeployment(deploymentLabels, pdbLabels map[string]string) bool { + for key, value := range pdbLabels { + if deploymentLabels[key] == value { + return true + } + } + return false +} + +func hasHPAAttached(deployment appsv1.Deployment, hpas []kube.GenericResource) (*autoscalingv1.HorizontalPodAutoscaler, error) { + for _, generic := range hpas { + hpa := &autoscalingv1.HorizontalPodAutoscaler{} + err := runtime.DefaultUnstructuredConverter.FromUnstructured(generic.Resource.Object, hpa) + if err != nil { + return nil, fmt.Errorf("error converting unstructured to HorizontalPodAutoscaler: %v", err) + } + + if hpa.Spec.ScaleTargetRef.Kind == "Deployment" && hpa.Spec.ScaleTargetRef.Name == deployment.Name { + return hpa, nil + } + } + return nil, nil +} + +// getIntOrPercentValueSafely is a safer version of getIntOrPercentValue based on private function intstr.getIntOrPercentValueSafely +func getIntOrPercentValueSafely(intOrStr *intstr.IntOrString) (int, bool, error) { + switch intOrStr.Type { + case intstr.Int: + return intOrStr.IntValue(), false, nil + case intstr.String: + isPercent := false + s := intOrStr.StrVal + if strings.HasSuffix(s, "%") { + isPercent = true + s = strings.TrimSuffix(intOrStr.StrVal, "%") + } else { + return 0, false, fmt.Errorf("invalid type: string is not a percentage") + } + v, err := strconv.Atoi(s) + if err != nil { + return 0, false, fmt.Errorf("invalid value %q: %v", intOrStr.StrVal, err) + } + return int(v), isPercent, nil + } + return 0, false, fmt.Errorf("invalid type: neither int nor percentage") +} diff --git a/pkg/validator/schema.go b/pkg/validator/schema.go index 79f19bfe3..73c96d649 100644 --- a/pkg/validator/schema.go +++ b/pkg/validator/schema.go @@ -369,6 +369,8 @@ func applySchemaCheck(conf *config.Configuration, checkID string, test schemaTes passes, issues, err = check.CheckContainer(test.Container) } else if check.Validator.SchemaURI != "" { passes, issues, err = check.CheckObject(test.Resource.Resource.Object) + } else if validatorMapper[checkID] != nil { + passes, issues, err = validatorMapper[checkID](test) } else { passes, issues, err = true, []jsonschema.ValError{}, nil } @@ -380,7 +382,7 @@ func applySchemaCheck(conf *config.Configuration, checkID string, test schemaTes break } if test.ResourceProvider == nil { - logrus.Warnf("No ResourceProvider available, check %s will not work in this context (e.g. 
admission control)", checkID) + logrus.Warnf("no ResourceProvider available, check %s will not work in this context (e.g. admission control)", checkID) break } resources := test.ResourceProvider.Resources[groupkind] diff --git a/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/failure-gt-percent.yaml b/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/failure-gt-percent.yaml new file mode 100644 index 000000000..257d60753 --- /dev/null +++ b/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/failure-gt-percent.yaml @@ -0,0 +1,44 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: zookeeper +spec: + replicas: 10 + template: + metadata: + labels: + app.kubernetes.io/name: zookeeper + foo: bar + spec: + containers: + - name: zookeeper + image: zookeeper +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: zookeeper-pdb +spec: + minAvailable: 150% # 1.5 * 10 = 15 + selector: + matchLabels: + app.kubernetes.io/name: zookeeper +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: zookeeper-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: zookeeper + minReplicas: 10 + maxReplicas: 15 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 50 diff --git a/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/failure-gt-scalar.yaml b/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/failure-gt-scalar.yaml new file mode 100644 index 000000000..bea1a930a --- /dev/null +++ b/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/failure-gt-scalar.yaml @@ -0,0 +1,43 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: zookeeper +spec: + template: + metadata: + labels: + app.kubernetes.io/name: zookeeper + foo: bar + spec: + containers: + - name: zookeeper + image: zookeeper +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: zookeeper-pdb +spec: + minAvailable: 10 + selector: + matchLabels: + app.kubernetes.io/name: zookeeper +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: zookeeper-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: zookeeper + minReplicas: 5 + maxReplicas: 7 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 50 diff --git a/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/failure.equals.yaml b/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/failure.equals.yaml new file mode 100644 index 000000000..53d9817e7 --- /dev/null +++ b/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/failure.equals.yaml @@ -0,0 +1,43 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: zookeeper +spec: + template: + metadata: + labels: + app.kubernetes.io/name: zookeeper + foo: bar + spec: + containers: + - name: zookeeper + image: zookeeper +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: zookeeper-pdb +spec: + minAvailable: 5 + selector: + matchLabels: + app.kubernetes.io/name: zookeeper +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: zookeeper-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: zookeeper + minReplicas: 5 + maxReplicas: 7 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 50 diff --git a/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/success-lt-percent.yaml b/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/success-lt-percent.yaml new 
file mode 100644 index 000000000..02bb809e3 --- /dev/null +++ b/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/success-lt-percent.yaml @@ -0,0 +1,44 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: zookeeper +spec: + replicas: 10 + template: + metadata: + labels: + app.kubernetes.io/name: zookeeper + foo: bar + spec: + containers: + - name: zookeeper + image: zookeeper +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: zookeeper-pdb +spec: + minAvailable: 50% # 0.5 * 10 = 5 + selector: + matchLabels: + app.kubernetes.io/name: zookeeper +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: zookeeper-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: zookeeper + minReplicas: 10 + maxReplicas: 15 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 50 diff --git a/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/success.hpa-no-match.yaml b/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/success.hpa-no-match.yaml new file mode 100644 index 000000000..c5ccc03af --- /dev/null +++ b/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/success.hpa-no-match.yaml @@ -0,0 +1,43 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: zookeeper +spec: + template: + metadata: + labels: + app.kubernetes.io/name: zookeeper + foo: bar + spec: + containers: + - name: zookeeper + image: zookeeper +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: zookeeper-pdb +spec: + minAvailable: 5 + selector: + matchLabels: + app.kubernetes.io/name: zookeeper +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: zookeeper-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: no-match + minReplicas: 5 + maxReplicas: 7 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 50 diff --git a/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/success.lt.yaml b/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/success.lt.yaml new file mode 100644 index 000000000..4292c2acd --- /dev/null +++ b/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/success.lt.yaml @@ -0,0 +1,43 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: zookeeper +spec: + template: + metadata: + labels: + app.kubernetes.io/name: zookeeper + foo: bar + spec: + containers: + - name: zookeeper + image: zookeeper +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: zookeeper-pdb +spec: + minAvailable: 2 + selector: + matchLabels: + app.kubernetes.io/name: zookeeper +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: zookeeper-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: zookeeper + minReplicas: 5 + maxReplicas: 7 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 50 diff --git a/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/success.no-hpa.yaml b/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/success.no-hpa.yaml new file mode 100644 index 000000000..9e9f0d5ce --- /dev/null +++ b/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/success.no-hpa.yaml @@ -0,0 +1,24 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: zookeeper +spec: + template: + metadata: + labels: + app.kubernetes.io/name: zookeeper + foo: bar + spec: + containers: + - name: zookeeper + image: zookeeper +--- +apiVersion: policy/v1 
+kind: PodDisruptionBudget +metadata: + name: zookeeper-pdb +spec: + minAvailable: 5 + selector: + matchLabels: + app.kubernetes.io/name: zookeeper diff --git a/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/success.no-match.yaml b/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/success.no-match.yaml new file mode 100644 index 000000000..1cd362598 --- /dev/null +++ b/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/success.no-match.yaml @@ -0,0 +1,43 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: zookeeper +spec: + template: + metadata: + labels: + app.kubernetes.io/name: zookeeper + foo: bar + spec: + containers: + - name: zookeeper + image: zookeeper +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: zookeeper-pdb +spec: + minAvailable: 5 + selector: + matchLabels: + app.kubernetes.io/name: no-match +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: zookeeper-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: no-match + minReplicas: 5 + maxReplicas: 7 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 50 diff --git a/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/success.no-min-replicas.yaml b/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/success.no-min-replicas.yaml new file mode 100644 index 000000000..874dca5b1 --- /dev/null +++ b/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/success.no-min-replicas.yaml @@ -0,0 +1,42 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: zookeeper +spec: + template: + metadata: + labels: + app.kubernetes.io/name: zookeeper + foo: bar + spec: + containers: + - name: zookeeper + image: zookeeper +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: zookeeper-pdb +spec: + minAvailable: 2 + selector: + matchLabels: + app.kubernetes.io/name: zookeeper +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: zookeeper-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: zookeeper + maxReplicas: 5 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 50 diff --git a/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/success.no-pdb.yaml b/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/success.no-pdb.yaml new file mode 100644 index 000000000..cc9d8fbbf --- /dev/null +++ b/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/success.no-pdb.yaml @@ -0,0 +1,33 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: zookeeper +spec: + template: + metadata: + labels: + app.kubernetes.io/name: zookeeper + foo: bar + spec: + containers: + - name: zookeeper + image: zookeeper +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: zookeeper-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: zookeeper + minReplicas: 5 + maxReplicas: 7 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 50 diff --git a/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/success.pdb-no-match.yaml b/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/success.pdb-no-match.yaml new file mode 100644 index 000000000..b9c1a4cce --- /dev/null +++ b/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/success.pdb-no-match.yaml @@ -0,0 +1,43 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: zookeeper +spec: + template: + metadata: + labels: + 
app.kubernetes.io/name: zookeeper + foo: bar + spec: + containers: + - name: zookeeper + image: zookeeper +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: zookeeper-pdb +spec: + minAvailable: 5 + selector: + matchLabels: + app.kubernetes.io/name: no-match +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: zookeeper-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: zookeeper + minReplicas: 5 + maxReplicas: 7 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 50 diff --git a/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/success.percent-no-replica.yaml b/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/success.percent-no-replica.yaml new file mode 100644 index 000000000..e1fa8b1e3 --- /dev/null +++ b/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/success.percent-no-replica.yaml @@ -0,0 +1,43 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: zookeeper +spec: + template: + metadata: + labels: + app.kubernetes.io/name: zookeeper + foo: bar + spec: + containers: + - name: zookeeper + image: zookeeper +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: zookeeper-pdb +spec: + minAvailable: 50% + selector: + matchLabels: + app.kubernetes.io/name: zookeeper +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: zookeeper-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: zookeeper + minReplicas: 5 + maxReplicas: 7 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 50 diff --git a/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/success.yaml b/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/success.yaml new file mode 100644 index 000000000..034257aad --- /dev/null +++ b/test/checks/pdbMinAvailableGreaterThanHPAMinReplicas/success.yaml @@ -0,0 +1,14 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: zookeeper +spec: + template: + metadata: + labels: + app.kubernetes.io/name: zookeeper + foo: bar + spec: + containers: + - name: zookeeper + image: zookeeper \ No newline at end of file diff --git a/test/mutation_test.go b/test/mutation_test.go index bfc74eb63..b0814b967 100644 --- a/test/mutation_test.go +++ b/test/mutation_test.go @@ -34,6 +34,8 @@ func TestMutations(t *testing.T) { c, err := config.Parse([]byte(configYaml)) assert.NoError(t, err) assert.Len(t, c.Mutations, 0) + + _, mutatedYamlContentMap, mutationTestCasesMap := initTestCases() for mutationStr := range mutationTestCasesMap { if len(mutationTestCasesMap[mutationStr]) == 0 { panic("No test cases found for " + mutationStr) diff --git a/test/schema_test.go b/test/schema_test.go index ca09d6d92..0cad1931f 100644 --- a/test/schema_test.go +++ b/test/schema_test.go @@ -29,8 +29,6 @@ import ( "github.com/fairwindsops/polaris/pkg/validator" ) -var testCases = []testCase{} - type testCase struct { check string filename string @@ -40,10 +38,7 @@ type testCase struct { manifest string } -var mutatedYamlContentMap = map[string]string{} -var mutationTestCasesMap = map[string][]testCase{} - -func init() { +func initTestCases() ([]testCase, map[string]string, map[string][]testCase) { checkToTest := os.Getenv("POLARIS_CHECK_TEST") // if set, only run tests for this check _, baseDir, _, _ := runtime.Caller(0) baseDir = filepath.Dir(baseDir) + "/checks" @@ -51,6 +46,12 @@ func init() { if err != nil { panic(err) } + if checkToTest != "" { + 
fmt.Printf("POLARIS_CHECK_TEST is set... Running tests for '%s' only\n", checkToTest) + } + var testCases = []testCase{} + var mutatedYamlContentMap = map[string]string{} + var mutationTestCasesMap = map[string][]testCase{} for _, dir := range dirs { check := dir.Name() if checkToTest != "" && checkToTest != check { @@ -116,9 +117,11 @@ func init() { } } } + return testCases, mutatedYamlContentMap, mutationTestCasesMap } func TestChecks(t *testing.T) { + testCases, _, _ := initTestCases() for _, tc := range testCases { results, err := validator.ApplyAllSchemaChecksToResourceProvider(&tc.config, tc.resources) if err != nil {