Skip to content

Commit

Permalink
Add replica exporter test and fix a Go test
Browse files Browse the repository at this point in the history
  • Loading branch information
jmckulk committed Sep 19, 2023
1 parent a36bc42 commit 5e4cc4e
Show file tree
Hide file tree
Showing 6 changed files with 101 additions and 15 deletions.
10 changes: 3 additions & 7 deletions internal/controller/postgrescluster/pgmonitor.go
Original file line number Diff line number Diff line change
Expand Up @@ -222,13 +222,9 @@ func (r *Reconciler) reconcileMonitoringSecret(
intent.Data = make(map[string][]byte)

// Copy existing password and verifier into the intent
password, set := existing.Data["password"]
if set {
intent.Data["password"] = password
verifier, set := existing.Data["verifier"]
if set {
intent.Data["verifier"] = verifier
}
if existing.Data != nil {
intent.Data["password"] = existing.Data["password"]
intent.Data["verifier"] = existing.Data["verifier"]
}

// When password is unset, generate a new one
Expand Down
13 changes: 5 additions & 8 deletions internal/controller/postgrescluster/pgmonitor_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -112,14 +112,7 @@ func TestAddPGMonitorExporterToInstancePodSpec(t *testing.T) {
expectedENV := []corev1.EnvVar{
{Name: "DATA_SOURCE_URI", Value: fmt.Sprintf("localhost:%d/postgres", *cluster.Spec.Port)},
{Name: "DATA_SOURCE_USER", Value: pgmonitor.MonitoringUser},
{Name: "DATA_SOURCE_PASS", ValueFrom: &corev1.EnvVarSource{
SecretKeyRef: &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: naming.MonitoringUserSecret(cluster).Name,
},
Key: "password",
},
}}}
{Name: "DATA_SOURCE_PASS_FILE", Value: "/opt/crunchy/password"}}
assert.DeepEqual(t, container.Env, expectedENV)

assert.Assert(t, container.Ports[0].ContainerPort == int32(9187), "Exporter container port number not set to '9187'.")
Expand Down Expand Up @@ -585,6 +578,8 @@ func TestReconcileMonitoringSecret(t *testing.T) {
cluster.UID = types.UID("hippouid")
cluster.Namespace = setupNamespace(t, cc).Name

// If the exporter is disabled then the secret should not exist
// Existing secrets should be removed
t.Run("ExporterDisabled", func(t *testing.T) {
t.Run("NotExisting", func(t *testing.T) {
secret, err := reconciler.reconcileMonitoringSecret(ctx, cluster)
Expand All @@ -607,6 +602,8 @@ func TestReconcileMonitoringSecret(t *testing.T) {
})
})

// If the exporter is enabled then a monitoring secret should exist
// It will need to be created or left in place with existing password
t.Run("ExporterEnabled", func(t *testing.T) {
var (
existing, actual *corev1.Secret
Expand Down
6 changes: 6 additions & 0 deletions testing/kuttl/e2e/exporter-replica/00--create-cluster.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
# kuttl test step: apply the exporter-replica PostgresCluster manifest, then
# block until the resources listed in the checks file reach the asserted state.
apiVersion: kuttl.dev/v1beta1
kind: TestStep
apply:
- files/exporter-replica-cluster.yaml
assert:
- files/exporter-replica-cluster-checks.yaml
44 changes: 44 additions & 0 deletions testing/kuttl/e2e/exporter-replica/00-assert.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
apiVersion: kuttl.dev/v1beta1
kind: TestAssert
commands:
# First, check that all containers in the replica pod are ready
# Then, ensure the ccp_monitoring user exists on the replica
# NOTE(review): unlike the comment in the original, this script does not scrape
# exporter metrics; the `contains` helper is currently unused and kept for
# parity with sibling exporter tests.
- script: |
    retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; }
    check_containers_ready() { bash -ceu 'echo "$1" | jq -e ".[] | select(.type==\"ContainersReady\") | .status==\"True\""' - "$@"; }
    contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; }

    # Find the exporter-enabled replica pod for this cluster.
    replica=$(kubectl get pods -o name -n "${NAMESPACE}" \
      -l postgres-operator.crunchydata.com/cluster=exporter-replica \
      -l postgres-operator.crunchydata.com/crunchy-postgres-exporter=true \
      -l postgres-operator.crunchydata.com/role=replica)
    [ "$replica" = "" ] && retry "Replica Pod not found" && exit 1

    # All containers in the replica pod must report ContainersReady=True.
    replica_condition_json=$(kubectl get "${replica}" -n "${NAMESPACE}" -o jsonpath="{.status.conditions}")
    [ "$replica_condition_json" = "" ] && retry "Replica conditions not found" && exit 1
    {
      check_containers_ready "$replica_condition_json"
    } || {
      retry "containers not ready"
      exit 1
    }

    # The monitoring role must exist on the replica's PostgreSQL instance.
    kubectl exec --stdin "${replica}" --namespace "${NAMESPACE}" -c database \
      -- psql -qb --set ON_ERROR_STOP=1 --file=- <<'SQL'
    DO $$
    DECLARE
      result record;
    BEGIN
      SELECT * INTO result FROM pg_catalog.pg_roles WHERE rolname = 'ccp_monitoring';
      ASSERT FOUND, 'user not found';
    END $$
    SQL
collectors:
# Grab the exporter container logs on failure
- type: pod
  selector: "postgres-operator.crunchydata.com/cluster=exporter-replica,postgres-operator.crunchydata.com/crunchy-postgres-exporter=true"
  container: exporter
# Grab the pod describe output
- type: command
  command: kubectl -n $NAMESPACE describe pods --selector postgres-operator.crunchydata.com/cluster=exporter-replica,postgres-operator.crunchydata.com/crunchy-postgres-exporter=true
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
# Asserted state for the create-cluster step: the cluster reports two ready
# instance replicas, an exporter-labeled Pod is Running, and the exporter
# queries ConfigMap exists.
apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PostgresCluster
metadata:
  name: exporter-replica
status:
  instances:
  - name: instance1
    readyReplicas: 2
    replicas: 2
    updatedReplicas: 2
---
apiVersion: v1
kind: Pod
metadata:
  labels:
    postgres-operator.crunchydata.com/cluster: exporter-replica
    postgres-operator.crunchydata.com/crunchy-postgres-exporter: "true"
status:
  phase: Running
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: exporter-replica-exporter-queries-config
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
# A two-replica PostgresCluster with the pgMonitor exporter enabled (empty
# exporter spec uses defaults), used to verify the exporter on replica pods.
# ${KUTTL_PG_VERSION} is substituted by the kuttl test harness.
apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PostgresCluster
metadata:
  name: exporter-replica
spec:
  postgresVersion: ${KUTTL_PG_VERSION}
  instances:
  - name: instance1
    replicas: 2
    dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } }
  backups:
    pgbackrest:
      repos:
      - name: repo1
        volume:
          volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } }
  monitoring:
    pgmonitor:
      exporter: {}

0 comments on commit 5e4cc4e

Please sign in to comment.