From c8da1939dca8af6de3a5a15a3b5ba0f0aa783255 Mon Sep 17 00:00:00 2001 From: Chris Bandy Date: Wed, 27 Nov 2024 10:05:20 -0600 Subject: [PATCH 1/5] Treat corev1.PersistentVolumeClaim as a reference type The client.Object interface is implemented only by the pointer type. Subtle bugs can arise from passing a value when you intended to pass a reference. See: 96132b8e79f2ce61f17229e41348751dc6be8664 See: https://go.dev/wiki/MethodSets --- .../controller/postgrescluster/cluster.go | 2 +- .../controller/postgrescluster/controller.go | 2 +- .../controller/postgrescluster/instance.go | 12 ++--- .../postgrescluster/instance_test.go | 18 +++---- .../controller/postgrescluster/pgbackrest.go | 4 +- .../controller/postgrescluster/postgres.go | 26 ++++----- .../controller/postgrescluster/snapshots.go | 10 ++-- .../postgrescluster/snapshots_test.go | 10 ++-- .../controller/postgrescluster/volumes.go | 47 +++++++--------- .../postgrescluster/volumes_test.go | 13 +++-- internal/initialize/primitives.go | 9 ++++ internal/initialize/primitives_test.go | 54 +++++++++++++++++++ 12 files changed, 124 insertions(+), 83 deletions(-) diff --git a/internal/controller/postgrescluster/cluster.go b/internal/controller/postgrescluster/cluster.go index 3ba6eab0e..a8dbff0e7 100644 --- a/internal/controller/postgrescluster/cluster.go +++ b/internal/controller/postgrescluster/cluster.go @@ -281,7 +281,7 @@ func (r *Reconciler) reconcileClusterReplicaService( // `dataSource.pgbackrest` fields func (r *Reconciler) reconcileDataSource(ctx context.Context, cluster *v1beta1.PostgresCluster, observed *observedInstances, - clusterVolumes []corev1.PersistentVolumeClaim, + clusterVolumes []*corev1.PersistentVolumeClaim, rootCA *pki.RootCertificateAuthority, backupsSpecFound bool, ) (bool, error) { diff --git a/internal/controller/postgrescluster/controller.go b/internal/controller/postgrescluster/controller.go index dc7f5fcba..394c87a75 100644 --- a/internal/controller/postgrescluster/controller.go +++ b/internal/controller/postgrescluster/controller.go @@ -162,7 +162,7 @@ func (r *Reconciler) Reconcile( clusterConfigMap *corev1.ConfigMap clusterReplicationSecret *corev1.Secret clusterPodService *corev1.Service - clusterVolumes []corev1.PersistentVolumeClaim + clusterVolumes []*corev1.PersistentVolumeClaim instanceServiceAccount *corev1.ServiceAccount instances *observedInstances patroniLeaderService *corev1.Service diff --git a/internal/controller/postgrescluster/instance.go b/internal/controller/postgrescluster/instance.go index 0174a6224..ff3810ae3 100644 --- a/internal/controller/postgrescluster/instance.go +++ b/internal/controller/postgrescluster/instance.go @@ -588,7 +588,7 @@ func (r *Reconciler) reconcileInstanceSets( instances *observedInstances, patroniLeaderService *corev1.Service, primaryCertificate *corev1.SecretProjection, - clusterVolumes []corev1.PersistentVolumeClaim, + clusterVolumes []*corev1.PersistentVolumeClaim, exporterQueriesConfig, exporterWebConfig *corev1.ConfigMap, backupsSpecFound bool, ) error { @@ -706,12 +706,12 @@ func (r *Reconciler) cleanupPodDisruptionBudgets( // for the instance set specified that are not currently associated with an instance, and then // returning the instance names associated with those PVC's. 
func findAvailableInstanceNames(set v1beta1.PostgresInstanceSetSpec, - observedInstances *observedInstances, clusterVolumes []corev1.PersistentVolumeClaim) []string { + observedInstances *observedInstances, clusterVolumes []*corev1.PersistentVolumeClaim) []string { availableInstanceNames := []string{} // first identify any PGDATA volumes for the instance set specified - setVolumes := []corev1.PersistentVolumeClaim{} + setVolumes := []*corev1.PersistentVolumeClaim{} for _, pvc := range clusterVolumes { // ignore PGDATA PVCs that are terminating if pvc.GetDeletionTimestamp() != nil { @@ -729,7 +729,7 @@ func findAvailableInstanceNames(set v1beta1.PostgresInstanceSetSpec, // any available PGDATA volumes for the instance set that have no corresponding WAL // volumes (which means new PVCs will simply be reconciled instead). if set.WALVolumeClaimSpec != nil { - setVolumesWithWAL := []corev1.PersistentVolumeClaim{} + setVolumesWithWAL := []*corev1.PersistentVolumeClaim{} for _, setVol := range setVolumes { setVolInstance := setVol.GetLabels()[naming.LabelInstance] for _, pvc := range clusterVolumes { @@ -1066,7 +1066,7 @@ func (r *Reconciler) scaleUpInstances( primaryCertificate *corev1.SecretProjection, availableInstanceNames []string, numInstancePods int, - clusterVolumes []corev1.PersistentVolumeClaim, + clusterVolumes []*corev1.PersistentVolumeClaim, exporterQueriesConfig, exporterWebConfig *corev1.ConfigMap, backupsSpecFound bool, ) ([]*appsv1.StatefulSet, error) { @@ -1141,7 +1141,7 @@ func (r *Reconciler) reconcileInstance( primaryCertificate *corev1.SecretProjection, instance *appsv1.StatefulSet, numInstancePods int, - clusterVolumes []corev1.PersistentVolumeClaim, + clusterVolumes []*corev1.PersistentVolumeClaim, exporterQueriesConfig, exporterWebConfig *corev1.ConfigMap, backupsSpecFound bool, ) error { diff --git a/internal/controller/postgrescluster/instance_test.go b/internal/controller/postgrescluster/instance_test.go index c851d2b17..f4eda5b05 100644 --- a/internal/controller/postgrescluster/instance_test.go +++ b/internal/controller/postgrescluster/instance_test.go @@ -1758,7 +1758,7 @@ func TestFindAvailableInstanceNames(t *testing.T) { testCases := []struct { set v1beta1.PostgresInstanceSetSpec fakeObservedInstances *observedInstances - fakeClusterVolumes []corev1.PersistentVolumeClaim + fakeClusterVolumes []*corev1.PersistentVolumeClaim expectedInstanceNames []string }{{ set: v1beta1.PostgresInstanceSetSpec{Name: "instance1"}, @@ -1769,7 +1769,7 @@ func TestFindAvailableInstanceNames(t *testing.T) { []appsv1.StatefulSet{{}}, []corev1.Pod{}, ), - fakeClusterVolumes: []corev1.PersistentVolumeClaim{{}}, + fakeClusterVolumes: []*corev1.PersistentVolumeClaim{{}}, expectedInstanceNames: []string{}, }, { set: v1beta1.PostgresInstanceSetSpec{Name: "instance1"}, @@ -1783,7 +1783,7 @@ func TestFindAvailableInstanceNames(t *testing.T) { naming.LabelInstanceSet: "instance1"}}}}, []corev1.Pod{}, ), - fakeClusterVolumes: []corev1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{ + fakeClusterVolumes: []*corev1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{ Name: "instance1-abc-def", Labels: map[string]string{ naming.LabelRole: naming.RolePostgresData, @@ -1802,7 +1802,7 @@ func TestFindAvailableInstanceNames(t *testing.T) { naming.LabelInstanceSet: "instance1"}}}}, []corev1.Pod{}, ), - fakeClusterVolumes: []corev1.PersistentVolumeClaim{}, + fakeClusterVolumes: []*corev1.PersistentVolumeClaim{}, expectedInstanceNames: []string{}, }, { set: v1beta1.PostgresInstanceSetSpec{Name: 
"instance1"}, @@ -1816,7 +1816,7 @@ func TestFindAvailableInstanceNames(t *testing.T) { naming.LabelInstanceSet: "instance1"}}}}, []corev1.Pod{}, ), - fakeClusterVolumes: []corev1.PersistentVolumeClaim{ + fakeClusterVolumes: []*corev1.PersistentVolumeClaim{ {ObjectMeta: metav1.ObjectMeta{ Name: "instance1-abc-def", Labels: map[string]string{ @@ -1843,7 +1843,7 @@ func TestFindAvailableInstanceNames(t *testing.T) { naming.LabelInstanceSet: "instance1"}}}}, []corev1.Pod{}, ), - fakeClusterVolumes: []corev1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{ + fakeClusterVolumes: []*corev1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{ Name: "instance1-abc-def", Labels: map[string]string{ naming.LabelRole: naming.RolePostgresData, @@ -1863,7 +1863,7 @@ func TestFindAvailableInstanceNames(t *testing.T) { naming.LabelInstanceSet: "instance1"}}}}, []corev1.Pod{}, ), - fakeClusterVolumes: []corev1.PersistentVolumeClaim{ + fakeClusterVolumes: []*corev1.PersistentVolumeClaim{ {ObjectMeta: metav1.ObjectMeta{ Name: "instance1-abc-def", Labels: map[string]string{ @@ -1887,7 +1887,7 @@ func TestFindAvailableInstanceNames(t *testing.T) { []appsv1.StatefulSet{}, []corev1.Pod{}, ), - fakeClusterVolumes: []corev1.PersistentVolumeClaim{ + fakeClusterVolumes: []*corev1.PersistentVolumeClaim{ {ObjectMeta: metav1.ObjectMeta{ Name: "instance1-def-ghi", Labels: map[string]string{ @@ -1911,7 +1911,7 @@ func TestFindAvailableInstanceNames(t *testing.T) { []appsv1.StatefulSet{}, []corev1.Pod{}, ), - fakeClusterVolumes: []corev1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{ + fakeClusterVolumes: []*corev1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{ Name: "instance1-def-ghi", Labels: map[string]string{ naming.LabelRole: naming.RolePostgresData, diff --git a/internal/controller/postgrescluster/pgbackrest.go b/internal/controller/postgrescluster/pgbackrest.go index ff819bab5..95f3cf643 100644 --- a/internal/controller/postgrescluster/pgbackrest.go +++ b/internal/controller/postgrescluster/pgbackrest.go @@ -1499,7 +1499,7 @@ func (r *Reconciler) reconcilePGBackRest(ctx context.Context, // for the PostgresCluster being reconciled using the backups of another PostgresCluster. func (r *Reconciler) reconcilePostgresClusterDataSource(ctx context.Context, cluster *v1beta1.PostgresCluster, dataSource *v1beta1.PostgresClusterDataSource, - configHash string, clusterVolumes []corev1.PersistentVolumeClaim, + configHash string, clusterVolumes []*corev1.PersistentVolumeClaim, rootCA *pki.RootCertificateAuthority, backupsSpecFound bool, ) error { @@ -1663,7 +1663,7 @@ func (r *Reconciler) reconcilePostgresClusterDataSource(ctx context.Context, // data source, i.e., S3, etc. func (r *Reconciler) reconcileCloudBasedDataSource(ctx context.Context, cluster *v1beta1.PostgresCluster, dataSource *v1beta1.PGBackRestDataSource, - configHash string, clusterVolumes []corev1.PersistentVolumeClaim) error { + configHash string, clusterVolumes []*corev1.PersistentVolumeClaim) error { // Ensure the proper instance and instance set can be identified via the status. 
The // StartupInstance and StartupInstanceSet values should be populated when the cluster diff --git a/internal/controller/postgrescluster/postgres.go b/internal/controller/postgrescluster/postgres.go index 312079d82..b851230e4 100644 --- a/internal/controller/postgrescluster/postgres.go +++ b/internal/controller/postgrescluster/postgres.go @@ -20,6 +20,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation/field" "sigs.k8s.io/controller-runtime/pkg/client" @@ -569,7 +570,7 @@ func (r *Reconciler) reconcilePostgresUsersInPostgreSQL( func (r *Reconciler) reconcilePostgresDataVolume( ctx context.Context, cluster *v1beta1.PostgresCluster, instanceSpec *v1beta1.PostgresInstanceSetSpec, instance *appsv1.StatefulSet, - clusterVolumes []corev1.PersistentVolumeClaim, sourceCluster *v1beta1.PostgresCluster, + clusterVolumes []*corev1.PersistentVolumeClaim, sourceCluster *v1beta1.PostgresCluster, ) (*corev1.PersistentVolumeClaim, error) { labelMap := map[string]string{ @@ -581,10 +582,7 @@ func (r *Reconciler) reconcilePostgresDataVolume( } var pvc *corev1.PersistentVolumeClaim - existingPVCName, err := getPGPVCName(labelMap, clusterVolumes) - if err != nil { - return nil, errors.WithStack(err) - } + existingPVCName := getPVCName(clusterVolumes, labels.SelectorFromSet(labelMap)) if existingPVCName != "" { pvc = &corev1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{ Namespace: cluster.GetNamespace(), @@ -596,7 +594,7 @@ func (r *Reconciler) reconcilePostgresDataVolume( pvc.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim")) - err = errors.WithStack(r.setControllerReference(cluster, pvc)) + err := errors.WithStack(r.setControllerReference(cluster, pvc)) pvc.Annotations = naming.Merge( cluster.Spec.Metadata.GetAnnotationsOrNil(), @@ -726,7 +724,7 @@ func (r *Reconciler) setVolumeSize(ctx context.Context, cluster *v1beta1.Postgre func (r *Reconciler) reconcileTablespaceVolumes( ctx context.Context, cluster *v1beta1.PostgresCluster, instanceSpec *v1beta1.PostgresInstanceSetSpec, instance *appsv1.StatefulSet, - clusterVolumes []corev1.PersistentVolumeClaim, + clusterVolumes []*corev1.PersistentVolumeClaim, ) (tablespaceVolumes []*corev1.PersistentVolumeClaim, err error) { if !feature.Enabled(ctx, feature.TablespaceVolumes) { @@ -747,10 +745,7 @@ func (r *Reconciler) reconcileTablespaceVolumes( } var pvc *corev1.PersistentVolumeClaim - existingPVCName, err := getPGPVCName(labelMap, clusterVolumes) - if err != nil { - return nil, errors.WithStack(err) - } + existingPVCName := getPVCName(clusterVolumes, labels.SelectorFromSet(labelMap)) if existingPVCName != "" { pvc = &corev1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{ Namespace: cluster.GetNamespace(), @@ -799,7 +794,7 @@ func (r *Reconciler) reconcileTablespaceVolumes( func (r *Reconciler) reconcilePostgresWALVolume( ctx context.Context, cluster *v1beta1.PostgresCluster, instanceSpec *v1beta1.PostgresInstanceSetSpec, instance *appsv1.StatefulSet, - observed *Instance, clusterVolumes []corev1.PersistentVolumeClaim, + observed *Instance, clusterVolumes []*corev1.PersistentVolumeClaim, ) (*corev1.PersistentVolumeClaim, error) { labelMap := map[string]string{ @@ -811,10 +806,7 @@ func (r *Reconciler) reconcilePostgresWALVolume( } var pvc *corev1.PersistentVolumeClaim - existingPVCName, err := getPGPVCName(labelMap, clusterVolumes) - if 
err != nil { - return nil, errors.WithStack(err) - } + existingPVCName := getPVCName(clusterVolumes, labels.SelectorFromSet(labelMap)) if existingPVCName != "" { pvc = &corev1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{ Namespace: cluster.GetNamespace(), @@ -872,7 +864,7 @@ func (r *Reconciler) reconcilePostgresWALVolume( return pvc, err } - err = errors.WithStack(r.setControllerReference(cluster, pvc)) + err := errors.WithStack(r.setControllerReference(cluster, pvc)) pvc.Annotations = naming.Merge( cluster.Spec.Metadata.GetAnnotationsOrNil(), diff --git a/internal/controller/postgrescluster/snapshots.go b/internal/controller/postgrescluster/snapshots.go index 2b6550593..932aa19fd 100644 --- a/internal/controller/postgrescluster/snapshots.go +++ b/internal/controller/postgrescluster/snapshots.go @@ -14,6 +14,7 @@ import ( batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "sigs.k8s.io/controller-runtime/pkg/client" volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" @@ -164,7 +165,7 @@ func (r *Reconciler) reconcileVolumeSnapshots(ctx context.Context, // after a successful backup. func (r *Reconciler) reconcileDedicatedSnapshotVolume( ctx context.Context, cluster *v1beta1.PostgresCluster, - clusterVolumes []corev1.PersistentVolumeClaim, + clusterVolumes []*corev1.PersistentVolumeClaim, ) (*corev1.PersistentVolumeClaim, error) { // If VolumeSnapshots feature gate is disabled, do nothing and return early. @@ -181,10 +182,7 @@ func (r *Reconciler) reconcileDedicatedSnapshotVolume( // If volume already exists, use existing name. Otherwise, generate a name. var pvc *corev1.PersistentVolumeClaim - existingPVCName, err := getPGPVCName(labelMap, clusterVolumes) - if err != nil { - return nil, errors.WithStack(err) - } + existingPVCName := getPVCName(clusterVolumes, labels.SelectorFromSet(labelMap)) if existingPVCName != "" { pvc = &corev1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{ Namespace: cluster.GetNamespace(), @@ -208,7 +206,7 @@ func (r *Reconciler) reconcileDedicatedSnapshotVolume( // If we've got this far, snapshots are enabled so we should create/update/get // the dedicated snapshot volume - pvc, err = r.createDedicatedSnapshotVolume(ctx, cluster, labelMap, pvc) + pvc, err := r.createDedicatedSnapshotVolume(ctx, cluster, labelMap, pvc) if err != nil { return pvc, err } diff --git a/internal/controller/postgrescluster/snapshots_test.go b/internal/controller/postgrescluster/snapshots_test.go index 828ad3ea2..ca149d7c8 100644 --- a/internal/controller/postgrescluster/snapshots_test.go +++ b/internal/controller/postgrescluster/snapshots_test.go @@ -405,7 +405,7 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { assert.Equal(t, len(pvcs.Items), 1) // Create volumes for reconcile - clusterVolumes := []corev1.PersistentVolumeClaim{*pvc} + clusterVolumes := []*corev1.PersistentVolumeClaim{pvc} // Reconcile returned, err := r.reconcileDedicatedSnapshotVolume(ctx, cluster, clusterVolumes) @@ -434,7 +434,7 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) // Create volumes for reconcile - clusterVolumes := []corev1.PersistentVolumeClaim{} + clusterVolumes := []*corev1.PersistentVolumeClaim{} // Reconcile pvc, err := r.reconcileDedicatedSnapshotVolume(ctx, cluster, clusterVolumes) @@ -480,7 +480,7 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { // 
Create instance set and volumes for reconcile sts := &appsv1.StatefulSet{} generateInstanceStatefulSetIntent(ctx, cluster, &cluster.Spec.InstanceSets[0], "pod-service", "service-account", sts, 1) - clusterVolumes := []corev1.PersistentVolumeClaim{} + clusterVolumes := []*corev1.PersistentVolumeClaim{} // Reconcile pvc, err := r.reconcileDedicatedSnapshotVolume(ctx, cluster, clusterVolumes) @@ -544,7 +544,7 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { // Create instance set and volumes for reconcile sts := &appsv1.StatefulSet{} generateInstanceStatefulSetIntent(ctx, cluster, &cluster.Spec.InstanceSets[0], "pod-service", "service-account", sts, 1) - clusterVolumes := []corev1.PersistentVolumeClaim{} + clusterVolumes := []*corev1.PersistentVolumeClaim{} // Reconcile pvc, err := r.reconcileDedicatedSnapshotVolume(ctx, cluster, clusterVolumes) @@ -611,7 +611,7 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { // Setup instances and volumes for reconcile sts := &appsv1.StatefulSet{} generateInstanceStatefulSetIntent(ctx, cluster, &cluster.Spec.InstanceSets[0], "pod-service", "service-account", sts, 1) - clusterVolumes := []corev1.PersistentVolumeClaim{} + clusterVolumes := []*corev1.PersistentVolumeClaim{} // Reconcile pvc, err := r.reconcileDedicatedSnapshotVolume(ctx, cluster, clusterVolumes) diff --git a/internal/controller/postgrescluster/volumes.go b/internal/controller/postgrescluster/volumes.go index f11747600..838c2d4d9 100644 --- a/internal/controller/postgrescluster/volumes.go +++ b/internal/controller/postgrescluster/volumes.go @@ -34,7 +34,7 @@ import ( // API and sets the PersistentVolumeResizing condition as appropriate. func (r *Reconciler) observePersistentVolumeClaims( ctx context.Context, cluster *v1beta1.PostgresCluster, -) ([]corev1.PersistentVolumeClaim, error) { +) ([]*corev1.PersistentVolumeClaim, error) { volumes := &corev1.PersistentVolumeClaimList{} selector, err := naming.AsSelector(naming.Cluster(cluster.Name)) @@ -140,7 +140,7 @@ func (r *Reconciler) observePersistentVolumeClaims( meta.RemoveStatusCondition(&cluster.Status.Conditions, resizing.Type) } - return volumes.Items, err + return initialize.Pointers(volumes.Items...), err } // configureExistingPVCs configures the defined pgData, pg_wal and pgBackRest @@ -151,8 +151,8 @@ func (r *Reconciler) observePersistentVolumeClaims( // bootstrapping. 
func (r *Reconciler) configureExistingPVCs( ctx context.Context, cluster *v1beta1.PostgresCluster, - volumes []corev1.PersistentVolumeClaim, -) ([]corev1.PersistentVolumeClaim, error) { + volumes []*corev1.PersistentVolumeClaim, +) ([]*corev1.PersistentVolumeClaim, error) { var err error @@ -197,9 +197,9 @@ func (r *Reconciler) configureExistingPVCs( func (r *Reconciler) configureExistingPGVolumes( ctx context.Context, cluster *v1beta1.PostgresCluster, - volumes []corev1.PersistentVolumeClaim, + volumes []*corev1.PersistentVolumeClaim, instanceName string, -) ([]corev1.PersistentVolumeClaim, error) { +) ([]*corev1.PersistentVolumeClaim, error) { // if the volume is already in the list, move on for i := range volumes { @@ -235,7 +235,7 @@ func (r *Reconciler) configureExistingPGVolumes( if err := errors.WithStack(r.apply(ctx, volume)); err != nil { return volumes, err } - volumes = append(volumes, *volume) + volumes = append(volumes, volume) } } return volumes, nil @@ -250,9 +250,9 @@ func (r *Reconciler) configureExistingPGVolumes( func (r *Reconciler) configureExistingPGWALVolume( ctx context.Context, cluster *v1beta1.PostgresCluster, - volumes []corev1.PersistentVolumeClaim, + volumes []*corev1.PersistentVolumeClaim, instanceName string, -) ([]corev1.PersistentVolumeClaim, error) { +) ([]*corev1.PersistentVolumeClaim, error) { // if the volume is already in the list, move on for i := range volumes { @@ -288,7 +288,7 @@ func (r *Reconciler) configureExistingPGWALVolume( if err := errors.WithStack(r.apply(ctx, volume)); err != nil { return volumes, err } - volumes = append(volumes, *volume) + volumes = append(volumes, volume) } return volumes, nil } @@ -302,8 +302,8 @@ func (r *Reconciler) configureExistingPGWALVolume( func (r *Reconciler) configureExistingRepoVolumes( ctx context.Context, cluster *v1beta1.PostgresCluster, - volumes []corev1.PersistentVolumeClaim, -) ([]corev1.PersistentVolumeClaim, error) { + volumes []*corev1.PersistentVolumeClaim, +) ([]*corev1.PersistentVolumeClaim, error) { // if the volume is already in the list, move on for i := range volumes { @@ -337,7 +337,7 @@ func (r *Reconciler) configureExistingRepoVolumes( if err := errors.WithStack(r.apply(ctx, volume)); err != nil { return volumes, err } - volumes = append(volumes, *volume) + volumes = append(volumes, volume) } } return volumes, nil @@ -859,23 +859,12 @@ func getRepoPVCNames( return repoPVCs } -// getPGPVCName returns the name of a PVC that has the provided labels, if found. -func getPGPVCName(labelMap map[string]string, - clusterVolumes []corev1.PersistentVolumeClaim, -) (string, error) { - - selector, err := naming.AsSelector(metav1.LabelSelector{ - MatchLabels: labelMap, - }) - if err != nil { - return "", errors.WithStack(err) - } - - for _, pvc := range clusterVolumes { +// getPVCName returns the name of a PVC that matches the selector, if any. 
+func getPVCName(volumes []*corev1.PersistentVolumeClaim, selector labels.Selector) string { + for _, pvc := range volumes { if selector.Matches(labels.Set(pvc.GetLabels())) { - return pvc.GetName(), nil + return pvc.GetName() } } - - return "", nil + return "" } diff --git a/internal/controller/postgrescluster/volumes_test.go b/internal/controller/postgrescluster/volumes_test.go index 96eef5f91..b4156072b 100644 --- a/internal/controller/postgrescluster/volumes_test.go +++ b/internal/controller/postgrescluster/volumes_test.go @@ -16,6 +16,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apimachinery/pkg/util/wait" "sigs.k8s.io/controller-runtime/pkg/client" @@ -295,7 +296,7 @@ func TestGetPVCNameMethods(t *testing.T) { naming.LabelInstance: "testinstance1-abcd", naming.LabelRole: naming.RolePostgresWAL, } - clusterVolumes := []corev1.PersistentVolumeClaim{*pgDataPVC, *walPVC} + clusterVolumes := []*corev1.PersistentVolumeClaim{pgDataPVC, walPVC} repoPVC1 := pvc.DeepCopy() repoPVC1.Name = "testrepovol1" @@ -319,26 +320,24 @@ func TestGetPVCNameMethods(t *testing.T) { t.Run("get pgdata PVC", func(t *testing.T) { - pvcNames, err := getPGPVCName(map[string]string{ + pvcNames := getPVCName(clusterVolumes, labels.SelectorFromSet(map[string]string{ naming.LabelCluster: cluster.Name, naming.LabelInstanceSet: "testinstance1", naming.LabelInstance: "testinstance1-abcd", naming.LabelRole: naming.RolePostgresData, - }, clusterVolumes) - assert.NilError(t, err) + })) assert.Assert(t, pvcNames == "testpgdatavol") }) t.Run("get wal PVC", func(t *testing.T) { - pvcNames, err := getPGPVCName(map[string]string{ + pvcNames := getPVCName(clusterVolumes, labels.SelectorFromSet(map[string]string{ naming.LabelCluster: cluster.Name, naming.LabelInstanceSet: "testinstance1", naming.LabelInstance: "testinstance1-abcd", naming.LabelRole: naming.RolePostgresWAL, - }, clusterVolumes) - assert.NilError(t, err) + })) assert.Assert(t, pvcNames == "testwalvol") }) diff --git a/internal/initialize/primitives.go b/internal/initialize/primitives.go index 9bc264f88..26b7ac2d3 100644 --- a/internal/initialize/primitives.go +++ b/internal/initialize/primitives.go @@ -35,5 +35,14 @@ func Map[M ~map[K]V, K comparable, V any](m *M) { // Pointer returns a pointer to v. func Pointer[T any](v T) *T { return &v } +// Pointers returns a slice of pointers to the items in v. +func Pointers[T any](v ...T) []*T { + p := make([]*T, len(v)) + for i := range v { + p[i] = &v[i] + } + return p +} + // String returns a pointer to v. 
func String(v string) *string { return &v } diff --git a/internal/initialize/primitives_test.go b/internal/initialize/primitives_test.go index e39898b4f..36790e4ae 100644 --- a/internal/initialize/primitives_test.go +++ b/internal/initialize/primitives_test.go @@ -190,6 +190,60 @@ func TestPointer(t *testing.T) { }) } +func TestPointers(t *testing.T) { + t.Run("arguments", func(t *testing.T) { + assert.Assert(t, nil != initialize.Pointers[int](), "does not return nil slice") + assert.DeepEqual(t, []*int{}, initialize.Pointers[int]()) + + s1 := initialize.Pointers(0, -99, 42) + if assert.Check(t, len(s1) == 3, "got %#v", s1) { + if assert.Check(t, s1[0] != nil) { + assert.Equal(t, *s1[0], 0) + } + if assert.Check(t, s1[1] != nil) { + assert.Equal(t, *s1[1], -99) + } + if assert.Check(t, s1[2] != nil) { + assert.Equal(t, *s1[2], 42) + } + } + + // Values are the same, but pointers differ. + s2 := initialize.Pointers(0, -99, 42) + assert.DeepEqual(t, s1, s2) + assert.Assert(t, s1[0] != s2[0]) + assert.Assert(t, s1[1] != s2[1]) + assert.Assert(t, s1[2] != s2[2]) + }) + + t.Run("slice", func(t *testing.T) { + var z []string + assert.Assert(t, nil != initialize.Pointers(z...), "does not return nil slice") + assert.DeepEqual(t, []*string{}, initialize.Pointers(z...)) + + v := []string{"doot", "", "baz"} + s1 := initialize.Pointers(v...) + if assert.Check(t, len(s1) == 3, "got %#v", s1) { + if assert.Check(t, s1[0] != nil) { + assert.Equal(t, *s1[0], "doot") + } + if assert.Check(t, s1[1] != nil) { + assert.Equal(t, *s1[1], "") + } + if assert.Check(t, s1[2] != nil) { + assert.Equal(t, *s1[2], "baz") + } + } + + // Values and pointers are the same. + s2 := initialize.Pointers(v...) + assert.DeepEqual(t, s1, s2) + assert.Assert(t, s1[0] == s2[0]) + assert.Assert(t, s1[1] == s2[1]) + assert.Assert(t, s1[2] == s2[2]) + }) +} + func TestString(t *testing.T) { z := initialize.String("") if assert.Check(t, z != nil) { From 66883b92bfefbeebea8622c62f8cad85705e769b Mon Sep 17 00:00:00 2001 From: Chris Bandy Date: Wed, 27 Nov 2024 10:23:21 -0600 Subject: [PATCH 2/5] Pass slices of *batchv1.Job rather than *batchv1.JobList --- .../controller/postgrescluster/volumes.go | 35 ++++++++++--------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/internal/controller/postgrescluster/volumes.go b/internal/controller/postgrescluster/volumes.go index 838c2d4d9..f0c8d36db 100644 --- a/internal/controller/postgrescluster/volumes.go +++ b/internal/controller/postgrescluster/volumes.go @@ -354,8 +354,8 @@ func (r *Reconciler) reconcileDirMoveJobs(ctx context.Context, if cluster.Spec.DataSource != nil && cluster.Spec.DataSource.Volumes != nil { - moveJobs := &batchv1.JobList{} - if err := r.Client.List(ctx, moveJobs, &client.ListOptions{ + var list batchv1.JobList + if err := r.Client.List(ctx, &list, &client.ListOptions{ Namespace: cluster.Namespace, LabelSelector: naming.DirectoryMoveJobLabels(cluster.Name).AsSelector(), }); err != nil { @@ -364,6 +364,7 @@ func (r *Reconciler) reconcileDirMoveJobs(ctx context.Context, var err error var pgDataReturn, pgWALReturn, repoReturn bool + var moveJobs = initialize.Pointers(list.Items...) if cluster.Spec.DataSource.Volumes.PGDataVolume != nil && cluster.Spec.DataSource.Volumes.PGDataVolume. @@ -405,19 +406,19 @@ func (r *Reconciler) reconcileDirMoveJobs(ctx context.Context, // main control loop should continue or return early to allow time for the job // to complete. 
func (r *Reconciler) reconcileMovePGDataDir(ctx context.Context, - cluster *v1beta1.PostgresCluster, moveJobs *batchv1.JobList) (bool, error) { + cluster *v1beta1.PostgresCluster, moveJobs []*batchv1.Job) (bool, error) { moveDirJob := &batchv1.Job{} moveDirJob.ObjectMeta = naming.MovePGDataDirJob(cluster) // check for an existing Job - for i := range moveJobs.Items { - if moveJobs.Items[i].Name == moveDirJob.Name { - if jobCompleted(&moveJobs.Items[i]) { + for i := range moveJobs { + if moveJobs[i].Name == moveDirJob.Name { + if jobCompleted(moveJobs[i]) { // if the Job is completed, return as this only needs to run once return false, nil } - if !jobFailed(&moveJobs.Items[i]) { + if !jobFailed(moveJobs[i]) { // if the Job otherwise exists and has not failed, return and // give the Job time to finish return true, nil @@ -530,19 +531,19 @@ func (r *Reconciler) reconcileMovePGDataDir(ctx context.Context, // main control loop should continue or return early to allow time for the job // to complete. func (r *Reconciler) reconcileMoveWALDir(ctx context.Context, - cluster *v1beta1.PostgresCluster, moveJobs *batchv1.JobList) (bool, error) { + cluster *v1beta1.PostgresCluster, moveJobs []*batchv1.Job) (bool, error) { moveDirJob := &batchv1.Job{} moveDirJob.ObjectMeta = naming.MovePGWALDirJob(cluster) // check for an existing Job - for i := range moveJobs.Items { - if moveJobs.Items[i].Name == moveDirJob.Name { - if jobCompleted(&moveJobs.Items[i]) { + for i := range moveJobs { + if moveJobs[i].Name == moveDirJob.Name { + if jobCompleted(moveJobs[i]) { // if the Job is completed, return as this only needs to run once return false, nil } - if !jobFailed(&moveJobs.Items[i]) { + if !jobFailed(moveJobs[i]) { // if the Job otherwise exists and has not failed, return and // give the Job time to finish return true, nil @@ -649,19 +650,19 @@ func (r *Reconciler) reconcileMoveWALDir(ctx context.Context, // indicating whether the main control loop should continue or return early // to allow time for the job to complete. 
func (r *Reconciler) reconcileMoveRepoDir(ctx context.Context, - cluster *v1beta1.PostgresCluster, moveJobs *batchv1.JobList) (bool, error) { + cluster *v1beta1.PostgresCluster, moveJobs []*batchv1.Job) (bool, error) { moveDirJob := &batchv1.Job{} moveDirJob.ObjectMeta = naming.MovePGBackRestRepoDirJob(cluster) // check for an existing Job - for i := range moveJobs.Items { - if moveJobs.Items[i].Name == moveDirJob.Name { - if jobCompleted(&moveJobs.Items[i]) { + for i := range moveJobs { + if moveJobs[i].Name == moveDirJob.Name { + if jobCompleted(moveJobs[i]) { // if the Job is completed, return as this only needs to run once return false, nil } - if !jobFailed(&moveJobs.Items[i]) { + if !jobFailed(moveJobs[i]) { // if the Job otherwise exists and has not failed, return and // give the Job time to finish return true, nil From 9cacf66a7398f9723140c1df170fadb2ace4bb2c Mon Sep 17 00:00:00 2001 From: Chris Bandy Date: Wed, 27 Nov 2024 10:21:47 -0600 Subject: [PATCH 3/5] Pass slices of *VolumeSnapshot rather than *VolumeSnapshotList --- .../controller/postgrescluster/snapshots.go | 40 +-- .../postgrescluster/snapshots_test.go | 264 ++++++++---------- 2 files changed, 142 insertions(+), 162 deletions(-) diff --git a/internal/controller/postgrescluster/snapshots.go b/internal/controller/postgrescluster/snapshots.go index 932aa19fd..9d10d5547 100644 --- a/internal/controller/postgrescluster/snapshots.go +++ b/internal/controller/postgrescluster/snapshots.go @@ -101,10 +101,10 @@ func (r *Reconciler) reconcileVolumeSnapshots(ctx context.Context, if snapshotWithLatestError != nil { r.Recorder.Event(postgrescluster, corev1.EventTypeWarning, "VolumeSnapshotError", *snapshotWithLatestError.Status.Error.Message) - for _, snapshot := range snapshots.Items { + for _, snapshot := range snapshots { if snapshot.Status != nil && snapshot.Status.Error != nil && snapshot.Status.Error.Time.Before(snapshotWithLatestError.Status.Error.Time) { - err = r.deleteControlled(ctx, postgrescluster, &snapshot) + err = r.deleteControlled(ctx, postgrescluster, snapshot) if err != nil { return err } @@ -123,7 +123,7 @@ func (r *Reconciler) reconcileVolumeSnapshots(ctx context.Context, // the dedicated pvc. var snapshotForPvcUpdateIdx int snapshotFoundForPvcUpdate := false - for idx, snapshot := range snapshots.Items { + for idx, snapshot := range snapshots { if snapshot.GetAnnotations()[naming.PGBackRestBackupJobCompletion] == pvcUpdateTimeStamp { snapshotForPvcUpdateIdx = idx snapshotFoundForPvcUpdate = true @@ -132,11 +132,11 @@ func (r *Reconciler) reconcileVolumeSnapshots(ctx context.Context, // If a snapshot exists for the latest backup that has been restored into the dedicated pvc // and the snapshot is Ready, delete all other snapshots. 
- if snapshotFoundForPvcUpdate && snapshots.Items[snapshotForPvcUpdateIdx].Status.ReadyToUse != nil && - *snapshots.Items[snapshotForPvcUpdateIdx].Status.ReadyToUse { - for idx, snapshot := range snapshots.Items { + if snapshotFoundForPvcUpdate && snapshots[snapshotForPvcUpdateIdx].Status.ReadyToUse != nil && + *snapshots[snapshotForPvcUpdateIdx].Status.ReadyToUse { + for idx, snapshot := range snapshots { if idx != snapshotForPvcUpdateIdx { - err = r.deleteControlled(ctx, postgrescluster, &snapshot) + err = r.deleteControlled(ctx, postgrescluster, snapshot) if err != nil { return err } @@ -523,16 +523,16 @@ func (r *Reconciler) getLatestCompleteBackupJob(ctx context.Context, // getSnapshotWithLatestError takes a VolumeSnapshotList and returns a pointer to the // snapshot that has most recently had an error. If no snapshot errors exist // then it returns nil. -func getSnapshotWithLatestError(snapshots *volumesnapshotv1.VolumeSnapshotList) *volumesnapshotv1.VolumeSnapshot { +func getSnapshotWithLatestError(snapshots []*volumesnapshotv1.VolumeSnapshot) *volumesnapshotv1.VolumeSnapshot { zeroTime := metav1.NewTime(time.Time{}) - snapshotWithLatestError := volumesnapshotv1.VolumeSnapshot{ + snapshotWithLatestError := &volumesnapshotv1.VolumeSnapshot{ Status: &volumesnapshotv1.VolumeSnapshotStatus{ Error: &volumesnapshotv1.VolumeSnapshotError{ Time: &zeroTime, }, }, } - for _, snapshot := range snapshots.Items { + for _, snapshot := range snapshots { if snapshot.Status != nil && snapshot.Status.Error != nil && snapshotWithLatestError.Status.Error.Time.Before(snapshot.Status.Error.Time) { snapshotWithLatestError = snapshot @@ -543,12 +543,12 @@ func getSnapshotWithLatestError(snapshots *volumesnapshotv1.VolumeSnapshotList) return nil } - return &snapshotWithLatestError + return snapshotWithLatestError } // getSnapshotsForCluster gets all the VolumeSnapshots for a given postgrescluster. func (r *Reconciler) getSnapshotsForCluster(ctx context.Context, cluster *v1beta1.PostgresCluster) ( - *volumesnapshotv1.VolumeSnapshotList, error) { + []*volumesnapshotv1.VolumeSnapshot, error) { selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name)) if err != nil { @@ -561,18 +561,18 @@ func (r *Reconciler) getSnapshotsForCluster(ctx context.Context, cluster *v1beta client.MatchingLabelsSelector{Selector: selectSnapshots}, )) - return snapshots, err + return initialize.Pointers(snapshots.Items...), err } // getLatestReadySnapshot takes a VolumeSnapshotList and returns the latest ready VolumeSnapshot. 
-func getLatestReadySnapshot(snapshots *volumesnapshotv1.VolumeSnapshotList) *volumesnapshotv1.VolumeSnapshot { +func getLatestReadySnapshot(snapshots []*volumesnapshotv1.VolumeSnapshot) *volumesnapshotv1.VolumeSnapshot { zeroTime := metav1.NewTime(time.Time{}) - latestReadySnapshot := volumesnapshotv1.VolumeSnapshot{ + latestReadySnapshot := &volumesnapshotv1.VolumeSnapshot{ Status: &volumesnapshotv1.VolumeSnapshotStatus{ CreationTime: &zeroTime, }, } - for _, snapshot := range snapshots.Items { + for _, snapshot := range snapshots { if snapshot.Status != nil && snapshot.Status.ReadyToUse != nil && *snapshot.Status.ReadyToUse && latestReadySnapshot.Status.CreationTime.Before(snapshot.Status.CreationTime) { latestReadySnapshot = snapshot @@ -583,17 +583,17 @@ func getLatestReadySnapshot(snapshots *volumesnapshotv1.VolumeSnapshotList) *vol return nil } - return &latestReadySnapshot + return latestReadySnapshot } // deleteSnapshots takes a postgrescluster and a snapshot list and deletes all snapshots // in the list that are controlled by the provided postgrescluster. func (r *Reconciler) deleteSnapshots(ctx context.Context, - postgrescluster *v1beta1.PostgresCluster, snapshots *volumesnapshotv1.VolumeSnapshotList) error { + postgrescluster *v1beta1.PostgresCluster, snapshots []*volumesnapshotv1.VolumeSnapshot) error { - for i := range snapshots.Items { + for i := range snapshots { err := errors.WithStack(client.IgnoreNotFound( - r.deleteControlled(ctx, postgrescluster, &snapshots.Items[i]))) + r.deleteControlled(ctx, postgrescluster, snapshots[i]))) if err != nil { return err } diff --git a/internal/controller/postgrescluster/snapshots_test.go b/internal/controller/postgrescluster/snapshots_test.go index ca149d7c8..b5ad58208 100644 --- a/internal/controller/postgrescluster/snapshots_test.go +++ b/internal/controller/postgrescluster/snapshots_test.go @@ -917,106 +917,98 @@ func TestGetLatestCompleteBackupJob(t *testing.T) { func TestGetSnapshotWithLatestError(t *testing.T) { t.Run("NoSnapshots", func(t *testing.T) { - snapshotList := &volumesnapshotv1.VolumeSnapshotList{} - snapshotWithLatestError := getSnapshotWithLatestError(snapshotList) + snapshots := []*volumesnapshotv1.VolumeSnapshot{} + snapshotWithLatestError := getSnapshotWithLatestError(snapshots) assert.Check(t, snapshotWithLatestError == nil) }) t.Run("NoSnapshotsWithStatus", func(t *testing.T) { - snapshotList := &volumesnapshotv1.VolumeSnapshotList{ - Items: []volumesnapshotv1.VolumeSnapshot{ - {}, - {}, - }, + snapshots := []*volumesnapshotv1.VolumeSnapshot{ + {}, + {}, } - snapshotWithLatestError := getSnapshotWithLatestError(snapshotList) + snapshotWithLatestError := getSnapshotWithLatestError(snapshots) assert.Check(t, snapshotWithLatestError == nil) }) t.Run("NoSnapshotsWithErrors", func(t *testing.T) { - snapshotList := &volumesnapshotv1.VolumeSnapshotList{ - Items: []volumesnapshotv1.VolumeSnapshot{ - { - Status: &volumesnapshotv1.VolumeSnapshotStatus{ - ReadyToUse: initialize.Bool(true), - }, + snapshots := []*volumesnapshotv1.VolumeSnapshot{ + { + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(true), }, - { - Status: &volumesnapshotv1.VolumeSnapshotStatus{ - ReadyToUse: initialize.Bool(false), - }, + }, + { + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(false), }, }, } - snapshotWithLatestError := getSnapshotWithLatestError(snapshotList) + snapshotWithLatestError := getSnapshotWithLatestError(snapshots) assert.Check(t, snapshotWithLatestError == nil) }) 
t.Run("OneSnapshotWithError", func(t *testing.T) { currentTime := metav1.Now() earlierTime := metav1.NewTime(currentTime.AddDate(-1, 0, 0)) - snapshotList := &volumesnapshotv1.VolumeSnapshotList{ - Items: []volumesnapshotv1.VolumeSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "good-snapshot", - UID: "the-uid-123", - }, - Status: &volumesnapshotv1.VolumeSnapshotStatus{ - CreationTime: ¤tTime, - ReadyToUse: initialize.Bool(true), - }, + snapshots := []*volumesnapshotv1.VolumeSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "good-snapshot", + UID: "the-uid-123", }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "bad-snapshot", - UID: "the-uid-456", - }, - Status: &volumesnapshotv1.VolumeSnapshotStatus{ - ReadyToUse: initialize.Bool(false), - Error: &volumesnapshotv1.VolumeSnapshotError{ - Time: &earlierTime, - }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + CreationTime: ¤tTime, + ReadyToUse: initialize.Bool(true), + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "bad-snapshot", + UID: "the-uid-456", + }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(false), + Error: &volumesnapshotv1.VolumeSnapshotError{ + Time: &earlierTime, }, }, }, } - snapshotWithLatestError := getSnapshotWithLatestError(snapshotList) + snapshotWithLatestError := getSnapshotWithLatestError(snapshots) assert.Equal(t, snapshotWithLatestError.ObjectMeta.Name, "bad-snapshot") }) t.Run("TwoSnapshotsWithErrors", func(t *testing.T) { currentTime := metav1.Now() earlierTime := metav1.NewTime(currentTime.AddDate(-1, 0, 0)) - snapshotList := &volumesnapshotv1.VolumeSnapshotList{ - Items: []volumesnapshotv1.VolumeSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "first-bad-snapshot", - UID: "the-uid-123", - }, - Status: &volumesnapshotv1.VolumeSnapshotStatus{ - ReadyToUse: initialize.Bool(false), - Error: &volumesnapshotv1.VolumeSnapshotError{ - Time: &earlierTime, - }, - }, + snapshots := []*volumesnapshotv1.VolumeSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "first-bad-snapshot", + UID: "the-uid-123", }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "second-bad-snapshot", - UID: "the-uid-456", + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(false), + Error: &volumesnapshotv1.VolumeSnapshotError{ + Time: &earlierTime, }, - Status: &volumesnapshotv1.VolumeSnapshotStatus{ - ReadyToUse: initialize.Bool(false), - Error: &volumesnapshotv1.VolumeSnapshotError{ - Time: ¤tTime, - }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "second-bad-snapshot", + UID: "the-uid-456", + }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(false), + Error: &volumesnapshotv1.VolumeSnapshotError{ + Time: ¤tTime, }, }, }, } - snapshotWithLatestError := getSnapshotWithLatestError(snapshotList) + snapshotWithLatestError := getSnapshotWithLatestError(snapshots) assert.Equal(t, snapshotWithLatestError.ObjectMeta.Name, "second-bad-snapshot") }) } @@ -1038,7 +1030,7 @@ func TestGetSnapshotsForCluster(t *testing.T) { t.Run("NoSnapshots", func(t *testing.T) { snapshots, err := r.getSnapshotsForCluster(ctx, cluster) assert.NilError(t, err) - assert.Equal(t, len(snapshots.Items), 0) + assert.Equal(t, len(snapshots), 0) }) t.Run("NoSnapshotsForCluster", func(t *testing.T) { @@ -1061,7 +1053,7 @@ func TestGetSnapshotsForCluster(t *testing.T) { snapshots, err := r.getSnapshotsForCluster(ctx, cluster) assert.NilError(t, err) - assert.Equal(t, len(snapshots.Items), 0) + assert.Equal(t, len(snapshots), 0) }) 
t.Run("OneSnapshotForCluster", func(t *testing.T) { @@ -1102,8 +1094,8 @@ func TestGetSnapshotsForCluster(t *testing.T) { snapshots, err := r.getSnapshotsForCluster(ctx, cluster) assert.NilError(t, err) - assert.Equal(t, len(snapshots.Items), 1) - assert.Equal(t, snapshots.Items[0].Name, "another-snapshot") + assert.Equal(t, len(snapshots), 1) + assert.Equal(t, snapshots[0].Name, "another-snapshot") }) t.Run("TwoSnapshotsForCluster", func(t *testing.T) { @@ -1144,106 +1136,98 @@ func TestGetSnapshotsForCluster(t *testing.T) { snapshots, err := r.getSnapshotsForCluster(ctx, cluster) assert.NilError(t, err) - assert.Equal(t, len(snapshots.Items), 2) + assert.Equal(t, len(snapshots), 2) }) } func TestGetLatestReadySnapshot(t *testing.T) { t.Run("NoSnapshots", func(t *testing.T) { - snapshotList := &volumesnapshotv1.VolumeSnapshotList{} - latestReadySnapshot := getLatestReadySnapshot(snapshotList) + snapshots := []*volumesnapshotv1.VolumeSnapshot{} + latestReadySnapshot := getLatestReadySnapshot(snapshots) assert.Assert(t, latestReadySnapshot == nil) }) t.Run("NoSnapshotsWithStatus", func(t *testing.T) { - snapshotList := &volumesnapshotv1.VolumeSnapshotList{ - Items: []volumesnapshotv1.VolumeSnapshot{ - {}, - {}, - }, + snapshots := []*volumesnapshotv1.VolumeSnapshot{ + {}, + {}, } - latestReadySnapshot := getLatestReadySnapshot(snapshotList) + latestReadySnapshot := getLatestReadySnapshot(snapshots) assert.Assert(t, latestReadySnapshot == nil) }) t.Run("NoReadySnapshots", func(t *testing.T) { - snapshotList := &volumesnapshotv1.VolumeSnapshotList{ - Items: []volumesnapshotv1.VolumeSnapshot{ - { - Status: &volumesnapshotv1.VolumeSnapshotStatus{ - ReadyToUse: initialize.Bool(false), - }, + snapshots := []*volumesnapshotv1.VolumeSnapshot{ + { + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(false), }, - { - Status: &volumesnapshotv1.VolumeSnapshotStatus{ - ReadyToUse: initialize.Bool(false), - }, + }, + { + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + ReadyToUse: initialize.Bool(false), }, }, } - latestReadySnapshot := getLatestReadySnapshot(snapshotList) + latestReadySnapshot := getLatestReadySnapshot(snapshots) assert.Assert(t, latestReadySnapshot == nil) }) t.Run("OneReadySnapshot", func(t *testing.T) { currentTime := metav1.Now() earlierTime := metav1.NewTime(currentTime.AddDate(-1, 0, 0)) - snapshotList := &volumesnapshotv1.VolumeSnapshotList{ - Items: []volumesnapshotv1.VolumeSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "good-snapshot", - UID: "the-uid-123", - }, - Status: &volumesnapshotv1.VolumeSnapshotStatus{ - CreationTime: &earlierTime, - ReadyToUse: initialize.Bool(true), - }, + snapshots := []*volumesnapshotv1.VolumeSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "good-snapshot", + UID: "the-uid-123", }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "bad-snapshot", - UID: "the-uid-456", - }, - Status: &volumesnapshotv1.VolumeSnapshotStatus{ - CreationTime: ¤tTime, - ReadyToUse: initialize.Bool(false), - }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + CreationTime: &earlierTime, + ReadyToUse: initialize.Bool(true), + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "bad-snapshot", + UID: "the-uid-456", + }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + CreationTime: ¤tTime, + ReadyToUse: initialize.Bool(false), }, }, } - latestReadySnapshot := getLatestReadySnapshot(snapshotList) + latestReadySnapshot := getLatestReadySnapshot(snapshots) assert.Equal(t, latestReadySnapshot.ObjectMeta.Name, "good-snapshot") }) 
t.Run("TwoReadySnapshots", func(t *testing.T) { currentTime := metav1.Now() earlierTime := metav1.NewTime(currentTime.AddDate(-1, 0, 0)) - snapshotList := &volumesnapshotv1.VolumeSnapshotList{ - Items: []volumesnapshotv1.VolumeSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "first-good-snapshot", - UID: "the-uid-123", - }, - Status: &volumesnapshotv1.VolumeSnapshotStatus{ - CreationTime: &earlierTime, - ReadyToUse: initialize.Bool(true), - }, + snapshots := []*volumesnapshotv1.VolumeSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "first-good-snapshot", + UID: "the-uid-123", }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "second-good-snapshot", - UID: "the-uid-456", - }, - Status: &volumesnapshotv1.VolumeSnapshotStatus{ - CreationTime: ¤tTime, - ReadyToUse: initialize.Bool(true), - }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + CreationTime: &earlierTime, + ReadyToUse: initialize.Bool(true), + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "second-good-snapshot", + UID: "the-uid-456", + }, + Status: &volumesnapshotv1.VolumeSnapshotStatus{ + CreationTime: ¤tTime, + ReadyToUse: initialize.Bool(true), }, }, } - latestReadySnapshot := getLatestReadySnapshot(snapshotList) + latestReadySnapshot := getLatestReadySnapshot(snapshots) assert.Equal(t, latestReadySnapshot.ObjectMeta.Name, "second-good-snapshot") }) } @@ -1275,8 +1259,8 @@ func TestDeleteSnapshots(t *testing.T) { }) t.Run("NoSnapshots", func(t *testing.T) { - snapshotList := &volumesnapshotv1.VolumeSnapshotList{} - err := r.deleteSnapshots(ctx, cluster, snapshotList) + snapshots := []*volumesnapshotv1.VolumeSnapshot{} + err := r.deleteSnapshots(ctx, cluster, snapshots) assert.NilError(t, err) }) @@ -1300,12 +1284,10 @@ func TestDeleteSnapshots(t *testing.T) { assert.NilError(t, r.setControllerReference(rhinoCluster, snapshot1)) assert.NilError(t, r.apply(ctx, snapshot1)) - snapshotList := &volumesnapshotv1.VolumeSnapshotList{ - Items: []volumesnapshotv1.VolumeSnapshot{ - *snapshot1, - }, + snapshots := []*volumesnapshotv1.VolumeSnapshot{ + snapshot1, } - assert.NilError(t, r.deleteSnapshots(ctx, cluster, snapshotList)) + assert.NilError(t, r.deleteSnapshots(ctx, cluster, snapshots)) existingSnapshots := &volumesnapshotv1.VolumeSnapshotList{} assert.NilError(t, r.Client.List(ctx, existingSnapshots, @@ -1352,12 +1334,10 @@ func TestDeleteSnapshots(t *testing.T) { assert.NilError(t, r.setControllerReference(cluster, snapshot2)) assert.NilError(t, r.apply(ctx, snapshot2)) - snapshotList := &volumesnapshotv1.VolumeSnapshotList{ - Items: []volumesnapshotv1.VolumeSnapshot{ - *snapshot1, *snapshot2, - }, + snapshots := []*volumesnapshotv1.VolumeSnapshot{ + snapshot1, snapshot2, } - assert.NilError(t, r.deleteSnapshots(ctx, cluster, snapshotList)) + assert.NilError(t, r.deleteSnapshots(ctx, cluster, snapshots)) existingSnapshots := &volumesnapshotv1.VolumeSnapshotList{} assert.NilError(t, r.Client.List(ctx, existingSnapshots, From 34cdcbfbb7f3c8cd0ae03160ad57fe0c47388a7b Mon Sep 17 00:00:00 2001 From: Chris Bandy Date: Wed, 27 Nov 2024 10:26:18 -0600 Subject: [PATCH 4/5] Pass slices of *PostgresCluster rather than *PostgresClusterList --- .../standalone_pgadmin/configmap.go | 17 +++++++------- .../standalone_pgadmin/configmap_test.go | 13 +++++------ .../standalone_pgadmin/controller.go | 2 +- .../standalone_pgadmin/postgrescluster.go | 22 +++++++++---------- 4 files changed, 25 insertions(+), 29 deletions(-) diff --git a/internal/controller/standalone_pgadmin/configmap.go 
b/internal/controller/standalone_pgadmin/configmap.go index d1ec39bf1..4d3a2f1a8 100644 --- a/internal/controller/standalone_pgadmin/configmap.go +++ b/internal/controller/standalone_pgadmin/configmap.go @@ -9,8 +9,10 @@ import ( "context" "encoding/json" "fmt" + "slices" "sort" "strconv" + "strings" corev1 "k8s.io/api/core/v1" @@ -27,7 +29,7 @@ import ( // reconcilePGAdminConfigMap writes the ConfigMap for pgAdmin. func (r *PGAdminReconciler) reconcilePGAdminConfigMap( ctx context.Context, pgadmin *v1beta1.PGAdmin, - clusters map[string]*v1beta1.PostgresClusterList, + clusters map[string][]*v1beta1.PostgresCluster, ) (*corev1.ConfigMap, error) { configmap, err := configmap(pgadmin, clusters) if err == nil { @@ -42,7 +44,7 @@ func (r *PGAdminReconciler) reconcilePGAdminConfigMap( // configmap returns a v1.ConfigMap for pgAdmin. func configmap(pgadmin *v1beta1.PGAdmin, - clusters map[string]*v1beta1.PostgresClusterList, + clusters map[string][]*v1beta1.PostgresCluster, ) (*corev1.ConfigMap, error) { configmap := &corev1.ConfigMap{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} configmap.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) @@ -126,7 +128,7 @@ func generateConfig(pgadmin *v1beta1.PGAdmin) (string, error) { // } // } func generateClusterConfig( - clusters map[string]*v1beta1.PostgresClusterList, + clusters map[string][]*v1beta1.PostgresCluster, ) (string, error) { // To avoid spurious reconciles, the following value must not change when // the spec does not change. [json.Encoder] and [json.Marshal] do this by @@ -149,11 +151,10 @@ func generateClusterConfig( clusterServers := map[int]any{} for _, serverGroupName := range keys { - sort.Slice(clusters[serverGroupName].Items, - func(i, j int) bool { - return clusters[serverGroupName].Items[i].Name < clusters[serverGroupName].Items[j].Name - }) - for _, cluster := range clusters[serverGroupName].Items { + slices.SortFunc(clusters[serverGroupName], func(a, b *v1beta1.PostgresCluster) int { + return strings.Compare(a.Name, b.Name) + }) + for _, cluster := range clusters[serverGroupName] { object := map[string]any{ "Name": cluster.Name, "Group": serverGroupName, diff --git a/internal/controller/standalone_pgadmin/configmap_test.go b/internal/controller/standalone_pgadmin/configmap_test.go index 5a844e520..9cdbda2f2 100644 --- a/internal/controller/standalone_pgadmin/configmap_test.go +++ b/internal/controller/standalone_pgadmin/configmap_test.go @@ -78,13 +78,10 @@ func TestGenerateClusterConfig(t *testing.T) { cluster := testCluster() cluster.Namespace = "postgres-operator" - clusterList := &v1beta1.PostgresClusterList{ - Items: []v1beta1.PostgresCluster{*cluster, *cluster}, - } - clusters := map[string]*v1beta1.PostgresClusterList{ - "shared": clusterList, - "test": clusterList, - "hello": clusterList, + clusters := map[string][]*v1beta1.PostgresCluster{ + "shared": {cluster, cluster}, + "test": {cluster, cluster}, + "hello": {cluster, cluster}, } expectedString := `{ @@ -163,7 +160,7 @@ func TestGeneratePGAdminConfigMap(t *testing.T) { pgadmin := new(v1beta1.PGAdmin) pgadmin.Namespace = "some-ns" pgadmin.Name = "pg1" - clusters := map[string]*v1beta1.PostgresClusterList{} + clusters := map[string][]*v1beta1.PostgresCluster{} t.Run("Data,ObjectMeta,TypeMeta", func(t *testing.T) { pgadmin := pgadmin.DeepCopy() diff --git a/internal/controller/standalone_pgadmin/controller.go b/internal/controller/standalone_pgadmin/controller.go index 8edb22cd5..55d5461f8 100644 --- 
a/internal/controller/standalone_pgadmin/controller.go +++ b/internal/controller/standalone_pgadmin/controller.go @@ -110,7 +110,7 @@ func (r *PGAdminReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct var ( configmap *corev1.ConfigMap dataVolume *corev1.PersistentVolumeClaim - clusters map[string]*v1beta1.PostgresClusterList + clusters map[string][]*v1beta1.PostgresCluster _ *corev1.Service ) diff --git a/internal/controller/standalone_pgadmin/postgrescluster.go b/internal/controller/standalone_pgadmin/postgrescluster.go index 5327b8ae7..bc7d28dea 100644 --- a/internal/controller/standalone_pgadmin/postgrescluster.go +++ b/internal/controller/standalone_pgadmin/postgrescluster.go @@ -7,11 +7,11 @@ package standalone_pgadmin import ( "context" + "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -56,33 +56,31 @@ func (r *PGAdminReconciler) findPGAdminsForPostgresCluster( func (r *PGAdminReconciler) getClustersForPGAdmin( ctx context.Context, pgAdmin *v1beta1.PGAdmin, -) (map[string]*v1beta1.PostgresClusterList, error) { - matching := make(map[string]*v1beta1.PostgresClusterList) +) (map[string][]*v1beta1.PostgresCluster, error) { + matching := make(map[string][]*v1beta1.PostgresCluster) var err error var selector labels.Selector for _, serverGroup := range pgAdmin.Spec.ServerGroups { - cluster := &v1beta1.PostgresCluster{} + var cluster v1beta1.PostgresCluster if serverGroup.PostgresClusterName != "" { - err = r.Get(ctx, types.NamespacedName{ + err = r.Get(ctx, client.ObjectKey{ Name: serverGroup.PostgresClusterName, Namespace: pgAdmin.GetNamespace(), - }, cluster) + }, &cluster) if err == nil { - matching[serverGroup.Name] = &v1beta1.PostgresClusterList{ - Items: []v1beta1.PostgresCluster{*cluster}, - } + matching[serverGroup.Name] = []*v1beta1.PostgresCluster{&cluster} } continue } if selector, err = naming.AsSelector(serverGroup.PostgresClusterSelector); err == nil { - var filteredList v1beta1.PostgresClusterList - err = r.List(ctx, &filteredList, + var list v1beta1.PostgresClusterList + err = r.List(ctx, &list, client.InNamespace(pgAdmin.Namespace), client.MatchingLabelsSelector{Selector: selector}, ) if err == nil { - matching[serverGroup.Name] = &filteredList + matching[serverGroup.Name] = initialize.Pointers(list.Items...) 
} } } From bb37061a58d5b609d843b7f71380beb1636725d1 Mon Sep 17 00:00:00 2001 From: Chris Bandy Date: Wed, 27 Nov 2024 14:43:43 -0600 Subject: [PATCH 5/5] Simplify controller watches using EnqueueRequestsFromMapFunc --- .../crunchybridgecluster_controller.go | 19 +++- .../bridge/crunchybridgecluster/watches.go | 66 +----------- .../pgupgrade/pgupgrade_controller.go | 31 +----- internal/controller/runtime/reconcile.go | 12 +++ internal/controller/runtime/reconcile_test.go | 27 +++++ .../standalone_pgadmin/controller.go | 13 ++- .../{postgrescluster.go => related.go} | 39 ++++++- .../{watches_test.go => related_test.go} | 0 .../controller/standalone_pgadmin/watches.go | 102 ------------------ 9 files changed, 100 insertions(+), 209 deletions(-) rename internal/controller/standalone_pgadmin/{postgrescluster.go => related.go} (67%) rename internal/controller/standalone_pgadmin/{watches_test.go => related_test.go} (100%) delete mode 100644 internal/controller/standalone_pgadmin/watches.go diff --git a/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go index 03d67442b..0390417c9 100644 --- a/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go +++ b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go @@ -20,10 +20,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" "github.com/crunchydata/postgres-operator/internal/bridge" "github.com/crunchydata/postgres-operator/internal/controller/runtime" - pgoRuntime "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -54,15 +55,23 @@ func (r *CrunchyBridgeClusterReconciler) SetupWithManager( For(&v1beta1.CrunchyBridgeCluster{}). Owns(&corev1.Secret{}). // Wake periodically to check Bridge API for all CrunchyBridgeClusters. - // Potentially replace with different requeue times, remove the Watch function - // Smarter: retry after a certain time for each cluster: https://gist.github.com/cbandy/a5a604e3026630c5b08cfbcdfffd2a13 + // Potentially replace with different requeue times + // Smarter: retry after a certain time for each cluster WatchesRawSource( - pgoRuntime.NewTickerImmediate(5*time.Minute, event.GenericEvent{}, r.Watch()), + runtime.NewTickerImmediate(5*time.Minute, event.GenericEvent{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, _ client.Object) []ctrl.Request { + var list v1beta1.CrunchyBridgeClusterList + _ = r.List(ctx, &list) + return runtime.Requests(initialize.Pointers(list.Items...)...) + }), + ), ). // Watch secrets and filter for secrets mentioned by CrunchyBridgeClusters Watches( &corev1.Secret{}, - r.watchForRelatedSecret(), + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, secret client.Object) []ctrl.Request { + return runtime.Requests(r.findCrunchyBridgeClustersForSecret(ctx, client.ObjectKeyFromObject(secret))...) + }), ). 
Complete(r) } diff --git a/internal/bridge/crunchybridgecluster/watches.go b/internal/bridge/crunchybridgecluster/watches.go index 79687b347..37f90577d 100644 --- a/internal/bridge/crunchybridgecluster/watches.go +++ b/internal/bridge/crunchybridgecluster/watches.go @@ -7,48 +7,11 @@ package crunchybridgecluster import ( "context" - "k8s.io/client-go/util/workqueue" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) -// watchForRelatedSecret handles create/update/delete events for secrets, -// passing the Secret ObjectKey to findCrunchyBridgeClustersForSecret -func (r *CrunchyBridgeClusterReconciler) watchForRelatedSecret() handler.EventHandler { - handle := func(ctx context.Context, secret client.Object, q workqueue.RateLimitingInterface) { - key := client.ObjectKeyFromObject(secret) - - for _, cluster := range r.findCrunchyBridgeClustersForSecret(ctx, key) { - q.Add(ctrl.Request{ - NamespacedName: client.ObjectKeyFromObject(cluster), - }) - } - } - - return handler.Funcs{ - CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) { - handle(ctx, e.Object, q) - }, - UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { - handle(ctx, e.ObjectNew, q) - }, - // If the secret is deleted, we want to reconcile - // in order to emit an event/status about this problem. - // We will also emit a matching event/status about this problem - // when we reconcile the cluster and can't find the secret. - // That way, users will get two alerts: one when the secret is deleted - // and another when the cluster is being reconciled. - DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) { - handle(ctx, e.Object, q) - }, - } -} - //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters",verbs={list} // findCrunchyBridgeClustersForSecret returns CrunchyBridgeClusters @@ -60,7 +23,7 @@ func (r *CrunchyBridgeClusterReconciler) findCrunchyBridgeClustersForSecret( var clusters v1beta1.CrunchyBridgeClusterList // NOTE: If this becomes slow due to a large number of CrunchyBridgeClusters in a single - // namespace, we can configure the [ctrl.Manager] field indexer and pass a + // namespace, we can configure the [manager.Manager] field indexer and pass a // [fields.Selector] here. // - https://book.kubebuilder.io/reference/watching-resources/externally-managed.html if err := r.List(ctx, &clusters, &client.ListOptions{ @@ -74,30 +37,3 @@ func (r *CrunchyBridgeClusterReconciler) findCrunchyBridgeClustersForSecret( } return matching } - -//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters",verbs={list} - -// Watch enqueues all existing CrunchyBridgeClusters for reconciles. 
-func (r *CrunchyBridgeClusterReconciler) Watch() handler.EventHandler { - return handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, _ client.Object) []reconcile.Request { - log := ctrl.LoggerFrom(ctx) - - crunchyBridgeClusterList := &v1beta1.CrunchyBridgeClusterList{} - if err := r.List(ctx, crunchyBridgeClusterList); err != nil { - log.Error(err, "Error listing CrunchyBridgeClusters.") - } - - reconcileRequests := []reconcile.Request{} - for index := range crunchyBridgeClusterList.Items { - reconcileRequests = append(reconcileRequests, - reconcile.Request{ - NamespacedName: client.ObjectKeyFromObject( - &crunchyBridgeClusterList.Items[index], - ), - }, - ) - } - - return reconcileRequests - }) -} diff --git a/internal/controller/pgupgrade/pgupgrade_controller.go b/internal/controller/pgupgrade/pgupgrade_controller.go index d6d145b79..0717607d7 100644 --- a/internal/controller/pgupgrade/pgupgrade_controller.go +++ b/internal/controller/pgupgrade/pgupgrade_controller.go @@ -14,10 +14,8 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "github.com/crunchydata/postgres-operator/internal/config" @@ -50,7 +48,9 @@ func (r *PGUpgradeReconciler) SetupWithManager(mgr ctrl.Manager) error { Owns(&batchv1.Job{}). Watches( v1beta1.NewPostgresCluster(), - r.watchPostgresClusters(), + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, cluster client.Object) []ctrl.Request { + return runtime.Requests(r.findUpgradesForPostgresCluster(ctx, client.ObjectKeyFromObject(cluster))...) + }), ). Complete(r) } @@ -80,31 +80,6 @@ func (r *PGUpgradeReconciler) findUpgradesForPostgresCluster( return matching } -// watchPostgresClusters returns a [handler.EventHandler] for PostgresClusters. -func (r *PGUpgradeReconciler) watchPostgresClusters() handler.Funcs { - handle := func(ctx context.Context, cluster client.Object, q workqueue.RateLimitingInterface) { - key := client.ObjectKeyFromObject(cluster) - - for _, upgrade := range r.findUpgradesForPostgresCluster(ctx, key) { - q.Add(ctrl.Request{ - NamespacedName: client.ObjectKeyFromObject(upgrade), - }) - } - } - - return handler.Funcs{ - CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) { - handle(ctx, e.Object, q) - }, - UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { - handle(ctx, e.ObjectNew, q) - }, - DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) { - handle(ctx, e.Object, q) - }, - } -} - //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgupgrades",verbs={get} //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgupgrades/status",verbs={patch} //+kubebuilder:rbac:groups="batch",resources="jobs",verbs={delete} diff --git a/internal/controller/runtime/reconcile.go b/internal/controller/runtime/reconcile.go index a2196d162..e65a66d55 100644 --- a/internal/controller/runtime/reconcile.go +++ b/internal/controller/runtime/reconcile.go @@ -7,9 +7,21 @@ package runtime import ( "time" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) +// Requests converts objects to a slice of [reconcile.Request]. 
+func Requests[T client.Object](objects ...T) []reconcile.Request { + result := make([]reconcile.Request, len(objects)) + for i := range objects { + result[i] = reconcile.Request{ + NamespacedName: client.ObjectKeyFromObject(objects[i]), + } + } + return result +} + // ErrorWithBackoff returns a Result and error that indicate a non-nil err // should be logged and measured and its [reconcile.Request] should be retried // later. When err is nil, nothing is logged and the Request is not retried. diff --git a/internal/controller/runtime/reconcile_test.go b/internal/controller/runtime/reconcile_test.go index 925b3cf47..2682ab396 100644 --- a/internal/controller/runtime/reconcile_test.go +++ b/internal/controller/runtime/reconcile_test.go @@ -10,9 +10,36 @@ import ( "time" "gotest.tools/v3/assert" + "gotest.tools/v3/assert/cmp" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) +func TestRequests(t *testing.T) { + none := Requests[client.Object]() + assert.Assert(t, none != nil, "does not return nil slice") + assert.DeepEqual(t, none, []reconcile.Request{}) + + assert.Assert(t, cmp.Panics(func() { + Requests[client.Object](nil) + }), "expected nil pointer dereference") + + // Empty request when no metadata. + assert.DeepEqual(t, Requests(new(corev1.Secret)), []reconcile.Request{{}}) + + secret := new(corev1.Secret) + secret.Namespace = "asdf" + + expected := reconcile.Request{} + expected.Namespace = "asdf" + assert.DeepEqual(t, Requests(secret), []reconcile.Request{expected}) + + secret.Name = "123" + expected.Name = "123" + assert.DeepEqual(t, Requests(secret), []reconcile.Request{expected}) +} + func TestErrorWithBackoff(t *testing.T) { result, err := ErrorWithBackoff(nil) assert.Assert(t, result.IsZero()) diff --git a/internal/controller/standalone_pgadmin/controller.go b/internal/controller/standalone_pgadmin/controller.go index 55d5461f8..d16c33b79 100644 --- a/internal/controller/standalone_pgadmin/controller.go +++ b/internal/controller/standalone_pgadmin/controller.go @@ -16,8 +16,9 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" - controllerruntime "github.com/crunchydata/postgres-operator/internal/controller/runtime" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -46,7 +47,7 @@ type PGAdminReconciler struct { func (r *PGAdminReconciler) SetupWithManager(mgr ctrl.Manager) error { if r.PodExec == nil { var err error - r.PodExec, err = controllerruntime.NewPodExecutor(mgr.GetConfig()) + r.PodExec, err = runtime.NewPodExecutor(mgr.GetConfig()) if err != nil { return err } @@ -61,11 +62,15 @@ func (r *PGAdminReconciler) SetupWithManager(mgr ctrl.Manager) error { Owns(&corev1.Service{}). Watches( v1beta1.NewPostgresCluster(), - r.watchPostgresClusters(), + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, cluster client.Object) []ctrl.Request { + return runtime.Requests(r.findPGAdminsForPostgresCluster(ctx, cluster)...) + }), ). 
Watches( &corev1.Secret{}, - r.watchForRelatedSecret(), + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, secret client.Object) []ctrl.Request { + return runtime.Requests(r.findPGAdminsForSecret(ctx, client.ObjectKeyFromObject(secret))...) + }), ). Complete(r) } diff --git a/internal/controller/standalone_pgadmin/postgrescluster.go b/internal/controller/standalone_pgadmin/related.go similarity index 67% rename from internal/controller/standalone_pgadmin/postgrescluster.go rename to internal/controller/standalone_pgadmin/related.go index bc7d28dea..4af2ea6ef 100644 --- a/internal/controller/standalone_pgadmin/postgrescluster.go +++ b/internal/controller/standalone_pgadmin/related.go @@ -27,10 +27,10 @@ func (r *PGAdminReconciler) findPGAdminsForPostgresCluster( ) // NOTE: If this becomes slow due to a large number of pgadmins in a single - // namespace, we can configure the [ctrl.Manager] field indexer and pass a + // namespace, we can configure the [manager.Manager] field indexer and pass a // [fields.Selector] here. // - https://book.kubebuilder.io/reference/watching-resources/externally-managed.html - if r.List(ctx, &pgadmins, &client.ListOptions{ + if r.Client.List(ctx, &pgadmins, &client.ListOptions{ Namespace: cluster.GetNamespace(), }) == nil { for i := range pgadmins.Items { @@ -50,7 +50,36 @@ func (r *PGAdminReconciler) findPGAdminsForPostgresCluster( return matching } -//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters",verbs={list,watch} +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgadmins",verbs={list} + +// findPGAdminsForSecret returns PGAdmins that have a user or users that have their password +// stored in the Secret +func (r *PGAdminReconciler) findPGAdminsForSecret( + ctx context.Context, secret client.ObjectKey, +) []*v1beta1.PGAdmin { + var matching []*v1beta1.PGAdmin + var pgadmins v1beta1.PGAdminList + + // NOTE: If this becomes slow due to a large number of PGAdmins in a single + // namespace, we can configure the [manager.Manager] field indexer and pass a + // [fields.Selector] here. 
+ // - https://book.kubebuilder.io/reference/watching-resources/externally-managed.html + if err := r.Client.List(ctx, &pgadmins, &client.ListOptions{ + Namespace: secret.Namespace, + }); err == nil { + for i := range pgadmins.Items { + for j := range pgadmins.Items[i].Spec.Users { + if pgadmins.Items[i].Spec.Users[j].PasswordRef.Name == secret.Name { + matching = append(matching, &pgadmins.Items[i]) + break + } + } + } + } + return matching +} + +//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters",verbs={get,list} // getClustersForPGAdmin returns clusters managed by the given pgAdmin func (r *PGAdminReconciler) getClustersForPGAdmin( @@ -64,7 +93,7 @@ func (r *PGAdminReconciler) getClustersForPGAdmin( for _, serverGroup := range pgAdmin.Spec.ServerGroups { var cluster v1beta1.PostgresCluster if serverGroup.PostgresClusterName != "" { - err = r.Get(ctx, client.ObjectKey{ + err = r.Client.Get(ctx, client.ObjectKey{ Name: serverGroup.PostgresClusterName, Namespace: pgAdmin.GetNamespace(), }, &cluster) @@ -75,7 +104,7 @@ func (r *PGAdminReconciler) getClustersForPGAdmin( } if selector, err = naming.AsSelector(serverGroup.PostgresClusterSelector); err == nil { var list v1beta1.PostgresClusterList - err = r.List(ctx, &list, + err = r.Client.List(ctx, &list, client.InNamespace(pgAdmin.Namespace), client.MatchingLabelsSelector{Selector: selector}, ) diff --git a/internal/controller/standalone_pgadmin/watches_test.go b/internal/controller/standalone_pgadmin/related_test.go similarity index 100% rename from internal/controller/standalone_pgadmin/watches_test.go rename to internal/controller/standalone_pgadmin/related_test.go diff --git a/internal/controller/standalone_pgadmin/watches.go b/internal/controller/standalone_pgadmin/watches.go deleted file mode 100644 index 49ac1ebd2..000000000 --- a/internal/controller/standalone_pgadmin/watches.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. -// -// SPDX-License-Identifier: Apache-2.0 - -package standalone_pgadmin - -import ( - "context" - - "k8s.io/client-go/util/workqueue" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/handler" - - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" -) - -// watchPostgresClusters returns a [handler.EventHandler] for PostgresClusters. 
-func (r *PGAdminReconciler) watchPostgresClusters() handler.Funcs { - handle := func(ctx context.Context, cluster client.Object, q workqueue.RateLimitingInterface) { - for _, pgadmin := range r.findPGAdminsForPostgresCluster(ctx, cluster) { - - q.Add(ctrl.Request{ - NamespacedName: client.ObjectKeyFromObject(pgadmin), - }) - } - } - - return handler.Funcs{ - CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) { - handle(ctx, e.Object, q) - }, - UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { - handle(ctx, e.ObjectNew, q) - }, - DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) { - handle(ctx, e.Object, q) - }, - } -} - -// watchForRelatedSecret handles create/update/delete events for secrets, -// passing the Secret ObjectKey to findPGAdminsForSecret -func (r *PGAdminReconciler) watchForRelatedSecret() handler.EventHandler { - handle := func(ctx context.Context, secret client.Object, q workqueue.RateLimitingInterface) { - key := client.ObjectKeyFromObject(secret) - - for _, pgadmin := range r.findPGAdminsForSecret(ctx, key) { - q.Add(ctrl.Request{ - NamespacedName: client.ObjectKeyFromObject(pgadmin), - }) - } - } - - return handler.Funcs{ - CreateFunc: func(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) { - handle(ctx, e.Object, q) - }, - UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { - handle(ctx, e.ObjectNew, q) - }, - // If the secret is deleted, we want to reconcile - // in order to emit an event/status about this problem. - // We will also emit a matching event/status about this problem - // when we reconcile the cluster and can't find the secret. - // That way, users will get two alerts: one when the secret is deleted - // and another when the cluster is being reconciled. - DeleteFunc: func(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) { - handle(ctx, e.Object, q) - }, - } -} - -//+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgadmins",verbs={list} - -// findPGAdminsForSecret returns PGAdmins that have a user or users that have their password -// stored in the Secret -func (r *PGAdminReconciler) findPGAdminsForSecret( - ctx context.Context, secret client.ObjectKey, -) []*v1beta1.PGAdmin { - var matching []*v1beta1.PGAdmin - var pgadmins v1beta1.PGAdminList - - // NOTE: If this becomes slow due to a large number of PGAdmins in a single - // namespace, we can configure the [ctrl.Manager] field indexer and pass a - // [fields.Selector] here. - // - https://book.kubebuilder.io/reference/watching-resources/externally-managed.html - if err := r.List(ctx, &pgadmins, &client.ListOptions{ - Namespace: secret.Namespace, - }); err == nil { - for i := range pgadmins.Items { - for j := range pgadmins.Items[i].Spec.Users { - if pgadmins.Items[i].Spec.Users[j].PasswordRef.LocalObjectReference.Name == secret.Name { - matching = append(matching, &pgadmins.Items[i]) - break - } - } - } - } - return matching -}
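
Editor's illustration (not part of the patches above): PATCH 5/5 replaces the hand-rolled handler.Funcs plumbing with the generic runtime.Requests helper wired through handler.EnqueueRequestsFromMapFunc. The standalone sketch below shows the helper's behavior in isolation. The function body is copied from the reconcile.go hunk above; the surrounding main package and the example values ("asdf"/"123", taken from the new TestRequests) exist only for this demonstration.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// Requests converts objects to a slice of [reconcile.Request].
// The body mirrors the helper added to internal/controller/runtime/reconcile.go.
func Requests[T client.Object](objects ...T) []reconcile.Request {
	result := make([]reconcile.Request, len(objects))
	for i := range objects {
		result[i] = reconcile.Request{
			NamespacedName: client.ObjectKeyFromObject(objects[i]),
		}
	}
	return result
}

func main() {
	secret := new(corev1.Secret)
	secret.Namespace, secret.Name = "asdf", "123"

	// Prints a single request keyed by the object's namespace and name: "asdf/123".
	fmt.Println(Requests(secret))
}

Because Requests is constrained on client.Object, the map functions registered above can hand it whatever typed slice a find* helper returns (for example []*v1beta1.PGAdmin), and each watch reduces to a one-line EnqueueRequestsFromMapFunc call instead of separate Create/Update/Delete funcs.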
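
The watch callbacks above also call initialize.Pointers(list.Items...), whose implementation lives in internal/initialize/primitives.go (added in PATCH 1/5) and is not reproduced in this excerpt. A minimal sketch consistent with how it is used here might look like the following; treat the exact name and signature as an assumption.

package initialize

// Pointers returns a slice of pointers to each of the given elements.
// Hypothetical sketch only; the real helper is defined in
// internal/initialize/primitives.go and is not shown in this excerpt.
func Pointers[T any](elements ...T) []*T {
	result := make([]*T, len(elements))
	for i := range elements {
		result[i] = &elements[i]
	}
	return result
}

With that shape, initialize.Pointers(list.Items...) turns the []v1beta1.PostgresCluster (or []v1beta1.CrunchyBridgeCluster) produced by List into a slice of pointers, matching the reference-type convention from PATCH 1/5 and the client.Object constraint on runtime.Requests.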