diff --git a/apis/dataprotection/v1alpha1/backup_types.go b/apis/dataprotection/v1alpha1/backup_types.go index 7f8c6433a7d..33ed81c786a 100644 --- a/apis/dataprotection/v1alpha1/backup_types.go +++ b/apis/dataprotection/v1alpha1/backup_types.go @@ -183,6 +183,18 @@ type BackupStatus struct { // +optional VolumeSnapshots []VolumeSnapshotStatus `json:"volumeSnapshots,omitempty"` + // Records the parent backup name for incremental or differential backup. + // When the parent backup is deleted, the backup will also be deleted. + // + // +optional + ParentBackupName string `json:"parentBackupName,omitempty"` + + // Records the base full backup name for incremental backup or differential backup. + // When the base backup is deleted, the backup will also be deleted. + // + // +optional + BaseBackupName string `json:"baseBackupName,omitempty"` + // Records any additional information for the backup. // // +optional diff --git a/apis/dataprotection/v1alpha1/backuppolicy_types.go b/apis/dataprotection/v1alpha1/backuppolicy_types.go index 31fe87f49e7..f47d8022164 100644 --- a/apis/dataprotection/v1alpha1/backuppolicy_types.go +++ b/apis/dataprotection/v1alpha1/backuppolicy_types.go @@ -205,6 +205,12 @@ type BackupMethod struct { // +kubebuilder:validation:Pattern:=`^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$` Name string `json:"name"` + // The name of the compatible full backup method, used by incremental backups. + // + // +optional + // +kubebuilder:validation:Pattern:=`^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$` + CompatibleMethod string `json:"compatibleMethod,omitempty"` + // Specifies whether to take snapshots of persistent volumes. If true, // the ActionSetName is not required, the controller will use the CSI volume // snapshotter to create the snapshot. diff --git a/config/crd/bases/apps.kubeblocks.io_backuppolicytemplates.yaml b/config/crd/bases/apps.kubeblocks.io_backuppolicytemplates.yaml index 4b7155e3594..3e601a0aa9b 100644 --- a/config/crd/bases/apps.kubeblocks.io_backuppolicytemplates.yaml +++ b/config/crd/bases/apps.kubeblocks.io_backuppolicytemplates.yaml @@ -85,6 +85,11 @@ spec: For volume snapshot backup, the actionSet is not required, the controller will use the CSI volume snapshotter to create the snapshot. type: string + compatibleMethod: + description: The name of the compatible full backup method, + used by incremental backups. + pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: string env: description: Specifies the environment variables for the backup workload. diff --git a/config/crd/bases/dataprotection.kubeblocks.io_backuppolicies.yaml b/config/crd/bases/dataprotection.kubeblocks.io_backuppolicies.yaml index d7fa06edd4c..f704ff71efa 100644 --- a/config/crd/bases/dataprotection.kubeblocks.io_backuppolicies.yaml +++ b/config/crd/bases/dataprotection.kubeblocks.io_backuppolicies.yaml @@ -73,6 +73,11 @@ spec: For volume snapshot backup, the actionSet is not required, the controller will use the CSI volume snapshotter to create the snapshot. type: string + compatibleMethod: + description: The name of the compatible full backup method, + used by incremental backups. + pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: string env: description: Specifies the environment variables for the backup workload. 
diff --git a/config/crd/bases/dataprotection.kubeblocks.io_backups.yaml b/config/crd/bases/dataprotection.kubeblocks.io_backups.yaml index 76b17beecad..29d573f9e9c 100644 --- a/config/crd/bases/dataprotection.kubeblocks.io_backups.yaml +++ b/config/crd/bases/dataprotection.kubeblocks.io_backups.yaml @@ -268,6 +268,11 @@ spec: For volume snapshot backup, the actionSet is not required, the controller will use the CSI volume snapshotter to create the snapshot. type: string + compatibleMethod: + description: The name of the compatible full backup method, used + by incremental backups. + pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: string env: description: Specifies the environment variables for the backup workload. @@ -957,6 +962,11 @@ spec: backupRepoName: description: The name of the backup repository. type: string + baseBackupName: + description: |- + Records the base full backup name for incremental backup or differential backup. + When the base backup is deleted, the backup will also be deleted. + type: string completionTimestamp: description: |- Records the time when the backup operation was completed. @@ -1036,6 +1046,11 @@ spec: kopiaRepoPath: description: Records the path of the Kopia repository. type: string + parentBackupName: + description: |- + Records the parent backup name for incremental or differential backup. + When the parent backup is deleted, the backup will also be deleted. + type: string path: description: |- The directory within the backup repository where the backup data is stored. diff --git a/controllers/dataprotection/actionset_controller_test.go b/controllers/dataprotection/actionset_controller_test.go index e53f9df1f5b..3352d5c59e7 100644 --- a/controllers/dataprotection/actionset_controller_test.go +++ b/controllers/dataprotection/actionset_controller_test.go @@ -54,7 +54,7 @@ var _ = Describe("ActionSet Controller test", func() { Context("create a actionSet", func() { It("should be available", func() { - as := testdp.NewFakeActionSet(&testCtx) + as := testdp.NewFakeActionSet(&testCtx, nil) Expect(as).ShouldNot(BeNil()) }) }) diff --git a/controllers/dataprotection/backup_controller.go b/controllers/dataprotection/backup_controller.go index 43f521ac3fc..201d7ab454c 100644 --- a/controllers/dataprotection/backup_controller.go +++ b/controllers/dataprotection/backup_controller.go @@ -241,6 +241,11 @@ func (r *BackupReconciler) deleteBackupFiles(reqCtx intctrlutil.RequestCtx, back // handleDeletingPhase handles the deletion of backup. It will delete the backup CR // and the backup workload(job). func (r *BackupReconciler) handleDeletingPhase(reqCtx intctrlutil.RequestCtx, backup *dpv1alpha1.Backup) (ctrl.Result, error) { + // delete related backups + if err := r.deleteRelatedBackups(reqCtx, backup); err != nil { + return intctrlutil.RequeueWithError(err, reqCtx.Log, "") + } + // if backup phase is Deleting, delete the backup reference workloads, // backup data stored in backup repository and volume snapshots. // TODO(ldm): if backup is being used by restore, do not delete it. 
@@ -393,22 +398,6 @@ func (r *BackupReconciler) prepareBackupRequest( return nil, err } request.ActionSet = actionSet - - // check continuous backups should have backupschedule label - if request.ActionSet.Spec.BackupType == dpv1alpha1.BackupTypeContinuous { - if _, ok := request.Labels[dptypes.BackupScheduleLabelKey]; !ok { - return nil, fmt.Errorf("continuous backup is only allowed to be created by backupSchedule") - } - backupSchedule := &dpv1alpha1.BackupSchedule{} - if err := request.Client.Get(reqCtx.Ctx, client.ObjectKey{Name: backup.Labels[dptypes.BackupScheduleLabelKey], - Namespace: backup.Namespace}, backupSchedule); err != nil { - return nil, err - } - if backupSchedule.Status.Phase != dpv1alpha1.BackupSchedulePhaseAvailable { - return nil, fmt.Errorf("create continuous backup by failed backupschedule %s/%s", - backupSchedule.Namespace, backupSchedule.Name) - } - } } // check encryption config @@ -424,13 +413,25 @@ func (r *BackupReconciler) prepareBackupRequest( } request.BackupPolicy = backupPolicy + request.BackupMethod = backupMethod + + switch dpv1alpha1.BackupType(request.GetBackupType()) { + case dpv1alpha1.BackupTypeIncremental: + request, err = prepare4Incremental(request) + case dpv1alpha1.BackupTypeContinuous: + err = validateContinuousBackup(backup, reqCtx, request.Client) + } + if err != nil { + return nil, err + } + if !snapshotVolumes { // if use volume snapshot, ignore backup repo if err = HandleBackupRepo(request); err != nil { return nil, err } } - request.BackupMethod = backupMethod + return request, nil } @@ -519,6 +520,14 @@ func (r *BackupReconciler) patchBackupStatus( request.Status.Phase = dpv1alpha1.BackupPhaseRunning request.Status.StartTimestamp = &metav1.Time{Time: r.clock.Now().UTC()} + // set status parent backup and base backup name + if request.ParentBackup != nil { + request.Status.ParentBackupName = request.ParentBackup.Name + } + if request.BaseBackup != nil { + request.Status.BaseBackupName = request.BaseBackup.Name + } + if err = dpbackup.SetExpirationByCreationTime(request.Backup); err != nil { return err } @@ -743,6 +752,33 @@ func (r *BackupReconciler) deleteExternalResources( return deleteRelatedObjectList(reqCtx, r.Client, &appsv1.StatefulSetList{}, namespaces, labels) } +// deleteRelatedBackups deletes the related backups. +func (r *BackupReconciler) deleteRelatedBackups( + reqCtx intctrlutil.RequestCtx, + backup *dpv1alpha1.Backup) error { + backupList := &dpv1alpha1.BackupList{} + labels := map[string]string{ + dptypes.BackupPolicyLabelKey: backup.Spec.BackupPolicyName, + } + if err := r.Client.List(reqCtx.Ctx, backupList, + client.InNamespace(backup.Namespace), client.MatchingLabels(labels)); client.IgnoreNotFound(err) != nil { + return err + } + for i := range backupList.Items { + bp := &backupList.Items[i] + // delete backups related to the current backup + // files in the related backup's status.path will be deleted by its own associated deleter + if bp.Status.ParentBackupName != backup.Name && bp.Status.BaseBackupName != backup.Name { + continue + } + if err := intctrlutil.BackgroundDeleteObject(r.Client, reqCtx.Ctx, bp); err != nil { + return err + } + reqCtx.Log.Info("delete the related backup", "backup", fmt.Sprintf("%s/%s", bp.Namespace, bp.Name)) + } + return nil +} + // PatchBackupObjectMeta patches backup object metaObject include cluster snapshot. 
func PatchBackupObjectMeta( original *dpv1alpha1.Backup, @@ -956,3 +992,56 @@ func setClusterSnapshotAnnotation(request *dpbackup.Request, cluster *appsv1alph request.Backup.Annotations[constant.ClusterSnapshotAnnotationKey] = *clusterString return nil } + +// validateContinuousBackup validates the continuous backup. +func validateContinuousBackup(backup *dpv1alpha1.Backup, reqCtx intctrlutil.RequestCtx, cli client.Client) error { + // validate if the continuous backup is created by a backupSchedule. + if _, ok := backup.Labels[dptypes.BackupScheduleLabelKey]; !ok { + return fmt.Errorf("continuous backup is only allowed to be created by backupSchedule") + } + backupSchedule := &dpv1alpha1.BackupSchedule{} + if err := cli.Get(reqCtx.Ctx, client.ObjectKey{Name: backup.Labels[dptypes.BackupScheduleLabelKey], + Namespace: backup.Namespace}, backupSchedule); err != nil { + return err + } + if backupSchedule.Status.Phase != dpv1alpha1.BackupSchedulePhaseAvailable { + return fmt.Errorf("create continuous backup by failed backupschedule %s/%s", + backupSchedule.Namespace, backupSchedule.Name) + } + return nil +} + +// prepare4Incremental prepares for incremental backup +func prepare4Incremental(request *dpbackup.Request) (*dpbackup.Request, error) { + // get and validate parent backup + parentBackup, err := GetParentBackup(request.Ctx, request.Client, request.Backup, request.BackupMethod) + if err != nil { + return nil, err + } + parentBackupType, err := dputils.GetBackupTypeByMethodName(request.RequestCtx, + request.Client, parentBackup.Spec.BackupMethod, request.BackupPolicy) + if err != nil { + return nil, err + } + request.ParentBackup = parentBackup + // get and validate base backup + switch parentBackupType { + case dpv1alpha1.BackupTypeFull: + request.BaseBackup = request.ParentBackup + case dpv1alpha1.BackupTypeIncremental: + baseBackup := &dpv1alpha1.Backup{} + baseBackupName := request.ParentBackup.Status.BaseBackupName + if len(baseBackupName) == 0 { + return nil, fmt.Errorf("backup %s/%s base backup name is empty", + request.ParentBackup.Namespace, request.ParentBackup.Name) + } + if err := request.Client.Get(request.Ctx, client.ObjectKey{Name: baseBackupName, + Namespace: request.ParentBackup.Namespace}, baseBackup); err != nil { + return nil, fmt.Errorf("failed to get base backup %s/%s: %w", request.ParentBackup.Namespace, baseBackupName, err) + } + request.BaseBackup = baseBackup + default: + return nil, fmt.Errorf("parent backup type is %s, but only full and incremental backup are supported", parentBackupType) + } + return request, nil +} diff --git a/controllers/dataprotection/backup_controller_test.go b/controllers/dataprotection/backup_controller_test.go index 63778ffdfac..64703ebdbe4 100644 --- a/controllers/dataprotection/backup_controller_test.go +++ b/controllers/dataprotection/backup_controller_test.go @@ -100,6 +100,8 @@ var _ = Describe("Backup Controller test", func() { When("with default settings", func() { var ( + actionSet *dpv1alpha1.ActionSet + incActionSet *dpv1alpha1.ActionSet backupPolicy *dpv1alpha1.BackupPolicy repoPVCName string cluster *appsv1alpha1.Cluster @@ -108,8 +110,9 @@ var _ = Describe("Backup Controller test", func() { ) BeforeEach(func() { - By("creating an actionSet") - actionSet := testdp.NewFakeActionSet(&testCtx) + By("creating actionSets") + actionSet = testdp.NewFakeActionSet(&testCtx, nil) + incActionSet = testdp.NewFakeIncActionSet(&testCtx) By("creating storage provider") _ = testdp.NewFakeStorageProvider(&testCtx, nil) @@ -117,7 +120,7 @@ 
var _ = Describe("Backup Controller test", func() { By("creating backup repo") _, repoPVCName = testdp.NewFakeBackupRepo(&testCtx, nil) - By("creating a backupPolicy from actionSet: " + actionSet.Name) + By("creating a backupPolicy from actionSets: " + actionSet.Name + ", " + incActionSet.Name) backupPolicy = testdp.NewFakeBackupPolicy(&testCtx, nil) cluster = clusterInfo.Cluster @@ -365,7 +368,7 @@ var _ = Describe("Backup Controller test", func() { })).Should(Succeed()) }) - It("create an backup using fallbackLabelSelector", func() { + It("create a backup using fallbackLabelSelector", func() { podFactory := func(name string) *testapps.MockPodFactory { return testapps.NewPodFactory(testCtx.DefaultNamespace, name). AddAppInstanceLabel(testdp.ClusterName). @@ -684,6 +687,161 @@ var _ = Describe("Backup Controller test", func() { })).Should(Succeed()) }) }) + + Context("create incremental backup", func() { + const ( + incBackupName = "inc-backup-" + scheduleName = "schedule" + ) + var ( + fullBackup *dpv1alpha1.Backup + fullBackupKey types.NamespacedName + now = metav1.Now() + ) + + getJobKey := func(bp *dpv1alpha1.Backup) client.ObjectKey { + return client.ObjectKey{ + Name: dpbackup.GenerateBackupJobName(bp, dpbackup.BackupDataJobNamePrefix+"-0"), + Namespace: bp.Namespace, + } + } + + newFakeIncBackup := func(name, parentName string, scheduled bool) *dpv1alpha1.Backup { + return testdp.NewFakeBackup(&testCtx, func(backup *dpv1alpha1.Backup) { + backup.Name = name + backup.Spec.BackupMethod = testdp.IncBackupMethodName + backup.Spec.ParentBackupName = parentName + if scheduled { + backup.Labels[dptypes.BackupScheduleLabelKey] = scheduleName + } + }) + } + + step := func() *metav1.Time { + bak := now + now = metav1.Time{Time: now.Add(time.Hour)} + return &bak + } + + mockBackupStatus := func(backup *dpv1alpha1.Backup, parentBackup, baseBackup string) { + backupStatus := dpv1alpha1.BackupStatus{ + Phase: dpv1alpha1.BackupPhaseCompleted, + ParentBackupName: parentBackup, + BaseBackupName: baseBackup, + TimeRange: &dpv1alpha1.BackupTimeRange{ + Start: step(), + End: step(), + }, + } + testdp.PatchBackupStatus(&testCtx, client.ObjectKeyFromObject(backup), backupStatus) + } + + checkBackupParentAndBase := func(backup *dpv1alpha1.Backup, parentBackup, baseBackup string) { + Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(backup), func(g Gomega, fetched *dpv1alpha1.Backup) { + g.Expect(fetched.Status.ParentBackupName).NotTo(HaveLen(0)) + g.Expect(fetched.Status.ParentBackupName).To(Equal(parentBackup)) + g.Expect(fetched.Status.BaseBackupName).NotTo(HaveLen(0)) + g.Expect(fetched.Status.BaseBackupName).To(Equal(baseBackup)) + g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupPhaseRunning)) + })).Should(Succeed()) + } + + checkBackupCompleted := func(backup *dpv1alpha1.Backup) { + testdp.PatchK8sJobStatus(&testCtx, getJobKey(backup), batchv1.JobComplete) + Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(backup), func(g Gomega, fetched *dpv1alpha1.Backup) { + g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupPhaseCompleted)) + })).Should(Succeed()) + } + + checkBackupDeleting := func(backup types.NamespacedName) { + Eventually(testapps.CheckObj(&testCtx, backup, func(g Gomega, fetched *dpv1alpha1.Backup) { + g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupPhaseDeleting)) + })).Should(Succeed()) + } + + mockIncBackupAndComplete := func(scheduled bool, backupName, parentName, expectedParent, expectedBase string) types.NamespacedName { + 
incBackup := newFakeIncBackup(backupName, parentName, scheduled) + By("check backup parent and base") + checkBackupParentAndBase(incBackup, expectedParent, expectedBase) + By("check backup completed") + checkBackupCompleted(incBackup) + mockBackupStatus(incBackup, expectedParent, expectedBase) + return client.ObjectKeyFromObject(incBackup) + } + + BeforeEach(func() { + By("creating a full backup from backupPolicy " + testdp.BackupPolicyName) //nolint:goconst + fullBackup = testdp.NewFakeBackup(&testCtx, func(backup *dpv1alpha1.Backup) { + backup.Labels[dptypes.BackupScheduleLabelKey] = scheduleName + }) + fullBackupKey = client.ObjectKeyFromObject(fullBackup) + }) + + It("creates an incremental backup based on a specific backup", func() { + By("waiting for the full backup " + fullBackupKey.String() + " to complete") + checkBackupCompleted(fullBackup) + mockBackupStatus(fullBackup, "", "") + By("creating an incremental backup from the specific full backup " + fullBackupKey.String()) + incBackup1 := mockIncBackupAndComplete(false, incBackupName+"1", fullBackup.Name, fullBackup.Name, fullBackup.Name) + By("creating an incremental backup from the specific incremental backup " + incBackup1.String()) + _ = mockIncBackupAndComplete(false, incBackupName+"2", incBackup1.Name, incBackup1.Name, fullBackup.Name) + }) + + It("creates an incremental backup without specific backup", func() { + By("waiting for the full backup " + fullBackupKey.String() + " to complete") + checkBackupCompleted(fullBackup) + mockBackupStatus(fullBackup, "", "") + By("creating an incremental backup " + incBackupName + "1 without specific backup") + incBackup1 := mockIncBackupAndComplete(true, incBackupName+"1", "", fullBackup.Name, fullBackup.Name) + By("creating an incremental backup " + incBackupName + "2 without specific backup") + _ = mockIncBackupAndComplete(false, incBackupName+"2", "", incBackup1.Name, fullBackup.Name) + By("creating an incremental backup " + incBackupName + "3 with the schedule label, it prefers the latest schedule backup as parent") + incBackup3 := mockIncBackupAndComplete(true, incBackupName+"3", "", incBackup1.Name, fullBackup.Name) + By("creating an incremental backup " + incBackupName + "4 without the schedule label, it prefers the latest backup as parent") + _ = mockIncBackupAndComplete(false, incBackupName+"4", "", incBackup3.Name, fullBackup.Name) + By("creating a new full backup from backupPolicy " + testdp.BackupPolicyName) + fullBackup1 := testdp.NewFakeBackup(&testCtx, func(backup *dpv1alpha1.Backup) { + backup.Name = "full-backup-1" + }) + fullBackupKey1 := client.ObjectKeyFromObject(fullBackup1) + By("waiting for the full backup " + fullBackupKey1.String() + " to complete") + checkBackupCompleted(fullBackup1) + mockBackupStatus(fullBackup1, "", "") + By("creating an incremental backup " + incBackupName + "5, it prefers the latest full backup as parent") + _ = mockIncBackupAndComplete(false, incBackupName+"5", "", fullBackup1.Name, fullBackup1.Name) + + }) + + It("creates an incremental backup without valid parent backups", func() { + By("creating an incremental backup without specific parent backup") + incBackup1 := newFakeIncBackup(incBackupName+"1", "", false) + incBackupKey1 := client.ObjectKeyFromObject(incBackup1) + By("check backup failed") + Eventually(testapps.CheckObj(&testCtx, incBackupKey1, func(g Gomega, fetched *dpv1alpha1.Backup) { + g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupPhaseFailed)) + })).Should(Succeed()) + }) + + It("deletes incremental backups", func()
{ + By("waiting for the full backup to complete, the full backup: " + fullBackupKey.String()) + checkBackupCompleted(fullBackup) + mockBackupStatus(fullBackup, "", "") + By("creating an incremental backup " + incBackupName + "1") + incBackup1 := mockIncBackupAndComplete(false, incBackupName+"1", fullBackup.Name, fullBackup.Name, fullBackup.Name) + By("creating an incremental backup " + incBackupName + "2") + incBackup2 := mockIncBackupAndComplete(false, incBackupName+"2", incBackup1.Name, incBackup1.Name, fullBackup.Name) + By("creating an incremental backup " + incBackupName + "3") + incBackup3 := mockIncBackupAndComplete(false, incBackupName+"3", "", incBackup2.Name, fullBackup.Name) + By("deleting an incremental backup" + incBackupName + "2 will delete its child backup") + testapps.DeleteObject(&testCtx, incBackup2, &dpv1alpha1.Backup{}) + checkBackupDeleting(incBackup2) + checkBackupDeleting(incBackup3) + By("deleting a base backup" + fullBackupKey.String() + " will delete all related incremental backups") + testapps.DeleteObject(&testCtx, fullBackupKey, &dpv1alpha1.Backup{}) + checkBackupDeleting(fullBackupKey) + checkBackupDeleting(incBackup1) + }) + }) }) When("with exceptional settings", func() { @@ -760,7 +918,7 @@ var _ = Describe("Backup Controller test", func() { }) It("should fail because actionSet's backup type isn't Full", func() { - actionSet := testdp.NewFakeActionSet(&testCtx) + actionSet := testdp.NewFakeActionSet(&testCtx, nil) actionSetKey := client.ObjectKeyFromObject(actionSet) Eventually(testapps.GetAndChangeObj(&testCtx, actionSetKey, func(fetched *dpv1alpha1.ActionSet) { fetched.Spec.BackupType = dpv1alpha1.BackupTypeIncremental @@ -779,7 +937,7 @@ var _ = Describe("Backup Controller test", func() { Context("create continuous backup", func() { It("should fail when continuous backup don't have backupschedule label", func() { By("create actionset with continuous backuptype") - actionSet := testdp.NewFakeActionSet(&testCtx) + actionSet := testdp.NewFakeActionSet(&testCtx, nil) actionSetKey := client.ObjectKeyFromObject(actionSet) Eventually(testapps.GetAndChangeObj(&testCtx, actionSetKey, func(fetched *dpv1alpha1.ActionSet) { fetched.Spec.BackupType = dpv1alpha1.BackupTypeContinuous @@ -799,7 +957,7 @@ var _ = Describe("Backup Controller test", func() { It("continue reconcile when continuous backup is Failed after fixing the issue", func() { By("create actionset and backupRepo for continuous backup") - actionSet := testdp.NewFakeActionSet(&testCtx) + actionSet := testdp.NewFakeActionSet(&testCtx, nil) Eventually(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(actionSet), func(fetched *dpv1alpha1.ActionSet) { fetched.Spec.BackupType = dpv1alpha1.BackupTypeContinuous })).Should(Succeed()) @@ -882,7 +1040,7 @@ var _ = Describe("Backup Controller test", func() { repo, repoPVCName = testdp.NewFakeBackupRepo(&testCtx, nil) By("creating actionSet") - _ = testdp.NewFakeActionSet(&testCtx) + _ = testdp.NewFakeActionSet(&testCtx, nil) }) Context("explicitly specify backup repo", func() { @@ -1033,7 +1191,7 @@ var _ = Describe("Backup Controller test", func() { BeforeEach(func() { By("creating an actionSet") - actionSet := testdp.NewFakeActionSet(&testCtx) + actionSet := testdp.NewFakeActionSet(&testCtx, nil) By("creating storage provider") _ = testdp.NewFakeStorageProvider(&testCtx, nil) diff --git a/controllers/dataprotection/backuppolicy_controller_test.go b/controllers/dataprotection/backuppolicy_controller_test.go index d2625fa1c65..c9df090a4a6 100644 --- 
a/controllers/dataprotection/backuppolicy_controller_test.go +++ b/controllers/dataprotection/backuppolicy_controller_test.go @@ -75,7 +75,7 @@ var _ = Describe("BackupPolicy Controller test", func() { It("backup policy should be available for target", func() { By("creating actionSet used by backup policy") - as := testdp.NewFakeActionSet(&testCtx) + as := testdp.NewFakeActionSet(&testCtx, nil) Expect(as).ShouldNot(BeNil()) By("creating backupPolicy and its status should be available") @@ -85,7 +85,7 @@ var _ = Describe("BackupPolicy Controller test", func() { It("test backup policy with targets", func() { By("creating actionSet used by backup policy") - as := testdp.NewFakeActionSet(&testCtx) + as := testdp.NewFakeActionSet(&testCtx, nil) Expect(as).ShouldNot(BeNil()) By("creating backupPolicy") diff --git a/controllers/dataprotection/backupschedule_controller_test.go b/controllers/dataprotection/backupschedule_controller_test.go index 3accdf0d1a3..09b942c1206 100644 --- a/controllers/dataprotection/backupschedule_controller_test.go +++ b/controllers/dataprotection/backupschedule_controller_test.go @@ -98,7 +98,7 @@ var _ = Describe("Backup Schedule Controller", func() { BeforeEach(func() { By("creating an actionSet") - actionSet := testdp.NewFakeActionSet(&testCtx) + actionSet := testdp.NewFakeActionSet(&testCtx, nil) By("creating storage provider") _ = testdp.NewFakeStorageProvider(&testCtx, nil) diff --git a/controllers/dataprotection/gc_controller_test.go b/controllers/dataprotection/gc_controller_test.go index 53a22a320d9..fb1bfbeaecc 100644 --- a/controllers/dataprotection/gc_controller_test.go +++ b/controllers/dataprotection/gc_controller_test.go @@ -94,7 +94,7 @@ var _ = Describe("Data Protection Garbage Collection Controller", func() { BeforeEach(func() { By("creating an actionSet") - actionSet := testdp.NewFakeActionSet(&testCtx) + actionSet := testdp.NewFakeActionSet(&testCtx, nil) By("creating storage provider") _ = testdp.NewFakeStorageProvider(&testCtx, nil) diff --git a/controllers/dataprotection/log_collection_controller_test.go b/controllers/dataprotection/log_collection_controller_test.go index b4efe304ce4..7e96b5e2dfa 100644 --- a/controllers/dataprotection/log_collection_controller_test.go +++ b/controllers/dataprotection/log_collection_controller_test.go @@ -80,7 +80,7 @@ var _ = Describe("Log Collection Controller", func() { BeforeEach(func() { By("creating an actionSet") - actionSet := testdp.NewFakeActionSet(&testCtx) + actionSet := testdp.NewFakeActionSet(&testCtx, nil) By("creating storage provider") _ = testdp.NewFakeStorageProvider(&testCtx, nil) diff --git a/controllers/dataprotection/restore_controller_test.go b/controllers/dataprotection/restore_controller_test.go index cc16c8d077f..bb29811c8f9 100644 --- a/controllers/dataprotection/restore_controller_test.go +++ b/controllers/dataprotection/restore_controller_test.go @@ -23,6 +23,7 @@ import ( "fmt" "strconv" "strings" + "time" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -30,6 +31,7 @@ import ( batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -120,7 +122,7 @@ var _ = Describe("Restore Controller test", func() { BeforeEach(func() { By("creating an actionSet") - actionSet = testdp.NewFakeActionSet(&testCtx) + actionSet = testdp.NewFakeActionSet(&testCtx, nil) By("creating storage provider") _ = testdp.NewFakeStorageProvider(&testCtx, nil) @@ -133,11 +135,15 @@ var _ = Describe("Restore Controller test", func() { mockBackupCompleted, useVolumeSnapshot, isSerialPolicy bool, + backupType dpv1alpha1.BackupType, expectRestorePhase dpv1alpha1.RestorePhase, change func(f *testdp.MockRestoreFactory), - changeBackupStatus func(b *dpv1alpha1.Backup)) *dpv1alpha1.Restore { + changeBackupStatus func(b *dpv1alpha1.Backup), + backupNames ...string, + ) *dpv1alpha1.Restore { By("create a completed backup") - backup := mockBackupForRestore(actionSet.Name, repo.Name, repoPVCName, mockBackupCompleted, useVolumeSnapshot) + backup := mockBackupForRestore(actionSet.Name, repo.Name, repoPVCName, mockBackupCompleted, + useVolumeSnapshot, backupType, backupNames...) if changeBackupStatus != nil { Expect(testapps.ChangeObjStatus(&testCtx, backup, func() { changeBackupStatus(backup) @@ -218,7 +224,7 @@ var _ = Describe("Restore Controller test", func() { } testRestoreWithVolumeClaimsTemplate := func(replicas, startingIndex int) { - restore := initResourcesAndWaitRestore(true, false, false, dpv1alpha1.RestorePhaseRunning, + restore := initResourcesAndWaitRestore(true, false, false, "", dpv1alpha1.RestorePhaseRunning, func(f *testdp.MockRestoreFactory) { f.SetVolumeClaimsTemplate(testdp.MysqlTemplateName, testdp.DataVolumeName, testdp.DataVolumeMountPath, "", int32(replicas), int32(startingIndex), nil) @@ -238,7 +244,7 @@ var _ = Describe("Restore Controller test", func() { Context("with restore fails", func() { It("test restore is Failed when backup is not completed", func() { By("expect for restore is Failed ") - initResourcesAndWaitRestore(false, false, true, dpv1alpha1.RestorePhaseFailed, + initResourcesAndWaitRestore(false, false, true, "", dpv1alpha1.RestorePhaseFailed, func(f *testdp.MockRestoreFactory) { f.SetVolumeClaimsTemplate(testdp.MysqlTemplateName, testdp.DataVolumeName, testdp.DataVolumeMountPath, "", int32(3), int32(0), nil) @@ -247,7 +253,7 @@ var _ = Describe("Restore Controller test", func() { It("test restore is failed when check failed in new action", func() { By("expect for restore is Failed") - restore := initResourcesAndWaitRestore(true, false, true, dpv1alpha1.RestorePhaseFailed, + restore := initResourcesAndWaitRestore(true, false, true, "", dpv1alpha1.RestorePhaseFailed, func(f *testdp.MockRestoreFactory) { f.Get().Spec.Backup.Name = "wrongBackup" }, nil) @@ -260,7 +266,7 @@ var _ = Describe("Restore Controller test", func() { It("test restore is failed when validate failed in new action", func() { By("expect for restore is Failed") - restore := initResourcesAndWaitRestore(false, false, true, dpv1alpha1.RestorePhaseFailed, func(f *testdp.MockRestoreFactory) { + restore := initResourcesAndWaitRestore(false, false, true, "", dpv1alpha1.RestorePhaseFailed, func(f *testdp.MockRestoreFactory) { f.SetVolumeClaimsTemplate(testdp.MysqlTemplateName, testdp.DataVolumeName, testdp.DataVolumeMountPath, "", int32(3), int32(0), nil) }, nil) @@ -273,7 +279,7 @@ var _ = 
Describe("Restore Controller test", func() { It("test restore is Failed when restore job is not Failed", func() { By("expect for restore is Failed ") - restore := initResourcesAndWaitRestore(true, false, true, dpv1alpha1.RestorePhaseRunning, + restore := initResourcesAndWaitRestore(true, false, true, "", dpv1alpha1.RestorePhaseRunning, func(f *testdp.MockRestoreFactory) { f.SetVolumeClaimsTemplate(testdp.MysqlTemplateName, testdp.DataVolumeName, testdp.DataVolumeMountPath, "", int32(3), int32(0), nil) @@ -310,7 +316,7 @@ var _ = Describe("Restore Controller test", func() { It("test volumeClaimsTemplate when volumeClaimRestorePolicy is Serial", func() { replicas := 2 startingIndex := 1 - restore := initResourcesAndWaitRestore(true, false, true, dpv1alpha1.RestorePhaseRunning, + restore := initResourcesAndWaitRestore(true, false, true, "", dpv1alpha1.RestorePhaseRunning, func(f *testdp.MockRestoreFactory) { f.SetVolumeClaimsTemplate(testdp.MysqlTemplateName, testdp.DataVolumeName, testdp.DataVolumeMountPath, "", int32(replicas), int32(startingIndex), nil) @@ -352,7 +358,7 @@ var _ = Describe("Restore Controller test", func() { }) It("test dataSourceRef", func() { - initResourcesAndWaitRestore(true, true, false, dpv1alpha1.RestorePhaseAsDataSource, + initResourcesAndWaitRestore(true, true, false, "", dpv1alpha1.RestorePhaseAsDataSource, func(f *testdp.MockRestoreFactory) { f.SetDataSourceRef(testdp.DataVolumeName, testdp.DataVolumeMountPath) }, nil) @@ -361,7 +367,7 @@ var _ = Describe("Restore Controller test", func() { It("test when dataRestorePolicy is OneToOne", func() { startingIndex := 0 restoredReplicas := 2 - restore := initResourcesAndWaitRestore(true, false, false, dpv1alpha1.RestorePhaseRunning, + restore := initResourcesAndWaitRestore(true, false, false, "", dpv1alpha1.RestorePhaseRunning, func(f *testdp.MockRestoreFactory) { f.SetVolumeClaimsTemplate(testdp.MysqlTemplateName, testdp.DataVolumeName, testdp.DataVolumeMountPath, "", int32(restoredReplicas), int32(startingIndex), nil) @@ -399,7 +405,7 @@ var _ = Describe("Restore Controller test", func() { startingIndex := 0 restoredReplicas := 2 sourcePodName := "pod-0" - restore := initResourcesAndWaitRestore(true, false, false, dpv1alpha1.RestorePhaseRunning, + restore := initResourcesAndWaitRestore(true, false, false, "", dpv1alpha1.RestorePhaseRunning, func(f *testdp.MockRestoreFactory) { f.SetVolumeClaimsTemplate(testdp.MysqlTemplateName, testdp.DataVolumeName, testdp.DataVolumeMountPath, "", int32(restoredReplicas), int32(startingIndex), nil) @@ -430,6 +436,7 @@ var _ = Describe("Restore Controller test", func() { By("mock jobs are completed and wait for restore is completed") mockAndCheckRestoreCompleted(restore) }) + }) Context("test postReady stage", func() { @@ -448,7 +455,7 @@ var _ = Describe("Restore Controller test", func() { matchLabels := map[string]string{ constant.AppInstanceLabelKey: testdp.ClusterName, } - restore := initResourcesAndWaitRestore(true, false, false, dpv1alpha1.RestorePhaseRunning, + restore := initResourcesAndWaitRestore(true, false, false, "", dpv1alpha1.RestorePhaseRunning, func(f *testdp.MockRestoreFactory) { f.SetConnectCredential(testdp.ClusterName).SetJobActionConfig(matchLabels).SetExecActionConfig(matchLabels) }, nil) @@ -480,17 +487,102 @@ var _ = Describe("Restore Controller test", func() { Context("test cross namespace", func() { It("should wait for preparation of backup repo", func() { By("creating a restore in a different namespace from backup") - initResourcesAndWaitRestore(true, 
false, true, dpv1alpha1.RestorePhaseRunning, + initResourcesAndWaitRestore(true, false, true, "", dpv1alpha1.RestorePhaseRunning, func(f *testdp.MockRestoreFactory) { f.SetNamespace(namespace2) }, nil) }) }) + + Context("test restore from incremental backup", func() { + var ( + baseBackup *dpv1alpha1.Backup + parentBackupName string + ancestorBackups = []*dpv1alpha1.Backup{} + cnt = 0 + ) + + genIncBackupName := func() string { + cnt++ + return fmt.Sprintf("inc-backup-%d", cnt) + } + + BeforeEach(func() { + By("mock completed full backup and parent incremental backup") + baseBackup = mockBackupForRestore(actionSet.Name, repo.Name, repoPVCName, true, false, dpv1alpha1.BackupTypeFull) + actionSet = testdp.NewFakeIncActionSet(&testCtx) + parentBackupName = baseBackup.Name + for i := 0; i < 3; i++ { + backup := mockBackupForRestore(actionSet.Name, repo.Name, repoPVCName, true, false, dpv1alpha1.BackupTypeIncremental, + genIncBackupName(), parentBackupName, baseBackup.Name) + ancestorBackups = append(ancestorBackups, backup) + parentBackupName = backup.Name + } + }) + + AfterEach(func() { + ancestorBackups = []*dpv1alpha1.Backup{} + cnt = 0 + }) + + It("test restore from incremental backup", func() { + replicas, startingIndex := 3, 0 + restore := initResourcesAndWaitRestore(true, false, false, dpv1alpha1.BackupTypeIncremental, dpv1alpha1.RestorePhaseRunning, + func(f *testdp.MockRestoreFactory) { + f.SetVolumeClaimsTemplate(testdp.MysqlTemplateName, testdp.DataVolumeName, + testdp.DataVolumeMountPath, "", int32(replicas), int32(startingIndex), nil) + f.SetPrepareDataRequiredPolicy(dpv1alpha1.OneToOneRestorePolicy, "") + }, nil, genIncBackupName(), parentBackupName, baseBackup.Name) + + By("wait for creating jobs and pvcs") + checkJobAndPVCSCount(restore, replicas, replicas, 0) + By("check job env") + ancestorIncrementalBackupNames := []string{} + for _, backup := range ancestorBackups { + ancestorIncrementalBackupNames = append(ancestorIncrementalBackupNames, backup.Name) + } + expectedEnv := map[string]string{ + dptypes.DPAncestorIncrementalBackupNames: strings.Join(ancestorIncrementalBackupNames, ","), + dptypes.DPBaseBackupName: baseBackup.Name, + } + jobList := &batchv1.JobList{} + Expect(k8sClient.List(ctx, jobList, + client.MatchingLabels{dprestore.DataProtectionRestoreLabelKey: restore.Name}, + client.InNamespace(testCtx.DefaultNamespace))).Should(Succeed()) + for _, job := range jobList.Items { + cnt := 0 + for _, env := range job.Spec.Template.Spec.Containers[0].Env { + if value, ok := expectedEnv[env.Name]; ok { + Expect(env.Value).Should(Equal(value)) + cnt++ + } + } + Expect(cnt).To(Equal(len(expectedEnv))) + } + By("mock jobs are completed and wait for restore is completed") + mockAndCheckRestoreCompleted(restore) + }) + }) }) }) -func mockBackupForRestore(actionSetName, repoName, backupPVCName string, mockBackupCompleted, useVolumeSnapshotBackup bool) *dpv1alpha1.Backup { - backup := testdp.NewFakeBackup(&testCtx, nil) +func mockBackupForRestore( + actionSetName, repoName, backupPVCName string, + mockBackupCompleted, useVolumeSnapshotBackup bool, + backupType dpv1alpha1.BackupType, + backupNames ...string, +) *dpv1alpha1.Backup { + backup := testdp.NewFakeBackup(&testCtx, func(backup *dpv1alpha1.Backup) { + if len(backupNames) > 0 { + backup.Name = backupNames[0] + } + if backupType == dpv1alpha1.BackupTypeIncremental { + if len(backupNames) > 1 { + backup.Spec.ParentBackupName = backupNames[1] + } + backup.Spec.BackupMethod = testdp.IncBackupMethodName + } + }) // wait for backup 
is failed by backup controller. // it will be failed if the backupPolicy is not created. Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(backup), func(g Gomega, tmpBackup *dpv1alpha1.Backup) { @@ -505,12 +597,27 @@ func mockBackupForRestore(actionSetName, repoName, backupPVCName string, mockBac backupMethodName = testdp.VSBackupMethodName testdp.MockBackupVSStatusActions(backup) } - backup.Status.Path = "/backup-data" + if backupType == dpv1alpha1.BackupTypeIncremental { + backupMethodName = testdp.IncBackupMethodName + if len(backupNames) > 2 { + backup.Status.ParentBackupName = backupNames[1] + backup.Status.BaseBackupName = backupNames[2] + } + } + backup.Status.Path = "/backup-data" + "/" + backup.Name backup.Status.Phase = dpv1alpha1.BackupPhaseCompleted backup.Status.BackupRepoName = repoName backup.Status.PersistentVolumeClaimName = backupPVCName testdp.MockBackupStatusTarget(backup, dpv1alpha1.PodSelectionStrategyAny) testdp.MockBackupStatusMethod(backup, backupMethodName, testdp.DataVolumeName, actionSetName) + backup.Status.TimeRange = &dpv1alpha1.BackupTimeRange{ + Start: &metav1.Time{}, + End: &metav1.Time{}, + } + fakeClock.Step(time.Hour) + backup.Status.TimeRange.Start.Time = fakeClock.Now() + fakeClock.Step(time.Hour) + backup.Status.TimeRange.End.Time = fakeClock.Now() })).Should(Succeed()) } return backup diff --git a/controllers/dataprotection/utils.go b/controllers/dataprotection/utils.go index ab576c9b021..035fb36a846 100644 --- a/controllers/dataprotection/utils.go +++ b/controllers/dataprotection/utils.go @@ -24,6 +24,7 @@ import ( "encoding/json" "fmt" "reflect" + "sort" "strings" "sync" @@ -581,6 +582,179 @@ func fromFlattenName(flatten string) (name string, namespace string) { return } +// GetParentBackup returns the parent backup of the backup. +// If parentBackupName is specified, the backup should be an on-demand backup, +// then validate and return the parent backup. +// If parentBackupName is not specified, find the latest valid parent backup. +func GetParentBackup(ctx context.Context, cli client.Client, backup *dpv1alpha1.Backup, + backupMethod *dpv1alpha1.BackupMethod) (*dpv1alpha1.Backup, error) { + if backup == nil || backupMethod == nil { + return nil, fmt.Errorf("backup or backupMethod is nil") + } + var scheduleName string + if schedule, ok := backup.Labels[dptypes.BackupScheduleLabelKey]; ok && len(schedule) > 0 { + scheduleName = schedule + } + parentBackup := &dpv1alpha1.Backup{} + if len(backup.Spec.ParentBackupName) != 0 { + // only on-demand backup can specify parent backup + if len(scheduleName) != 0 { + return nil, fmt.Errorf("schedule backup cannot specify parent backup") + } + if err := cli.Get(ctx, client.ObjectKey{ + Namespace: backup.Namespace, + Name: backup.Spec.ParentBackupName, + }, parentBackup); err != nil { + return nil, err + } + if err := ValidateParentBackup(backup, parentBackup, backupMethod); err != nil { + return nil, fmt.Errorf("failed to validate specified parent backup %s: %w", backup.Spec.ParentBackupName, err) + } + return parentBackup, nil + } + parentBackup, err := FindParentBackupIfNotSet(ctx, cli, backup, backupMethod, scheduleName) + if err != nil { + return nil, fmt.Errorf("failed to find parent backup: %w", err) + } + if parentBackup == nil { + return nil, fmt.Errorf("failed to find a valid parent backup for backup %s/%s", backup.Namespace, backup.Name) + } + return parentBackup, nil +} + +// FindParentBackupIfNotSet finds the latest valid parent backup for the incremental backup. +// a.
return the latest full backup when it is newer than the base backup of the latest incremental backup, +// or when the base backup of the latest incremental backup is not found. +// b. return the latest incremental backup. +// c. return the latest full backup if incremental backups are not found. +// For scheduled backups, find the parent within scheduled backups, which have the schedule label, +// if not found, find the full backup as the parent within all backups. +// For on-demand backups, find the parent within all backups. +func FindParentBackupIfNotSet(ctx context.Context, cli client.Client, backup *dpv1alpha1.Backup, + backupMethod *dpv1alpha1.BackupMethod, scheduleName string) (*dpv1alpha1.Backup, error) { + getLatestBackup := func(backupList []*dpv1alpha1.Backup) *dpv1alpha1.Backup { + if len(backupList) == 0 { + return nil + } + // sort by stop time in descending order + sort.Slice(backupList, func(i, j int) bool { + i, j = j, i + return dputils.CompareWithBackupStopTime(*backupList[i], *backupList[j]) + }) + return backupList[0] + } + getLatestParentBackup := func(labels map[string]string, incremental bool) (*dpv1alpha1.Backup, error) { + backupList := &dpv1alpha1.BackupList{} + if err := cli.List(ctx, backupList, client.InNamespace(backup.Namespace), + client.MatchingLabels(labels)); err != nil && !apierrors.IsNotFound(err) { + return nil, err + } + filteredbackupList := FilterParentBackups(backupList, backup, backupMethod, incremental) + return getLatestBackup(filteredbackupList), nil + } + + labelMap := map[string]string{} + // with backup policy label + labelMap[dptypes.BackupPolicyLabelKey] = backup.Spec.BackupPolicyName + // with the schedule label if specified schedule + if len(scheduleName) != 0 { + labelMap[dptypes.BackupScheduleLabelKey] = scheduleName + } + // 1. get the latest incremental backups + labelMap[dptypes.BackupTypeLabelKey] = string(dpv1alpha1.BackupTypeIncremental) + latestIncrementalBackup, err := getLatestParentBackup(labelMap, true) + if err != nil { + return nil, err + } + // 2. get the latest full backups + labelMap[dptypes.BackupTypeLabelKey] = string(dpv1alpha1.BackupTypeFull) + latestFullBackup, err := getLatestParentBackup(labelMap, false) + if err != nil { + return nil, err + } + // 3. prefer the latest backup; if it is an incremental backup, it should be based on the latest full backup. + if latestIncrementalBackup != nil && latestFullBackup != nil { + if !dputils.CompareWithBackupStopTime(*latestIncrementalBackup, *latestFullBackup) && + latestIncrementalBackup.Status.BaseBackupName == latestFullBackup.Name { + return latestIncrementalBackup, nil + } + // the base backup of the latest incremental backup is not found, + // or the latest full backup is newer than the base backup of the latest incremental backup + return latestFullBackup, nil + } + // 4. get the latest unscheduled full backup if scheduled backups not found + if len(scheduleName) != 0 && latestFullBackup == nil { + delete(labelMap, dptypes.BackupScheduleLabelKey) + latestFullBackup, err = getLatestParentBackup(labelMap, false) + if err != nil { + return nil, err + } + } + // 5. only full backup found + if latestFullBackup != nil { + return latestFullBackup, nil + } + // illegal case: no full backup found but incremental backup found + if latestIncrementalBackup != nil { + return nil, fmt.Errorf("illegal incremental backup %s/%s", latestIncrementalBackup.Namespace, + latestIncrementalBackup.Name) + } + // 6. 
no backup found + return nil, nil +} + +// FilterParentBackups filters the parent backups by backup phase, backup method and end time. +func FilterParentBackups(backupList *dpv1alpha1.BackupList, targetBackup *dpv1alpha1.Backup, + backupMethod *dpv1alpha1.BackupMethod, incremental bool) []*dpv1alpha1.Backup { + var res []*dpv1alpha1.Backup + if backupList == nil || len(backupList.Items) == 0 { + return res + } + for i, backup := range backupList.Items { + if err := ValidateParentBackup(targetBackup, &backup, backupMethod); err != nil { + continue + } + // backups are listed by backup type label, validate if the backup method matches + // the backup type specified by label value. + if incremental { + if backup.Spec.BackupMethod != targetBackup.Spec.BackupMethod { + continue + } + } else { + if backup.Spec.BackupMethod != backupMethod.CompatibleMethod { + continue + } + } + res = append(res, &backupList.Items[i]) + } + return res +} + +// ValidateParentBackup validates the parent backup. +func ValidateParentBackup(backup *dpv1alpha1.Backup, parentBackup *dpv1alpha1.Backup, + backupMethod *dpv1alpha1.BackupMethod) error { + // validate parent backup is completed + if parentBackup.Status.Phase != dpv1alpha1.BackupPhaseCompleted { + return fmt.Errorf("parent backup %s/%s is not completed", parentBackup.Namespace, parentBackup.Name) + } + // validate if parent backup policy is consistent with the backup policy + if parentBackup.Spec.BackupPolicyName != backup.Spec.BackupPolicyName { + return fmt.Errorf("parent backup %s/%s policy %s is not consistent with the backup", + parentBackup.Namespace, parentBackup.Name, parentBackup.Spec.BackupPolicyName) + } + // validate if parent backup method is compatible with the backup method + if backup.Spec.BackupMethod != parentBackup.Spec.BackupMethod && + backupMethod.CompatibleMethod != parentBackup.Spec.BackupMethod { + return fmt.Errorf("parent backup %s/%s method %s is invalid for incremental backup", + parentBackup.Namespace, parentBackup.Name, parentBackup.Spec.BackupMethod) + } + // validate parent end time + if parentBackup.GetEndTime().IsZero() { + return fmt.Errorf("parent backup %s/%s end time is zero", parentBackup.Namespace, parentBackup.Name) + } + return nil +} + // restore functions func getPopulatePVCName(pvcUID types.UID) string { diff --git a/controllers/dataprotection/volumepopulator_controller_test.go b/controllers/dataprotection/volumepopulator_controller_test.go index e7502f9cd17..571ddd0038c 100644 --- a/controllers/dataprotection/volumepopulator_controller_test.go +++ b/controllers/dataprotection/volumepopulator_controller_test.go @@ -101,7 +101,7 @@ var _ = Describe("Volume Populator Controller test", func() { BeforeEach(func() { By("create actionSet") - actionSet = testdp.NewFakeActionSet(&testCtx) + actionSet = testdp.NewFakeActionSet(&testCtx, nil) }) initResources := func(volumeBinding storagev1.VolumeBindingMode, useVolumeSnapshotBackup, mockBackupCompleted bool) *corev1.PersistentVolumeClaim { @@ -109,7 +109,7 @@ var _ = Describe("Volume Populator Controller test", func() { createStorageClass(volumeBinding) By("create backup") - backup := mockBackupForRestore(actionSet.Name, "", "", mockBackupCompleted, useVolumeSnapshotBackup) + backup := mockBackupForRestore(actionSet.Name, "", "", mockBackupCompleted, useVolumeSnapshotBackup, "") By("create restore ") restore := testdp.NewRestoreFactory(testCtx.DefaultNamespace, testdp.RestoreName).
diff --git a/deploy/helm/crds/apps.kubeblocks.io_backuppolicytemplates.yaml b/deploy/helm/crds/apps.kubeblocks.io_backuppolicytemplates.yaml index 4b7155e3594..3e601a0aa9b 100644 --- a/deploy/helm/crds/apps.kubeblocks.io_backuppolicytemplates.yaml +++ b/deploy/helm/crds/apps.kubeblocks.io_backuppolicytemplates.yaml @@ -85,6 +85,11 @@ spec: For volume snapshot backup, the actionSet is not required, the controller will use the CSI volume snapshotter to create the snapshot. type: string + compatibleMethod: + description: The name of the compatible full backup method, + used by incremental backups. + pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: string env: description: Specifies the environment variables for the backup workload. diff --git a/deploy/helm/crds/dataprotection.kubeblocks.io_backuppolicies.yaml b/deploy/helm/crds/dataprotection.kubeblocks.io_backuppolicies.yaml index d7fa06edd4c..f704ff71efa 100644 --- a/deploy/helm/crds/dataprotection.kubeblocks.io_backuppolicies.yaml +++ b/deploy/helm/crds/dataprotection.kubeblocks.io_backuppolicies.yaml @@ -73,6 +73,11 @@ spec: For volume snapshot backup, the actionSet is not required, the controller will use the CSI volume snapshotter to create the snapshot. type: string + compatibleMethod: + description: The name of the compatible full backup method, + used by incremental backups. + pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: string env: description: Specifies the environment variables for the backup workload. diff --git a/deploy/helm/crds/dataprotection.kubeblocks.io_backups.yaml b/deploy/helm/crds/dataprotection.kubeblocks.io_backups.yaml index 76b17beecad..29d573f9e9c 100644 --- a/deploy/helm/crds/dataprotection.kubeblocks.io_backups.yaml +++ b/deploy/helm/crds/dataprotection.kubeblocks.io_backups.yaml @@ -268,6 +268,11 @@ spec: For volume snapshot backup, the actionSet is not required, the controller will use the CSI volume snapshotter to create the snapshot. type: string + compatibleMethod: + description: The name of the compatible full backup method, used + by incremental backups. + pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: string env: description: Specifies the environment variables for the backup workload. @@ -957,6 +962,11 @@ spec: backupRepoName: description: The name of the backup repository. type: string + baseBackupName: + description: |- + Records the base full backup name for incremental backup or differential backup. + When the base backup is deleted, the backup will also be deleted. + type: string completionTimestamp: description: |- Records the time when the backup operation was completed. @@ -1036,6 +1046,11 @@ spec: kopiaRepoPath: description: Records the path of the Kopia repository. type: string + parentBackupName: + description: |- + Records the parent backup name for incremental or differential backup. + When the parent backup is deleted, the backup will also be deleted. + type: string path: description: |- The directory within the backup repository where the backup data is stored. diff --git a/docs/developer_docs/api-reference/backup.md b/docs/developer_docs/api-reference/backup.md index 398821b5a83..fbe594dad39 100644 --- a/docs/developer_docs/api-reference/backup.md +++ b/docs/developer_docs/api-reference/backup.md @@ -1807,6 +1807,18 @@ string +compatibleMethod
+string
+(Optional)
+The name of the compatible full backup method, used by incremental backups.
 snapshotVolumes
 bool
@@ -2985,6 +2997,32 @@ EncryptionConfig
+parentBackupName
+string
+(Optional)
+Records the parent backup name for incremental or differential backup.
+When the parent backup is deleted, the backup will also be deleted.
+baseBackupName
+string
+(Optional)
+Records the base full backup name for incremental backup or differential backup.
+When the base backup is deleted, the backup will also be deleted.
 extras
[]string diff --git a/pkg/dataprotection/backup/request.go b/pkg/dataprotection/backup/request.go index f3bcf6dd08a..24caff124ef 100644 --- a/pkg/dataprotection/backup/request.go +++ b/pkg/dataprotection/backup/request.go @@ -65,6 +65,8 @@ type Request struct { WorkerServiceAccount string SnapshotVolumes bool Target *dpv1alpha1.BackupTarget + ParentBackup *dpv1alpha1.Backup + BaseBackup *dpv1alpha1.Backup } func (r *Request) GetBackupType() string { @@ -170,7 +172,7 @@ func (r *Request) buildBackupDataAction(targetPod *corev1.Pod, name string) (act backupDataAct := r.ActionSet.Spec.Backup.BackupData switch r.ActionSet.Spec.BackupType { - case dpv1alpha1.BackupTypeFull: + case dpv1alpha1.BackupTypeFull, dpv1alpha1.BackupTypeIncremental: podSpec, err := r.BuildJobActionPodSpec(targetPod, BackupDataContainerName, &backupDataAct.JobActionSpec) if err != nil { return nil, fmt.Errorf("failed to build job action pod spec: %w", err) @@ -319,10 +321,6 @@ func (r *Request) BuildJobActionPodSpec(targetPod *corev1.Pod, Name: dptypes.DPBackupName, Value: r.Backup.Name, }, - { - Name: dptypes.DPParentBackupName, - Value: r.Backup.Spec.ParentBackupName, - }, { Name: dptypes.DPTargetPodName, Value: targetPod.Name, @@ -359,6 +357,27 @@ func (r *Request) BuildJobActionPodSpec(targetPod *corev1.Pod, setKBClusterEnv(constant.AppInstanceLabelKey, constant.KBEnvClusterName) setKBClusterEnv(constant.KBAppComponentLabelKey, constant.KBEnvCompName) envVars = append(envVars, corev1.EnvVar{Name: constant.KBEnvNamespace, Value: r.Namespace}) + if r.ParentBackup != nil && r.BaseBackup != nil { + // build envs for incremental backups + envVars = append(envVars, []corev1.EnvVar{ + { + Name: dptypes.DPParentBackupName, + Value: r.ParentBackup.Name, + }, + { + Name: dptypes.DPBaseBackupName, + Value: r.BaseBackup.Name, + }, + { + Name: dptypes.DPTargetRelativePath, + Value: BuildTargetRelativePath(r.Target, targetPod.Name), + }, + { + Name: dptypes.DPBackupRootPath, + Value: BuildBackupRootPath(r.Backup, r.BackupRepo.Spec.PathPrefix, r.BackupPolicy.Spec.PathPrefix), + }, + }...) + } return utils.MergeEnv(envVars, r.BackupMethod.Env) } diff --git a/pkg/dataprotection/backup/utils.go b/pkg/dataprotection/backup/utils.go index 5735ff2453e..7d6c0ccd56f 100644 --- a/pkg/dataprotection/backup/utils.go +++ b/pkg/dataprotection/backup/utils.go @@ -195,10 +195,17 @@ func GenerateLegacyCRNameByBackupSchedule(backupSchedule *dpv1alpha1.BackupSched // BuildBaseBackupPath builds the path to storage backup data in backup repository. func BuildBaseBackupPath(backup *dpv1alpha1.Backup, repoPathPrefix, pathPrefix string) string { + backupRootPath := BuildBackupRootPath(backup, repoPathPrefix, pathPrefix) + // pattern: ${repoPathPrefix}/${namespace}/${pathPrefix}/${backupName} + return filepath.Join("/", backupRootPath, backup.Name) +} + +// BuildBackupRootPath builds the root path to storage backup data in backup repository. +func BuildBackupRootPath(backup *dpv1alpha1.Backup, repoPathPrefix, pathPrefix string) string { repoPathPrefix = strings.Trim(repoPathPrefix, "/") pathPrefix = strings.Trim(pathPrefix, "/") - // pattern: ${repoPathPrefix}/${namespace}/${pathPrefix}/${backupName} - return filepath.Join("/", repoPathPrefix, backup.Namespace, pathPrefix, backup.Name) + // pattern: ${repoPathPrefix}/${namespace}/${pathPrefix} + return filepath.Join("/", repoPathPrefix, backup.Namespace, pathPrefix) } // BuildBackupPathByTarget builds the backup by target.name and podSelectionStrategy. 
@@ -208,13 +215,21 @@ func BuildBackupPathByTarget(backup *dpv1alpha1.Backup, pathPrefix, targetPodName string) string { baseBackupPath := BuildBaseBackupPath(backup, repoPathPrefix, pathPrefix) + targetRelativePath := BuildTargetRelativePath(target, targetPodName) + return filepath.Join("/", baseBackupPath, targetRelativePath) +} + +// BuildTargetRelativePath builds the relative path by target.name and podSelectionStrategy. +func BuildTargetRelativePath(target *dpv1alpha1.BackupTarget, targetPodName string) string { + path := "" if target.Name != "" { - baseBackupPath = filepath.Join("/", baseBackupPath, target.Name) + path = filepath.Join(path, target.Name) } if target.PodSelector.Strategy == dpv1alpha1.PodSelectionStrategyAll { - baseBackupPath = filepath.Join("/", baseBackupPath, targetPodName) + path = filepath.Join(path, targetPodName) } - return baseBackupPath + // return ${DP_TARGET_RELATIVE_PATH} + return path } // BuildKopiaRepoPath builds the path of kopia repository. diff --git a/pkg/dataprotection/restore/builder.go b/pkg/dataprotection/restore/builder.go index 588b054d701..a968b9373a4 100644 --- a/pkg/dataprotection/restore/builder.go +++ b/pkg/dataprotection/restore/builder.go @@ -22,7 +22,6 @@ package restore import ( "encoding/json" "fmt" - "path/filepath" "strconv" "strings" "time" @@ -192,15 +191,17 @@ func (r *restoreJobBuilder) addCommonEnv(sourceTargetPodName string) *restoreJob // add common env filePath := r.backupSet.Backup.Status.Path if filePath != "" { - // append targetName in backup path - if r.restore.Spec.Backup.SourceTargetName != "" { - filePath = filepath.Join("/", filePath, r.restore.Spec.Backup.SourceTargetName) - } - // append sourceTargetPodName in backup path - if sourceTargetPodName != "" { - filePath = filepath.Join("/", filePath, sourceTargetPodName) + r.env = append(r.env, BackupFilePathEnv(filePath, r.restore.Spec.Backup.SourceTargetName, sourceTargetPodName)...) + } + if r.backupSet.BaseBackup != nil { + r.env = append(r.env, corev1.EnvVar{Name: dptypes.DPBaseBackupName, Value: r.backupSet.BaseBackup.Name}) + } + if len(r.backupSet.AncestorIncrementalBackups) > 0 { + ancestorIncrementalBackupNames := []string{} + for _, backup := range r.backupSet.AncestorIncrementalBackups { + ancestorIncrementalBackupNames = append(ancestorIncrementalBackupNames, backup.Name) } - r.env = append(r.env, corev1.EnvVar{Name: dptypes.DPBackupBasePath, Value: filePath}) + r.env = append(r.env, corev1.EnvVar{Name: dptypes.DPAncestorIncrementalBackupNames, Value: strings.Join(ancestorIncrementalBackupNames, ",")}) } // add time env actionSetEnv := r.backupSet.ActionSet.Spec.Env diff --git a/pkg/dataprotection/restore/manager.go b/pkg/dataprotection/restore/manager.go index 0af5d42905d..4ab2085eb10 100644 --- a/pkg/dataprotection/restore/manager.go +++ b/pkg/dataprotection/restore/manager.go @@ -46,6 +46,8 @@ import ( type BackupActionSet struct { Backup *dpv1alpha1.Backup + // set it when the backup relies on incremental backups, such as Incremental backup + AncestorIncrementalBackups []*dpv1alpha1.Backup // set it when the backup relies on a base backup, such as Continuous backup BaseBackup *dpv1alpha1.Backup ActionSet *dpv1alpha1.ActionSet @@ -107,37 +109,35 @@ func (r *RestoreManager) BuildDifferentialBackupActionSets(reqCtx intctrlutil.Re return nil } -// BuildIncrementalBackupActionSets builds the backupActionSets for specified incremental backup. 
-func (r *RestoreManager) BuildIncrementalBackupActionSets(reqCtx intctrlutil.RequestCtx, cli client.Client, sourceBackupSet BackupActionSet) error { - r.SetBackupSets(sourceBackupSet) - if sourceBackupSet.ActionSet != nil && sourceBackupSet.ActionSet.Spec.BackupType == dpv1alpha1.BackupTypeIncremental { +// BuildIncrementalBackupActionSet builds the backupActionSet for the specified incremental backup. +func (r *RestoreManager) BuildIncrementalBackupActionSet(reqCtx intctrlutil.RequestCtx, cli client.Client, sourceBackupSet BackupActionSet) error { + childBackupSet := &sourceBackupSet + backupMap := map[string]struct{}{} + for childBackupSet.ActionSet != nil && childBackupSet.ActionSet.Spec.BackupType == dpv1alpha1.BackupTypeIncremental { + // record the traversed backups + backupMap[childBackupSet.Backup.Name] = struct{}{} // get the parent BackupActionSet for incremental. - backupSet, err := r.GetBackupActionSetByNamespaced(reqCtx, cli, sourceBackupSet.Backup.Spec.ParentBackupName, sourceBackupSet.Backup.Namespace) + backupSet, err := r.GetBackupActionSetByNamespaced(reqCtx, cli, childBackupSet.Backup.Status.ParentBackupName, childBackupSet.Backup.Namespace) if err != nil || backupSet == nil { - return err + return intctrlutil.NewFatalError(fmt.Sprintf(`failed to get parent backup "%s" of incremental backup "%s"`, + childBackupSet.Backup.Status.ParentBackupName, childBackupSet.Backup.Name)) } - return r.BuildIncrementalBackupActionSets(reqCtx, cli, *backupSet) - } - // if reaches full backup, sort the BackupActionSets and return - sortBackupSets := func(backupSets []BackupActionSet, reverse bool) []BackupActionSet { - sort.Slice(backupSets, func(i, j int) bool { - if reverse { - i, j = j, i - } - backupI := backupSets[i].Backup - backupJ := backupSets[j].Backup - if backupI == nil { - return false - } - if backupJ == nil { - return true - } - return CompareWithBackupStopTime(*backupI, *backupJ) - }) - return backupSets + if _, ok := backupMap[backupSet.Backup.Name]; ok { + return intctrlutil.NewFatalError(fmt.Sprintf(`backup "%s" relies on child backup "%s"`, + childBackupSet.Backup.Name, backupSet.Backup.Name)) + } + if err := ValidateParentBackupSet(backupSet, childBackupSet); err != nil { + return intctrlutil.NewFatalError(fmt.Sprintf(`failed to validate parent backup "%s" and child backup "%s": %v`, + backupSet.Backup.Name, childBackupSet.Backup.Name, err)) + } + if backupSet.ActionSet != nil && backupSet.ActionSet.Spec.BackupType == dpv1alpha1.BackupTypeIncremental { + sourceBackupSet.AncestorIncrementalBackups = append([]*dpv1alpha1.Backup{backupSet.Backup}, sourceBackupSet.AncestorIncrementalBackups...)
+ } else { + sourceBackupSet.BaseBackup = backupSet.Backup + } + childBackupSet = backupSet } - r.PrepareDataBackupSets = sortBackupSets(r.PrepareDataBackupSets, false) - r.PostReadyBackupSets = sortBackupSets(r.PostReadyBackupSets, false) + r.SetBackupSets(sourceBackupSet) return nil } @@ -165,70 +165,86 @@ func (r *RestoreManager) BuildContinuousRestoreManager(reqCtx intctrlutil.Reques return nil } } - fullBackupSet, err := r.getFullBackupActionSetForContinuous(reqCtx, cli, continuousBackup, metav1.NewTime(restoreTime)) - if err != nil || fullBackupSet == nil { + + backupSet, err := r.getBackupActionSetForContinuous(reqCtx, cli, continuousBackup, metav1.NewTime(restoreTime)) + if err != nil || backupSet == nil { return err } // set base backup - continuousBackupSet.BaseBackup = fullBackupSet.Backup - r.SetBackupSets(*fullBackupSet, continuousBackupSet) + continuousBackupSet.BaseBackup = backupSet.Backup + if backupSet.ActionSet != nil && backupSet.ActionSet.Spec.BackupType == dpv1alpha1.BackupTypeIncremental { + if err = r.BuildIncrementalBackupActionSet(reqCtx, cli, *backupSet); err != nil { + return err + } + r.SetBackupSets(continuousBackupSet) + } else { + r.SetBackupSets(*backupSet, continuousBackupSet) + } return nil } -// getFullBackupActionSetForContinuous gets full backup and actionSet for continuous. -func (r *RestoreManager) getFullBackupActionSetForContinuous(reqCtx intctrlutil.RequestCtx, cli client.Client, continuousBackup *dpv1alpha1.Backup, restoreTime metav1.Time) (*BackupActionSet, error) { - notFoundLatestFullBackup := func() (*BackupActionSet, error) { - return nil, intctrlutil.NewFatalError(fmt.Sprintf(`can not found latest full backup based on backupPolicy "%s" and specified restoreTime "%s"`, +// getBackupActionSetForContinuous gets full or incremental backup and actionSet for continuous. +func (r *RestoreManager) getBackupActionSetForContinuous(reqCtx intctrlutil.RequestCtx, cli client.Client, continuousBackup *dpv1alpha1.Backup, restoreTime metav1.Time) (*BackupActionSet, error) { + notFoundLatestBackup := func() (*BackupActionSet, error) { + return nil, intctrlutil.NewFatalError(fmt.Sprintf(`cannot find the latest full or incremental backup based on backupPolicy "%s" and specified restoreTime "%s"`, continuousBackup.Spec.BackupPolicyName, restoreTime)) } if continuousBackup.GetStartTime().IsZero() { - return notFoundLatestFullBackup() + return notFoundLatestBackup() } - // 1. list completed full backups - backupItems, err := r.listCompletedFullBackups(reqCtx, cli, continuousBackup) + // 1. list completed backups + // full backups + fullBackupItems, err := r.listCompletedBackups(reqCtx, cli, continuousBackup, dpv1alpha1.BackupTypeFull) if err != nil { return nil, err } - + // incremental backups + incrementalBackupItems, err := r.listCompletedBackups(reqCtx, cli, continuousBackup, dpv1alpha1.BackupTypeIncremental) + if err != nil { + return nil, err + } + backupItems := []dpv1alpha1.Backup{} + backupItems = append(backupItems, fullBackupItems...) + backupItems = append(backupItems, incrementalBackupItems...) // sort by completed time in descending order sort.Slice(backupItems, func(i, j int) bool { i, j = j, i - return CompareWithBackupStopTime(backupItems[i], backupItems[j]) + return utils.CompareWithBackupStopTime(backupItems[i], backupItems[j]) }) // 2.
get the latest backup object - var latestFullBackup *dpv1alpha1.Backup + var latestBackup *dpv1alpha1.Backup for _, item := range backupItems { - fullBackupStopTime := item.GetEndTime() - // latest full backup rules: - // 1. Full backup's stopTime must after Continuous backup's startTime. + backupStopTime := item.GetEndTime() + // latest backup rules: + // 1. Full or Incremental backup's stopTime must after Continuous backup's startTime. // Even if the seconds are the same, the data may not be continuous. - // 2. RestoreTime should after the Full backup's stopTime. - if fullBackupStopTime != nil && - !restoreTime.Before(fullBackupStopTime) && - !fullBackupStopTime.Before(continuousBackup.GetStartTime()) { - latestFullBackup = &item + // 2. RestoreTime should after the Full or Incremental backup's stopTime. + if backupStopTime != nil && + !restoreTime.Before(backupStopTime) && + !backupStopTime.Before(continuousBackup.GetStartTime()) { + latestBackup = &item break } } - if latestFullBackup == nil { - return notFoundLatestFullBackup() + if latestBackup == nil { + return notFoundLatestBackup() } // 3. get the action set var actionSetName string - if latestFullBackup.Status.BackupMethod != nil { - actionSetName = latestFullBackup.Status.BackupMethod.ActionSetName + if latestBackup.Status.BackupMethod != nil { + actionSetName = latestBackup.Status.BackupMethod.ActionSetName } actionSet, err := utils.GetActionSetByName(reqCtx, cli, actionSetName) if err != nil { return nil, err } - return &BackupActionSet{Backup: latestFullBackup, ActionSet: actionSet}, nil + return &BackupActionSet{Backup: latestBackup, ActionSet: actionSet}, nil } -func (r *RestoreManager) listCompletedFullBackups(reqCtx intctrlutil.RequestCtx, cli client.Client, continuousBackup *dpv1alpha1.Backup) ([]dpv1alpha1.Backup, error) { +func (r *RestoreManager) listCompletedBackups(reqCtx intctrlutil.RequestCtx, cli client.Client, continuousBackup *dpv1alpha1.Backup, backupType dpv1alpha1.BackupType) ([]dpv1alpha1.Backup, error) { matchingLabels := map[string]string{ - dptypes.BackupTypeLabelKey: string(dpv1alpha1.BackupTypeFull), + dptypes.BackupTypeLabelKey: string(backupType), } if clusterUID := continuousBackup.Labels[dptypes.ClusterUIDLabelKey]; clusterUID != "" { matchingLabels[dptypes.ClusterUIDLabelKey] = clusterUID diff --git a/pkg/dataprotection/restore/utils.go b/pkg/dataprotection/restore/utils.go index 3feabede308..bea8d944095 100644 --- a/pkg/dataprotection/restore/utils.go +++ b/pkg/dataprotection/restore/utils.go @@ -22,6 +22,7 @@ package restore import ( "encoding/json" "fmt" + "path/filepath" "slices" "strconv" "strings" @@ -187,21 +188,6 @@ func transformTimeWithZone(targetTime *metav1.Time, timeZone string) (*metav1.Ti return &metav1.Time{Time: targetTime.In(zone)}, nil } -func CompareWithBackupStopTime(backupI, backupJ dpv1alpha1.Backup) bool { - endTimeI := backupI.GetEndTime() - endTimeJ := backupJ.GetEndTime() - if endTimeI.IsZero() { - return false - } - if endTimeJ.IsZero() { - return true - } - if endTimeI.Equal(endTimeJ) { - return backupI.Name < backupJ.Name - } - return endTimeI.Before(endTimeJ) -} - func BuildJobKeyForActionStatus(jobName string) string { return fmt.Sprintf("%s/%s", constant.JobKind, jobName) } @@ -270,7 +256,7 @@ func ValidateAndInitRestoreMGR(reqCtx intctrlutil.RequestCtx, case dpv1alpha1.BackupTypeFull: restoreMgr.SetBackupSets(*backupSet) case dpv1alpha1.BackupTypeIncremental: - err = restoreMgr.BuildIncrementalBackupActionSets(reqCtx, cli, *backupSet) + err = 
restoreMgr.BuildIncrementalBackupActionSet(reqCtx, cli, *backupSet) case dpv1alpha1.BackupTypeDifferential: err = restoreMgr.BuildDifferentialBackupActionSets(reqCtx, cli, *backupSet) case dpv1alpha1.BackupTypeContinuous: @@ -412,3 +398,91 @@ func GetVolumeSnapshotsBySourcePod(backup *dpv1alpha1.Backup, target *dpv1alpha1 } return nil } + +// ValidateParentBackupSet validates the parent backup and child backup. +func ValidateParentBackupSet(parentBackupSet *BackupActionSet, backupSet *BackupActionSet) error { + parentBackup := parentBackupSet.Backup + backup := backupSet.Backup + if parentBackup == nil || backup == nil { + return fmt.Errorf("parent backup or child backup is nil") + } + if parentBackup.Status.Phase != dpv1alpha1.BackupPhaseCompleted || + backup.Status.Phase != dpv1alpha1.BackupPhaseCompleted { + return fmt.Errorf("parent backup or child backup is not completed") + } + // validate parent backup policy + if parentBackup.Spec.BackupPolicyName != backup.Spec.BackupPolicyName { + return fmt.Errorf(`parent backup policy: "%s" is different from child backup policy: "%s"`, + parentBackup.Spec.BackupPolicyName, backup.Spec.BackupPolicyName) + } + // validate parent backup method and base backup name + var parentBackupType dpv1alpha1.BackupType + if parentBackupSet.ActionSet != nil { + parentBackupType = parentBackupSet.ActionSet.Spec.BackupType + } + switch parentBackupType { + case dpv1alpha1.BackupTypeIncremental: + if parentBackup.Spec.BackupMethod != backup.Spec.BackupMethod { + return fmt.Errorf(`the parent incremental backup method "%s" is not the same as the child backup method "%s"`, + parentBackup.Spec.BackupMethod, backup.Spec.BackupMethod) + } + if parentBackup.Status.BaseBackupName != backup.Status.BaseBackupName { + return fmt.Errorf(`the parent incremental backup's base backup "%s" is not the same as the child backup's base backup "%s"`, + parentBackup.Status.BaseBackupName, backup.Status.BaseBackupName) + } + case dpv1alpha1.BackupTypeFull: + if parentBackup.Spec.BackupMethod != backup.Status.BackupMethod.CompatibleMethod { + return fmt.Errorf(`the parent full backup method "%s" is not compatible with the child backup method "%s"`, + parentBackup.Spec.BackupMethod, backup.Spec.BackupMethod) + } + if parentBackup.Name != backup.Status.BaseBackupName { + return fmt.Errorf(`the parent full backup "%s" is not the same as the child backup's base backup "%s"`, + parentBackup.Name, backup.Status.BaseBackupName) + } + default: + return fmt.Errorf(`the parent backup "%s" is neither an incremental nor a full backup`, parentBackup.Name) + } + // validate parent backup end time + if !utils.CompareWithBackupStopTime(*parentBackup, *backup) { + return fmt.Errorf(`the parent backup "%s" is not before the child backup "%s"`, parentBackup.Name, backup.Name) + } + return nil +} + +// GetTargetRelativePath returns the target relative path. +func GetTargetRelativePath(targetName, targetPodName string) string { + targetRelativePath := "" + if targetName != "" { + targetRelativePath = filepath.Join(targetRelativePath, targetName) + } + if targetPodName != "" { + targetRelativePath = filepath.Join(targetRelativePath, targetPodName) + } + // ${targetName}/${targetPodName} + return targetRelativePath +} + +// BackupFilePathEnv returns the envs for backup root path and target relative path.
+func BackupFilePathEnv(filePath, targetName, targetPodName string) []corev1.EnvVar { + envs := []corev1.EnvVar{} + if len(filePath) == 0 { + return envs + } + targetRelativePath := GetTargetRelativePath(targetName, targetPodName) + envs = append(envs, []corev1.EnvVar{ + { + Name: dptypes.DPTargetRelativePath, + Value: targetRelativePath, + }, + { + Name: dptypes.DPBackupRootPath, + Value: filepath.Join("/", filePath, "../"), + }, + // construct the backup base path with target relative path + { + Name: dptypes.DPBackupBasePath, + Value: filepath.Join("/", filePath, targetRelativePath), + }, + }...) + return envs +} diff --git a/pkg/dataprotection/types/constant.go b/pkg/dataprotection/types/constant.go index a05c33fce8d..558bba53a84 100644 --- a/pkg/dataprotection/types/constant.go +++ b/pkg/dataprotection/types/constant.go @@ -103,11 +103,23 @@ const ( // DPTargetPodRole the target pod role DPTargetPodRole = "DP_TARGET_POD_ROLE" // DPBackupBasePath the base path for backup data in the storage + // In a backup action pod, it equals ${DP_BACKUP_ROOT_PATH}/${DP_BACKUP_NAME}/${DP_TARGET_RELATIVE_PATH} DPBackupBasePath = "DP_BACKUP_BASE_PATH" + // DPBackupRootPath the root path for backup data + DPBackupRootPath = "DP_BACKUP_ROOT_PATH" + // DPTargetRelativePath the relative path based on the backup data root path + // ${DP_TARGET_RELATIVE_PATH}=${target.name}/${target.selectedTargetPod[*]} + DPTargetRelativePath = "DP_TARGET_RELATIVE_PATH" // DPBackupName backup CR name DPBackupName = "DP_BACKUP_NAME" // DPParentBackupName backup CR name DPParentBackupName = "DP_PARENT_BACKUP_NAME" + // DPBaseBackupName backup CR name + DPBaseBackupName = "DP_BASE_BACKUP_NAME" + // DPAncestorIncrementalBackupNames backup CR names + // Used to restore incremental backups, recording the incremental ancestor backup names in order of end time + // For example: ${DP_ANCESTOR_INCREMENTAL_BACKUP_NAMES}=incrementalBackupName1,incrementalBackupName2,incrementalBackupName3 + DPAncestorIncrementalBackupNames = "DP_ANCESTOR_INCREMENTAL_BACKUP_NAMES" // DPTTL backup time to live, reference the backup.spec.retentionPeriod DPTTL = "DP_TTL" // DPCheckInterval check interval for sync backup progress diff --git a/pkg/dataprotection/utils/utils.go b/pkg/dataprotection/utils/utils.go index 2bbabdc3ba2..363a2855f94 100644 --- a/pkg/dataprotection/utils/utils.go +++ b/pkg/dataprotection/utils/utils.go @@ -251,6 +251,19 @@ func GetBackupType(actionSet *dpv1alpha1.ActionSet, useSnapshot *bool) dpv1alpha return "" } +func GetBackupTypeByMethodName(reqCtx intctrlutil.RequestCtx, cli client.Client, methodName string, + backupPolicy *dpv1alpha1.BackupPolicy) (dpv1alpha1.BackupType, error) { + backupMethod := GetBackupMethodByName(methodName, backupPolicy) + if backupMethod == nil { + return "", nil + } + actionSet, err := GetActionSetByName(reqCtx, cli, backupMethod.ActionSetName) + if err != nil { + return "", err + } + return GetBackupType(actionSet, backupMethod.SnapshotVolumes), nil +} + // PrependSpaces prepends spaces to each line of the content. 
func PrependSpaces(content string, spaces int) string { prefix := "" @@ -371,3 +384,18 @@ func GetBackupStatusTarget(backupObj *dpv1alpha1.Backup, sourceTargetName string } return nil } + +func CompareWithBackupStopTime(backupI, backupJ dpv1alpha1.Backup) bool { + endTimeI := backupI.GetEndTime() + endTimeJ := backupJ.GetEndTime() + if endTimeI.IsZero() { + return false + } + if endTimeJ.IsZero() { + return true + } + if endTimeI.Equal(endTimeJ) { + return backupI.Name < backupJ.Name + } + return endTimeI.Before(endTimeJ) +} diff --git a/pkg/testutil/dataprotection/backup_utils.go b/pkg/testutil/dataprotection/backup_utils.go index 2aaa330bd84..45bcc85903c 100644 --- a/pkg/testutil/dataprotection/backup_utils.go +++ b/pkg/testutil/dataprotection/backup_utils.go @@ -42,9 +42,14 @@ import ( viper "github.com/apecloud/kubeblocks/pkg/viperx" ) -func NewFakeActionSet(testCtx *testutil.TestContext) *dpv1alpha1.ActionSet { +func NewFakeActionSet(testCtx *testutil.TestContext, change func(as *dpv1alpha1.ActionSet)) *dpv1alpha1.ActionSet { as := testapps.CreateCustomizedObj(testCtx, "backup/actionset.yaml", - &dpv1alpha1.ActionSet{}, testapps.WithName(ActionSetName)) + &dpv1alpha1.ActionSet{}, func(obj *dpv1alpha1.ActionSet) { + obj.Name = ActionSetName + if change != nil { + change(obj) + } + }) Eventually(testapps.CheckObj(testCtx, client.ObjectKeyFromObject(as), func(g Gomega, as *dpv1alpha1.ActionSet) { g.Expect(as.Status.Phase).Should(BeEquivalentTo(dpv1alpha1.AvailablePhase)) @@ -65,6 +70,10 @@ func NewFakeBackupPolicy(testCtx *testutil.TestContext, AddBackupMethod(BackupMethodName, false, ActionSetName). SetBackupMethodVolumeMounts(DataVolumeName, DataVolumeMountPath, LogVolumeName, LogVolumeMountPath). + AddBackupMethod(IncBackupMethodName, false, IncActionSetName). + SetBackupMethodVolumeMounts(DataVolumeName, DataVolumeMountPath, + LogVolumeName, LogVolumeMountPath). + SetBackupMethodCompatibleMethod(BackupMethodName). AddBackupMethod(VSBackupMethodName, true, ""). SetBackupMethodVolumes([]string{DataVolumeName}). Apply(change). 
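Review note: CompareWithBackupStopTime now lives in pkg/dataprotection/utils so the restore manager, the continuous-restore lookup, and ValidateParentBackupSet can share one ordering rule. Since that rule decides which backup counts as "earlier" in a chain, here is a small self-contained sketch of the same comparison on a simplified struct; the struct and sample data are illustrative, not taken from the diff.

package main

import (
	"fmt"
	"sort"
	"time"
)

type fakeBackup struct {
	Name    string
	EndTime *time.Time // nil stands in for a backup with no recorded end time
}

// less mirrors the CompareWithBackupStopTime contract: backups without an end
// time sort last, equal end times fall back to name order, otherwise the
// earlier end time wins.
func less(i, j fakeBackup) bool {
	if i.EndTime == nil {
		return false
	}
	if j.EndTime == nil {
		return true
	}
	if i.EndTime.Equal(*j.EndTime) {
		return i.Name < j.Name
	}
	return i.EndTime.Before(*j.EndTime)
}

func main() {
	t1 := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
	t2 := t1.Add(time.Hour)
	backups := []fakeBackup{
		{Name: "incr-b", EndTime: &t2},
		{Name: "running", EndTime: nil},
		{Name: "incr-a", EndTime: &t2},
		{Name: "full", EndTime: &t1},
	}
	sort.Slice(backups, func(i, j int) bool { return less(backups[i], backups[j]) })
	for _, b := range backups {
		fmt.Println(b.Name) // full, incr-a, incr-b, running
	}
}

The tie-break on name keeps the ordering deterministic, which matters when several incremental backups complete within the same second.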
@@ -274,6 +283,9 @@ func MockBackupStatusMethod(backup *dpv1alpha1.Backup, backupMethodName, targetV }, }, } + if backupMethodName == IncBackupMethodName { + backup.Status.BackupMethod.CompatibleMethod = BackupMethodName + } } func MockBackupStatusTarget(backup *dpv1alpha1.Backup, podSelectionStrategy dpv1alpha1.PodSelectionStrategy) { diff --git a/pkg/testutil/dataprotection/backuppolicy_factory.go b/pkg/testutil/dataprotection/backuppolicy_factory.go index 669ddf5df4c..eba99c86520 100644 --- a/pkg/testutil/dataprotection/backuppolicy_factory.go +++ b/pkg/testutil/dataprotection/backuppolicy_factory.go @@ -68,6 +68,11 @@ func (f *MockBackupPolicyFactory) AddBackupMethod(name string, return f } +func (f *MockBackupPolicyFactory) SetBackupMethodCompatibleMethod(name string) *MockBackupPolicyFactory { + f.Get().Spec.BackupMethods[len(f.Get().Spec.BackupMethods)-1].CompatibleMethod = name + return f +} + func (f *MockBackupPolicyFactory) SetBackupMethodVolumes(names []string) *MockBackupPolicyFactory { f.Get().Spec.BackupMethods[len(f.Get().Spec.BackupMethods)-1].TargetVolumes.Volumes = names return f diff --git a/pkg/testutil/dataprotection/constant.go b/pkg/testutil/dataprotection/constant.go index e8829274e15..c5dc2616992 100644 --- a/pkg/testutil/dataprotection/constant.go +++ b/pkg/testutil/dataprotection/constant.go @@ -24,13 +24,15 @@ const ( ComponentName = "test-comp" ContainerName = "test-container" - BackupName = "test-backup" - BackupRepoName = "test-repo" - BackupPolicyName = "test-backup-policy" - BackupMethodName = "xtrabackup" - VSBackupMethodName = "volume-snapshot" - BackupPathPrefix = "/backup" - ActionSetName = "xtrabackup" + BackupName = "test-backup" + BackupRepoName = "test-repo" + BackupPolicyName = "test-backup-policy" + BackupMethodName = "xtrabackup" + IncBackupMethodName = "xtrabackup-inc" + VSBackupMethodName = "volume-snapshot" + BackupPathPrefix = "/backup" + ActionSetName = "xtrabackup" + IncActionSetName = "xtrabackup-inc" DataVolumeName = "data" DataVolumeMountPath = "/data" diff --git a/pkg/testutil/dataprotection/utils.go b/pkg/testutil/dataprotection/utils.go index cc4be70f73a..4e99489b4c4 100644 --- a/pkg/testutil/dataprotection/utils.go +++ b/pkg/testutil/dataprotection/utils.go @@ -58,3 +58,10 @@ func PatchBackupStatus(testCtx *testutil.TestContext, key client.ObjectKey, stat fetched.Status = status })).Should(Succeed()) } + +func NewFakeIncActionSet(testCtx *testutil.TestContext) *dpv1alpha1.ActionSet { + return NewFakeActionSet(testCtx, func(as *dpv1alpha1.ActionSet) { + as.Name = IncActionSetName + as.Spec.BackupType = dpv1alpha1.BackupTypeIncremental + }) +}
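Review note: to tie the restore-side pieces together, below is a compact sketch of the parent-chain walk that BuildIncrementalBackupActionSet performs, reduced to a plain map of fake backups. The struct fields here (Name, ParentBackupName, BackupType) only stand in for the real Backup spec/status fields, and the helper is illustrative rather than an excerpt from the diff; it shows how the base full backup and the oldest-first ancestor list that feed DP_BASE_BACKUP_NAME and DP_ANCESTOR_INCREMENTAL_BACKUP_NAMES fall out of the loop, including the missing-parent and cycle checks.

package main

import "fmt"

type fakeBackup struct {
	Name             string
	ParentBackupName string
	BackupType       string // "Full" or "Incremental"
}

// resolveChain walks parent links from an incremental backup up to its full
// base backup, returning the base name and the ancestor incrementals ordered
// oldest first; it fails on a missing parent or a cycle, like the manager code.
func resolveChain(all map[string]fakeBackup, start string) (base string, ancestors []string, err error) {
	visited := map[string]struct{}{}
	current := all[start]
	for current.BackupType == "Incremental" {
		visited[current.Name] = struct{}{}
		parent, ok := all[current.ParentBackupName]
		if !ok {
			return "", nil, fmt.Errorf("parent backup %q of %q not found", current.ParentBackupName, current.Name)
		}
		if _, seen := visited[parent.Name]; seen {
			return "", nil, fmt.Errorf("cycle detected at backup %q", parent.Name)
		}
		if parent.BackupType == "Incremental" {
			// prepend so ancestors stay ordered from oldest to newest
			ancestors = append([]string{parent.Name}, ancestors...)
		} else {
			base = parent.Name
		}
		current = parent
	}
	return base, ancestors, nil
}

func main() {
	backups := map[string]fakeBackup{
		"full-001": {Name: "full-001", BackupType: "Full"},
		"incr-002": {Name: "incr-002", ParentBackupName: "full-001", BackupType: "Incremental"},
		"incr-003": {Name: "incr-003", ParentBackupName: "incr-002", BackupType: "Incremental"},
	}
	base, ancestors, err := resolveChain(backups, "incr-003")
	fmt.Println(base, ancestors, err) // full-001 [incr-002] <nil>
}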