Merge pull request #1428 from AlexLast/master
Restore DBCluster from S3 snapshot
haarchri authored Aug 23, 2022
2 parents d95f224 + adfbd61 commit 4040d5c
Showing 4 changed files with 295 additions and 4 deletions.
35 changes: 35 additions & 0 deletions apis/rds/v1alpha1/custom_types.go
@@ -272,6 +272,41 @@ type CustomDBClusterParameters struct {
//
// By default, this parameter is disabled.
ApplyImmediately *bool `json:"applyImmediately,omitempty"`

// RestoreFrom specifies the details of the backup to restore when creating a new DBCluster.
// +optional
RestoreFrom *RestoreBackupConfiguration `json:"restoreFrom,omitempty"`
}

// S3RestoreBackupConfiguration defines the details of the S3 backup to restore from.
type S3RestoreBackupConfiguration struct {
// BucketName is the name of the S3 bucket containing the backup to restore.
BucketName *string `json:"bucketName"`

// IngestionRoleARN is the IAM role RDS can assume that will allow it to access the contents of the S3 bucket.
IngestionRoleARN *string `json:"ingestionRoleARN"`

// Prefix is the path prefix of the S3 bucket within which the backup to restore is located.
// +optional
Prefix *string `json:"prefix,omitempty"`

// SourceEngine is the engine used to create the backup.
// Must be "mysql".
SourceEngine *string `json:"sourceEngine"`

// SourceEngineVersion is the version of the engine used to create the backup.
// Example: "5.7.30"
SourceEngineVersion *string `json:"sourceEngineVersion"`
}

// RestoreBackupConfiguration defines the backup to restore a new DBCluster from.
type RestoreBackupConfiguration struct {
// S3 specifies the details of the S3 backup to restore from.
// +optional
S3 *S3RestoreBackupConfiguration `json:"s3,omitempty"`

// Source is the type of the backup to restore when creating a new DBCluster. Only S3 is supported at present.
Source *string `json:"source"`
}

// CustomGlobalClusterParameters are custom parameters for a GlobalCluster
70 changes: 70 additions & 0 deletions apis/rds/v1alpha1/zz_generated.deepcopy.go

Some generated files are not rendered by default.

42 changes: 42 additions & 0 deletions package/crds/rds.aws.crossplane.io_dbclusters.yaml
@@ -674,6 +674,48 @@ spec:
description: The Amazon Resource Name (ARN) of the source DB instance
or DB cluster if this DB cluster is created as a read replica.
type: string
restoreFrom:
description: RestoreFrom specifies the details of the backup to
restore when creating a new DBCluster.
properties:
s3:
description: S3 specifies the details of the S3 backup to
restore from.
properties:
bucketName:
description: BucketName is the name of the S3 bucket containing
the backup to restore.
type: string
ingestionRoleARN:
description: IngestionRoleARN is the IAM role RDS can
assume that will allow it to access the contents of
the S3 bucket.
type: string
prefix:
description: Prefix is the path prefix of the S3 bucket
within which the backup to restore is located.
type: string
sourceEngine:
description: SourceEngine is the engine used to create
the backup. Must be "mysql".
type: string
sourceEngineVersion:
description: 'SourceEngineVersion is the version of the
engine used to create the backup. Example: "5.7.30"'
type: string
required:
- bucketName
- ingestionRoleARN
- sourceEngine
- sourceEngineVersion
type: object
source:
description: Source is the type of the backup to restore when
creating a new DBCluster. Only S3 is supported at present.
type: string
required:
- source
type: object
scalingConfiguration:
description: For DB clusters in serverless DB engine mode, the
scaling properties of the DB cluster.
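For reference, a DBCluster manifest that exercises the new schema might look like the sketch below. The cluster name, region, engine, bucket, prefix, and role ARN are placeholder values rather than anything taken from this change; only the restoreFrom structure (source, s3.bucketName, s3.ingestionRoleARN, s3.sourceEngine, s3.sourceEngineVersion, and the optional s3.prefix) follows the CRD schema added above.

apiVersion: rds.aws.crossplane.io/v1alpha1
kind: DBCluster
metadata:
  name: example-restored-cluster         # placeholder name
spec:
  forProvider:
    region: eu-west-1                     # placeholder region
    engine: aurora-mysql                  # placeholder target engine
    restoreFrom:
      source: S3
      s3:
        bucketName: example-backup-bucket
        prefix: backups/mysql             # optional
        ingestionRoleARN: arn:aws:iam::123456789012:role/example-rds-s3-ingestion
        sourceEngine: mysql               # only "mysql" is supported as the source engine
        sourceEngineVersion: "5.7.30"

When restoreFrom is set, preCreate in setup.go (below) dispatches on source and issues a RestoreDBClusterFromS3 call built from these parameters.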
152 changes: 148 additions & 4 deletions pkg/controller/rds/dbcluster/setup.go
@@ -34,8 +34,10 @@ import (

// error constants
const (
errSaveSecretFailed = "failed to save generated password to Kubernetes secret"
errUpdateTags = "cannot update tags"
errRestore = "cannot restore DBCluster in AWS"
errUnknownRestoreFromSource = "unknown restoreFrom source"
)

type updater struct {
@@ -118,7 +120,7 @@ type custom struct {
client svcsdkapi.RDSAPI
}

func (e *custom) preCreate(ctx context.Context, cr *svcapitypes.DBCluster, obj *svcsdk.CreateDBClusterInput) error { // nolint:gocyclo
pw, _, err := rds.GetPassword(ctx, e.kube, cr.Spec.ForProvider.MasterUserPasswordSecretRef, cr.Spec.WriteConnectionSecretToReference)
if resource.IgnoreNotFound(err) != nil {
return errors.Wrap(err, "cannot get password from the given secret")
@@ -139,12 +141,154 @@ func (e *custom) preCreate(ctx context.Context, cr *svcapitypes.DBCluster, obj *
for i, v := range cr.Spec.ForProvider.VPCSecurityGroupIDs {
obj.VpcSecurityGroupIds[i] = aws.String(v)
}

// If a restore source is configured, restore the DBCluster from the specified backup.
if cr.Spec.ForProvider.RestoreFrom != nil {
switch *cr.Spec.ForProvider.RestoreFrom.Source {
case "S3":
input := generateRestoreDBClusterFromS3Input(cr)
input.MasterUserPassword = obj.MasterUserPassword
input.DBClusterIdentifier = obj.DBClusterIdentifier
input.VpcSecurityGroupIds = obj.VpcSecurityGroupIds

if _, err = e.client.RestoreDBClusterFromS3WithContext(ctx, input); err != nil {
return errors.Wrap(err, errRestore)
}
default:
return errors.New(errUnknownRestoreFromSource)
}
}

return nil
}

// generateRestoreDBClusterFromS3Input builds a RestoreDBClusterFromS3Input from the
// DBCluster's forProvider parameters and its RestoreFrom.S3 configuration.
func generateRestoreDBClusterFromS3Input(cr *svcapitypes.DBCluster) *svcsdk.RestoreDBClusterFromS3Input { // nolint:gocyclo
res := &svcsdk.RestoreDBClusterFromS3Input{}

if cr.Spec.ForProvider.AvailabilityZones != nil {
res.SetAvailabilityZones(cr.Spec.ForProvider.AvailabilityZones)
}

if cr.Spec.ForProvider.BacktrackWindow != nil {
res.SetBacktrackWindow(*cr.Spec.ForProvider.BacktrackWindow)
}

if cr.Spec.ForProvider.BackupRetentionPeriod != nil {
res.SetBackupRetentionPeriod(*cr.Spec.ForProvider.BackupRetentionPeriod)
}

if cr.Spec.ForProvider.CharacterSetName != nil {
res.SetCharacterSetName(*cr.Spec.ForProvider.CharacterSetName)
}

if cr.Spec.ForProvider.CopyTagsToSnapshot != nil {
res.SetCopyTagsToSnapshot(*cr.Spec.ForProvider.CopyTagsToSnapshot)
}

if cr.Spec.ForProvider.DBClusterParameterGroupName != nil {
res.SetDBClusterParameterGroupName(*cr.Spec.ForProvider.DBClusterParameterGroupName)
}

if cr.Spec.ForProvider.DBSubnetGroupName != nil {
res.SetDBSubnetGroupName(*cr.Spec.ForProvider.DBSubnetGroupName)
}

if cr.Spec.ForProvider.DatabaseName != nil {
res.SetDatabaseName(*cr.Spec.ForProvider.DatabaseName)
}

if cr.Spec.ForProvider.DeletionProtection != nil {
res.SetDeletionProtection(*cr.Spec.ForProvider.DeletionProtection)
}

if cr.Spec.ForProvider.Domain != nil {
res.SetDomain(*cr.Spec.ForProvider.Domain)
}

if cr.Spec.ForProvider.DomainIAMRoleName != nil {
res.SetDomainIAMRoleName(*cr.Spec.ForProvider.DomainIAMRoleName)
}

if cr.Spec.ForProvider.EnableCloudwatchLogsExports != nil {
res.SetEnableCloudwatchLogsExports(cr.Spec.ForProvider.EnableCloudwatchLogsExports)
}

if cr.Spec.ForProvider.EnableIAMDatabaseAuthentication != nil {
res.SetEnableIAMDatabaseAuthentication(*cr.Spec.ForProvider.EnableIAMDatabaseAuthentication)
}

if cr.Spec.ForProvider.Engine != nil {
res.SetEngine(*cr.Spec.ForProvider.Engine)
}

if cr.Spec.ForProvider.EngineVersion != nil {
res.SetEngineVersion(*cr.Spec.ForProvider.EngineVersion)
}

if cr.Spec.ForProvider.KMSKeyID != nil {
res.SetKmsKeyId(*cr.Spec.ForProvider.KMSKeyID)
}

if cr.Spec.ForProvider.MasterUsername != nil {
res.SetMasterUsername(*cr.Spec.ForProvider.MasterUsername)
}

if cr.Spec.ForProvider.OptionGroupName != nil {
res.SetOptionGroupName(*cr.Spec.ForProvider.OptionGroupName)
}

if cr.Spec.ForProvider.Port != nil {
res.SetPort(*cr.Spec.ForProvider.Port)
}

if cr.Spec.ForProvider.PreferredBackupWindow != nil {
res.SetPreferredBackupWindow(*cr.Spec.ForProvider.PreferredBackupWindow)
}

if cr.Spec.ForProvider.PreferredMaintenanceWindow != nil {
res.SetPreferredMaintenanceWindow(*cr.Spec.ForProvider.PreferredMaintenanceWindow)
}

if cr.Spec.ForProvider.StorageEncrypted != nil {
res.SetStorageEncrypted(*cr.Spec.ForProvider.StorageEncrypted)
}

if cr.Spec.ForProvider.RestoreFrom != nil && cr.Spec.ForProvider.RestoreFrom.S3 != nil {
if cr.Spec.ForProvider.RestoreFrom.S3.BucketName != nil {
res.SetS3BucketName(*cr.Spec.ForProvider.RestoreFrom.S3.BucketName)
}

if cr.Spec.ForProvider.RestoreFrom.S3.IngestionRoleARN != nil {
res.SetS3IngestionRoleArn(*cr.Spec.ForProvider.RestoreFrom.S3.IngestionRoleARN)
}

if cr.Spec.ForProvider.RestoreFrom.S3.Prefix != nil {
res.SetS3Prefix(*cr.Spec.ForProvider.RestoreFrom.S3.Prefix)
}

if cr.Spec.ForProvider.RestoreFrom.S3.SourceEngine != nil {
res.SetSourceEngine(*cr.Spec.ForProvider.RestoreFrom.S3.SourceEngine)
}

if cr.Spec.ForProvider.RestoreFrom.S3.SourceEngineVersion != nil {
res.SetSourceEngineVersion(*cr.Spec.ForProvider.RestoreFrom.S3.SourceEngineVersion)
}
}

if cr.Spec.ForProvider.Tags != nil {
var tags []*svcsdk.Tag
for _, tag := range cr.Spec.ForProvider.Tags {
tags = append(tags, &svcsdk.Tag{Key: tag.Key, Value: tag.Value})
}

res.SetTags(tags)
}

return res
}

func isUpToDate(cr *svcapitypes.DBCluster, out *svcsdk.DescribeDBClustersOutput) (bool, error) { // nolint:gocyclo
status := aws.StringValue(out.DBClusters[0].Status)
if status == "modifying" || status == "upgrading" || status == "configuring-iam-database-auth" {
if status == "modifying" || status == "upgrading" || status == "configuring-iam-database-auth" || status == "migrating" || status == "prepairing-data-migration" {
return true, nil
}

