diff --git a/examples/resources/biganimal_analytics_cluster/aws/resource.tf b/examples/resources/biganimal_analytics_cluster/aws/resource.tf
index db730995..78d1ef07 100644
--- a/examples/resources/biganimal_analytics_cluster/aws/resource.tf
+++ b/examples/resources/biganimal_analytics_cluster/aws/resource.tf
@@ -54,7 +54,8 @@ resource "biganimal_analytics_cluster" "analytics_cluster" {
   ]
 
   backup_retention_period = "30d"
-  csp_auth                = false
+  # backup_schedule_time = "0 5 1 * * *" //24 hour format cron expression e.g. "0 5 1 * * *" is 01:05
+  csp_auth = false
   instance_type           = "aws:m6id.12xlarge"
   password                = resource.random_password.password.result
 
diff --git a/examples/resources/biganimal_cluster/ha/resource.tf b/examples/resources/biganimal_cluster/ha/resource.tf
index 45600681..988e52e2 100644
--- a/examples/resources/biganimal_cluster/ha/resource.tf
+++ b/examples/resources/biganimal_cluster/ha/resource.tf
@@ -44,6 +44,7 @@ resource "biganimal_cluster" "ha_cluster" {
   ]
 
   backup_retention_period = "6d"
+  # backup_schedule_time = "0 5 1 * * *" //24 hour format cron expression e.g. "0 5 1 * * *" is 01:05
   cluster_architecture = {
     id    = "ha"
     nodes = 3
diff --git a/examples/resources/biganimal_cluster/single_node/aws/resource.tf b/examples/resources/biganimal_cluster/single_node/aws/resource.tf
index 5ca4f7e5..06fc06a1 100644
--- a/examples/resources/biganimal_cluster/single_node/aws/resource.tf
+++ b/examples/resources/biganimal_cluster/single_node/aws/resource.tf
@@ -44,6 +44,7 @@ resource "biganimal_cluster" "single_node_cluster" {
   ]
 
   backup_retention_period = "6d"
+  # backup_schedule_time = "0 5 1 * * *" //24 hour format cron expression e.g. "0 5 1 * * *" is 01:05
   cluster_architecture = {
     id    = "single"
     nodes = 1
diff --git a/examples/resources/biganimal_cluster/single_node/azure/resource.tf b/examples/resources/biganimal_cluster/single_node/azure/resource.tf
index fc6d4abe..9bcf0f43 100644
--- a/examples/resources/biganimal_cluster/single_node/azure/resource.tf
+++ b/examples/resources/biganimal_cluster/single_node/azure/resource.tf
@@ -44,6 +44,7 @@ resource "biganimal_cluster" "single_node_cluster" {
   ]
 
   backup_retention_period = "6d"
+  # backup_schedule_time = "0 5 1 * * *" //24 hour format cron expression e.g. "0 5 1 * * *" is 01:05
   cluster_architecture = {
     id    = "single"
     nodes = 1
diff --git a/examples/resources/biganimal_cluster/single_node/gcp/resource.tf b/examples/resources/biganimal_cluster/single_node/gcp/resource.tf
index e6ad7932..d473f5b4 100644
--- a/examples/resources/biganimal_cluster/single_node/gcp/resource.tf
+++ b/examples/resources/biganimal_cluster/single_node/gcp/resource.tf
@@ -44,6 +44,7 @@ resource "biganimal_cluster" "single_node_cluster" {
   ]
 
   backup_retention_period = "6d"
+  # backup_schedule_time = "0 5 1 * * *" //24 hour format cron expression e.g. "0 5 1 * * *" is 01:05
   cluster_architecture = {
     id    = "single"
     nodes = 1
diff --git a/examples/resources/biganimal_faraway_replica/aws/resource.tf b/examples/resources/biganimal_faraway_replica/aws/resource.tf
index e9826b08..6eea61bd 100644
--- a/examples/resources/biganimal_faraway_replica/aws/resource.tf
+++ b/examples/resources/biganimal_faraway_replica/aws/resource.tf
@@ -43,8 +43,9 @@ resource "biganimal_faraway_replica" "faraway_replica" {
   ]
 
   backup_retention_period = "8d"
-  csp_auth                = false
-  instance_type           = "aws:c6i.large"
+  # backup_schedule_time = "0 5 1 * * *" //24 hour format cron expression e.g. "0 5 1 * * *" is 01:05
+  csp_auth      = false
+  instance_type = "aws:c6i.large"
 
   // only following pg_config parameters are configurable for faraway replica
   // max_connections, max_locks_per_transaction, max_prepared_transactions, max_wal_senders, max_worker_processes.
diff --git a/examples/resources/biganimal_faraway_replica/azure/resource.tf b/examples/resources/biganimal_faraway_replica/azure/resource.tf
index 7b1c98a4..cd9a6543 100644
--- a/examples/resources/biganimal_faraway_replica/azure/resource.tf
+++ b/examples/resources/biganimal_faraway_replica/azure/resource.tf
@@ -43,8 +43,9 @@ resource "biganimal_faraway_replica" "faraway_replica" {
   ]
 
   backup_retention_period = "8d"
-  csp_auth                = false
-  instance_type           = "azure:Standard_D2s_v3"
+  # backup_schedule_time = "0 5 1 * * *" //24 hour format cron expression e.g. "0 5 1 * * *" is 01:05
+  csp_auth      = false
+  instance_type = "azure:Standard_D2s_v3"
 
   // only following pg_config parameters are configurable for faraway replica
   // max_connections, max_locks_per_transaction, max_prepared_transactions, max_wal_senders, max_worker_processes.
diff --git a/examples/resources/biganimal_faraway_replica/cluster_and_faraway_replica/resource.tf b/examples/resources/biganimal_faraway_replica/cluster_and_faraway_replica/resource.tf
index 3b4df3bf..74db7bf2 100644
--- a/examples/resources/biganimal_faraway_replica/cluster_and_faraway_replica/resource.tf
+++ b/examples/resources/biganimal_faraway_replica/cluster_and_faraway_replica/resource.tf
@@ -79,8 +79,9 @@ resource "biganimal_faraway_replica" "faraway_replica" {
   ]
 
   backup_retention_period = "8d"
-  csp_auth                = false
-  instance_type           = "azure:Standard_D2s_v3"
+  # backup_schedule_time = "0 5 1 * * *" //24 hour format cron expression e.g. "0 5 1 * * *" is 01:05
+  csp_auth      = false
+  instance_type = "azure:Standard_D2s_v3"
 
   // only following pg_config parameters are configurable for faraway replica
   // max_connections, max_locks_per_transaction, max_prepared_transactions, max_wal_senders, max_worker_processes.
diff --git a/examples/resources/biganimal_faraway_replica/gcp/resource.tf b/examples/resources/biganimal_faraway_replica/gcp/resource.tf
index e9994cfd..e2ac9242 100644
--- a/examples/resources/biganimal_faraway_replica/gcp/resource.tf
+++ b/examples/resources/biganimal_faraway_replica/gcp/resource.tf
@@ -43,8 +43,9 @@ resource "biganimal_faraway_replica" "faraway_replica" {
   ]
 
   backup_retention_period = "8d"
-  csp_auth                = false
-  instance_type           = "gcp:e2-highcpu-4"
+  # backup_schedule_time = "0 5 1 * * *" //24 hour format cron expression e.g. "0 5 1 * * *" is 01:05
+  csp_auth      = false
+  instance_type = "gcp:e2-highcpu-4"
 
   // only following pg_config parameters are configurable for faraway replica
   // max_connections, max_locks_per_transaction, max_prepared_transactions, max_wal_senders, max_worker_processes.
diff --git a/examples/resources/biganimal_pgd/aws/data_group/resource.tf b/examples/resources/biganimal_pgd/aws/data_group/resource.tf
index 4a672731..f029b334 100644
--- a/examples/resources/biganimal_pgd/aws/data_group/resource.tf
+++ b/examples/resources/biganimal_pgd/aws/data_group/resource.tf
@@ -54,6 +54,7 @@ resource "biganimal_pgd" "pgd_cluster" {
       },
     ]
     backup_retention_period = "6d"
+    # backup_schedule_time = "0 5 1 * * *" //24 hour format cron expression e.g. "0 5 1 * * *" is 01:05
     cluster_architecture = {
       cluster_architecture_id = "pgd"
      nodes                   = 3
diff --git a/examples/resources/biganimal_pgd/aws/data_groups_with_witness_group/resource.tf b/examples/resources/biganimal_pgd/aws/data_groups_with_witness_group/resource.tf
index 8bae42df..09ef74f7 100644
--- a/examples/resources/biganimal_pgd/aws/data_groups_with_witness_group/resource.tf
+++ b/examples/resources/biganimal_pgd/aws/data_groups_with_witness_group/resource.tf
@@ -54,6 +54,7 @@ resource "biganimal_pgd" "pgd_cluster" {
       },
     ]
     backup_retention_period = "6d"
+    # backup_schedule_time = "0 5 1 * * *" //24 hour format cron expression e.g. "0 5 1 * * *" is 01:05
     cluster_architecture = {
       cluster_architecture_id = "pgd"
       nodes                   = 3
@@ -111,6 +112,7 @@ resource "biganimal_pgd" "pgd_cluster" {
       },
     ]
     backup_retention_period = "6d"
+    # backup_schedule_time = "0 5 1 * * *" //24 hour format cron expression e.g. "0 5 1 * * *" is 01:05
     cluster_architecture = {
       cluster_architecture_id = "pgd"
       nodes                   = 3
diff --git a/examples/resources/biganimal_pgd/azure/data_group/resource.tf b/examples/resources/biganimal_pgd/azure/data_group/resource.tf
index 2cecef13..3a66f61f 100644
--- a/examples/resources/biganimal_pgd/azure/data_group/resource.tf
+++ b/examples/resources/biganimal_pgd/azure/data_group/resource.tf
@@ -54,6 +54,7 @@ resource "biganimal_pgd" "pgd_cluster" {
       },
     ]
     backup_retention_period = "6d"
+    # backup_schedule_time = "0 5 1 * * *" //24 hour format cron expression e.g. "0 5 1 * * *" is 01:05
     cluster_architecture = {
       cluster_architecture_id = "pgd"
       nodes                   = 3
diff --git a/examples/resources/biganimal_pgd/azure/data_groups_with_witness_group/resource.tf b/examples/resources/biganimal_pgd/azure/data_groups_with_witness_group/resource.tf
index cb181ec1..5dbed951 100644
--- a/examples/resources/biganimal_pgd/azure/data_groups_with_witness_group/resource.tf
+++ b/examples/resources/biganimal_pgd/azure/data_groups_with_witness_group/resource.tf
@@ -54,6 +54,7 @@ resource "biganimal_pgd" "pgd_cluster" {
       },
     ]
     backup_retention_period = "6d"
+    # backup_schedule_time = "0 5 1 * * *" //24 hour format cron expression e.g. "0 5 1 * * *" is 01:05
     cluster_architecture = {
       cluster_architecture_id = "pgd"
       nodes                   = 3
@@ -111,6 +112,7 @@ resource "biganimal_pgd" "pgd_cluster" {
       },
     ]
     backup_retention_period = "6d"
+    # backup_schedule_time = "0 5 1 * * *" //24 hour format cron expression e.g. "0 5 1 * * *" is 01:05
     cluster_architecture = {
       cluster_architecture_id = "pgd"
       nodes                   = 3
diff --git a/examples/resources/biganimal_pgd/gcp/data_group/resource.tf b/examples/resources/biganimal_pgd/gcp/data_group/resource.tf
index 64281147..4fa2cb2f 100644
--- a/examples/resources/biganimal_pgd/gcp/data_group/resource.tf
+++ b/examples/resources/biganimal_pgd/gcp/data_group/resource.tf
@@ -54,6 +54,7 @@ resource "biganimal_pgd" "pgd_cluster" {
       },
     ]
     backup_retention_period = "6d"
+    # backup_schedule_time = "0 5 1 * * *" //24 hour format cron expression e.g. "0 5 1 * * *" is 01:05
     cluster_architecture = {
      cluster_architecture_id = "pgd"
       nodes                   = 3
diff --git a/examples/resources/biganimal_pgd/gcp/data_groups_with_witness_group/resource.tf b/examples/resources/biganimal_pgd/gcp/data_groups_with_witness_group/resource.tf
index cd2bf536..81884dad 100644
--- a/examples/resources/biganimal_pgd/gcp/data_groups_with_witness_group/resource.tf
+++ b/examples/resources/biganimal_pgd/gcp/data_groups_with_witness_group/resource.tf
@@ -54,6 +54,7 @@ resource "biganimal_pgd" "pgd_cluster" {
       },
     ]
     backup_retention_period = "6d"
+    # backup_schedule_time = "0 5 1 * * *" //24 hour format cron expression e.g. "0 5 1 * * *" is 01:05
     cluster_architecture = {
       cluster_architecture_id = "pgd"
       nodes                   = 3
@@ -115,6 +116,7 @@ resource "biganimal_pgd" "pgd_cluster" {
       },
     ]
     backup_retention_period = "6d"
+    # backup_schedule_time = "0 5 1 * * *" //24 hour format cron expression e.g. "0 5 1 * * *" is 01:05
     cluster_architecture = {
       cluster_architecture_id = "pgd"
       nodes                   = 3
diff --git a/pkg/models/cluster.go b/pkg/models/cluster.go
index 7da9e738..6682fa9c 100644
--- a/pkg/models/cluster.go
+++ b/pkg/models/cluster.go
@@ -186,7 +186,7 @@ type Cluster struct {
 	EncryptionKeyIdReq *string                   `json:"keyId,omitempty"`
 	EncryptionKeyResp  *EncryptionKey            `json:"encryptionKey,omitempty"`
 	PgIdentity         *string                   `json:"pgIdentity,omitempty"`
-	BackupSchedule     *commonApi.BackupSchedule `json:"backupSchedule,omitempty"`
+	BackupScheduleTime *string                   `json:"scheduleBackup,omitempty"`
 }
 
 // IsHealthy checks to see if the cluster has the right condition 'biganimal.com/deployed'
diff --git a/pkg/models/common/api/backup_schedule.go b/pkg/models/common/api/backup_schedule.go
deleted file mode 100644
index e27daf2a..00000000
--- a/pkg/models/common/api/backup_schedule.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package api
-
-type BackupSchedule struct {
-	StartDay  *float64 `json:"startDay,omitempty"`
-	StartTime *string  `json:"startTime,omitempty"`
-}
diff --git a/pkg/models/common/terraform/backup_schedule.go b/pkg/models/common/terraform/backup_schedule.go
deleted file mode 100644
index 30722847..00000000
--- a/pkg/models/common/terraform/backup_schedule.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package terraform
-
-import "github.com/hashicorp/terraform-plugin-framework/types"
-
-type BackupSchedule struct {
-	StartDay  types.String `tfsdk:"start_day"`
-	StartTime types.String `tfsdk:"start_time"`
-}
diff --git a/pkg/models/pgd/api/data_group.go b/pkg/models/pgd/api/data_group.go
index 8d86f658..b1857af7 100644
--- a/pkg/models/pgd/api/data_group.go
+++ b/pkg/models/pgd/api/data_group.go
@@ -2,7 +2,6 @@ package api
 
 import (
 	"github.com/EnterpriseDB/terraform-provider-biganimal/pkg/models"
-	commonApi "github.com/EnterpriseDB/terraform-provider-biganimal/pkg/models/common/api"
 )
 
 type DataGroup struct {
@@ -33,5 +32,5 @@ type DataGroup struct {
 	PeAllowedPrincipalIds *[]string                 `json:"peAllowedPrincipalIds,omitempty"`
 	RoConnectionUri       *string                   `json:"roConnectionUri,omitempty"`
 	ReadOnlyConnections   *bool                     `json:"readOnlyConnections,omitempty"`
-	BackupSchedule        *commonApi.BackupSchedule `json:"backupSchedule,omitempty"`
+	BackupScheduleTime    *string                   `json:"scheduleBackup,omitempty"`
 }
diff --git a/pkg/models/pgd/terraform/data_group.go b/pkg/models/pgd/terraform/data_group.go
index b8169b3b..23c6e4bc 100644
--- a/pkg/models/pgd/terraform/data_group.go
+++ b/pkg/models/pgd/terraform/data_group.go
@@ -2,37 +2,36 @@ package terraform
 
 import (
 	"github.com/EnterpriseDB/terraform-provider-biganimal/pkg/models"
-	commonTerraform "github.com/EnterpriseDB/terraform-provider-biganimal/pkg/models/common/terraform"
 	"github.com/EnterpriseDB/terraform-provider-biganimal/pkg/models/pgd/api"
 	"github.com/hashicorp/terraform-plugin-framework/types"
 )
 
 type DataGroup struct {
-	GroupId               types.String                    `tfsdk:"group_id"`
-	AllowedIpRanges       types.Set                       `tfsdk:"allowed_ip_ranges"`
-	BackupRetentionPeriod *string                         `tfsdk:"backup_retention_period"`
-	ClusterArchitecture   *ClusterArchitecture            `tfsdk:"cluster_architecture"`
-	ClusterName           types.String                    `tfsdk:"cluster_name"`
-	ClusterType           types.String                    `tfsdk:"cluster_type"`
-	Connection            types.String                    `tfsdk:"connection_uri"`
-	CreatedAt             types.String                    `tfsdk:"created_at"`
-	CspAuth               *bool                           `tfsdk:"csp_auth"`
-	InstanceType          *api.InstanceType               `tfsdk:"instance_type"`
-	LogsUrl               types.String                    `tfsdk:"logs_url"`
-	MetricsUrl            types.String                    `tfsdk:"metrics_url"`
-	PgConfig              *[]models.KeyValue              `tfsdk:"pg_config"`
-	PgType                *api.PgType                     `tfsdk:"pg_type"`
-	PgVersion             *api.PgVersion                  `tfsdk:"pg_version"`
-	Phase                 types.String                    `tfsdk:"phase"`
-	PrivateNetworking     *bool                           `tfsdk:"private_networking"`
-	Provider              *api.CloudProvider              `tfsdk:"cloud_provider"`
-	Region                *api.Region                     `tfsdk:"region"`
-	ResizingPvc           types.Set                       `tfsdk:"resizing_pvc"`
-	Storage               *Storage                        `tfsdk:"storage"`
-	MaintenanceWindow     *models.MaintenanceWindow       `tfsdk:"maintenance_window"`
-	ServiceAccountIds     types.Set                       `tfsdk:"service_account_ids"`
-	PeAllowedPrincipalIds types.Set                       `tfsdk:"pe_allowed_principal_ids"`
-	RoConnectionUri       types.String                    `tfsdk:"ro_connection_uri"`
-	ReadOnlyConnections   *bool                           `tfsdk:"read_only_connections"`
-	BackupSchedule        *commonTerraform.BackupSchedule `tfsdk:"backup_schedule"`
+	GroupId               types.String              `tfsdk:"group_id"`
+	AllowedIpRanges       types.Set                 `tfsdk:"allowed_ip_ranges"`
+	BackupRetentionPeriod *string                   `tfsdk:"backup_retention_period"`
+	ClusterArchitecture   *ClusterArchitecture      `tfsdk:"cluster_architecture"`
+	ClusterName           types.String              `tfsdk:"cluster_name"`
+	ClusterType           types.String              `tfsdk:"cluster_type"`
+	Connection            types.String              `tfsdk:"connection_uri"`
+	CreatedAt             types.String              `tfsdk:"created_at"`
+	CspAuth               *bool                     `tfsdk:"csp_auth"`
+	InstanceType          *api.InstanceType         `tfsdk:"instance_type"`
+	LogsUrl               types.String              `tfsdk:"logs_url"`
+	MetricsUrl            types.String              `tfsdk:"metrics_url"`
+	PgConfig              *[]models.KeyValue        `tfsdk:"pg_config"`
+	PgType                *api.PgType               `tfsdk:"pg_type"`
+	PgVersion             *api.PgVersion            `tfsdk:"pg_version"`
+	Phase                 types.String              `tfsdk:"phase"`
+	PrivateNetworking     *bool                     `tfsdk:"private_networking"`
+	Provider              *api.CloudProvider        `tfsdk:"cloud_provider"`
+	Region                *api.Region               `tfsdk:"region"`
+	ResizingPvc           types.Set                 `tfsdk:"resizing_pvc"`
+	Storage               *Storage                  `tfsdk:"storage"`
+	MaintenanceWindow     *models.MaintenanceWindow `tfsdk:"maintenance_window"`
+	ServiceAccountIds     types.Set                 `tfsdk:"service_account_ids"`
+	PeAllowedPrincipalIds types.Set                 `tfsdk:"pe_allowed_principal_ids"`
+	RoConnectionUri       types.String              `tfsdk:"ro_connection_uri"`
+	ReadOnlyConnections   *bool                     `tfsdk:"read_only_connections"`
+	BackupScheduleTime    types.String              `tfsdk:"backup_schedule_time"`
 }
diff --git a/pkg/provider/common.go b/pkg/provider/common.go
index 88936ab3..743bba6d 100644
--- a/pkg/provider/common.go
+++ b/pkg/provider/common.go
@@ -8,26 +8,6 @@ import (
 	"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
 )
 
-var WeekdaysNumber = map[string]float64{
-	"monday":    1.0,
-	"tuesday":   2.0,
-	"wednesday": 3.0,
-	"thursday":  4.0,
-	"friday":    5.0,
-	"saturday":  6.0,
-	"sunday":    0.0,
-}
-
-var WeekdaysName = map[float64]string{
-	1.0: "Monday",
2.0: "Tuesday", - 3.0: "Wednesday", - 4.0: "Thursday", - 5.0: "Friday", - 6.0: "Saturday", - 0.0: "Sunday", -} - // build tag assign terraform resource as, using api response as input func buildTFRsrcAssignTagsAs(tfRsrcTagsOut *[]commonTerraform.Tag, apiRespTags []commonApi.Tag) { *tfRsrcTagsOut = []commonTerraform.Tag{} @@ -53,17 +33,8 @@ func buildAPIReqAssignTags(tfRsrcTags []commonTerraform.Tag) []commonApi.Tag { return tags } -var resourceBackupSchedule = schema.SingleNestedAttribute{ - Description: "Backup schedule.", - Optional: true, - Attributes: map[string]schema.Attribute{ - "start_day": schema.StringAttribute{ - Description: "Backup schedule start day.", - Required: true, - }, - "start_time": schema.StringAttribute{ - Description: "Backup schedule start time.", - Required: true, - }, - }, +var ResourceBackupScheduleTime = schema.StringAttribute{ + MarkdownDescription: "Backup schedule time in 24 hour format.", + Optional: true, + Computed: true, } diff --git a/pkg/provider/resource_analytics_cluster.go b/pkg/provider/resource_analytics_cluster.go index f6804e7a..f85353c3 100644 --- a/pkg/provider/resource_analytics_cluster.go +++ b/pkg/provider/resource_analytics_cluster.go @@ -60,7 +60,7 @@ type analyticsClusterResourceModel struct { PeAllowedPrincipalIds types.Set `tfsdk:"pe_allowed_principal_ids"` Pause types.Bool `tfsdk:"pause"` Tags []commonTerraform.Tag `tfsdk:"tags"` - BackupSchedule *commonTerraform.BackupSchedule `tfsdk:"backup_schedule"` + BackupScheduleTime types.String `tfsdk:"backup_schedule_time"` Timeouts timeouts.Value `tfsdk:"timeouts"` } @@ -302,7 +302,7 @@ func (r *analyticsClusterResource) Schema(ctx context.Context, req resource.Sche plan_modifier.CustomAssignTags(), }, }, - "backup_schedule": resourceBackupSchedule, + "backup_schedule_time": ResourceBackupScheduleTime, }, } } @@ -408,6 +408,7 @@ func generateAnalyticsClusterModelCreate(ctx context.Context, client *api.Cluste CSPAuth: clusterResource.CspAuth.ValueBoolPointer(), PrivateNetworking: clusterResource.PrivateNetworking.ValueBoolPointer(), BackupRetentionPeriod: clusterResource.BackupRetentionPeriod.ValueStringPointer(), + BackupScheduleTime: clusterResource.BackupScheduleTime.ValueStringPointer(), } cluster.ClusterId = nil @@ -480,13 +481,6 @@ func generateAnalyticsClusterModelCreate(ctx context.Context, client *api.Cluste cluster.Tags = buildAPIReqAssignTags(clusterResource.Tags) - if clusterResource.BackupSchedule != nil { - cluster.BackupSchedule = &commonApi.BackupSchedule{ - StartDay: utils.ToPointer(WeekdaysNumber[clusterResource.BackupSchedule.StartDay.ValueString()]), - StartTime: clusterResource.BackupSchedule.StartTime.ValueStringPointer(), - } - } - return cluster, nil } @@ -514,6 +508,7 @@ func readAnalyticsCluster(ctx context.Context, client *api.ClusterClient, tfClus tfClusterResource.LogsUrl = responseCluster.LogsUrl tfClusterResource.MetricsUrl = responseCluster.MetricsUrl tfClusterResource.BackupRetentionPeriod = types.StringPointerValue(responseCluster.BackupRetentionPeriod) + tfClusterResource.BackupScheduleTime = types.StringPointerValue(responseCluster.BackupScheduleTime) tfClusterResource.PgVersion = types.StringValue(responseCluster.PgVersion.PgVersionId) tfClusterResource.PgType = types.StringValue(responseCluster.PgType.PgTypeId) tfClusterResource.PrivateNetworking = types.BoolPointerValue(responseCluster.PrivateNetworking) @@ -555,13 +550,6 @@ func readAnalyticsCluster(ctx context.Context, client *api.ClusterClient, tfClus buildTFRsrcAssignTagsAs(&tfClusterResource.Tags, 
 
-	if responseCluster.BackupSchedule != nil {
-		tfClusterResource.BackupSchedule = &commonTerraform.BackupSchedule{
-			StartDay:  types.StringValue(WeekdaysName[*responseCluster.BackupSchedule.StartDay]),
-			StartTime: types.StringPointerValue(responseCluster.BackupSchedule.StartTime),
-		}
-	}
-
 	return nil
 }
 
diff --git a/pkg/provider/resource_cluster.go b/pkg/provider/resource_cluster.go
index e6fa516b..e0fcc5b4 100644
--- a/pkg/provider/resource_cluster.go
+++ b/pkg/provider/resource_cluster.go
@@ -82,7 +82,7 @@ type ClusterResourceModel struct {
 	VolumeSnapshot                   types.Bool                      `tfsdk:"volume_snapshot_backup"`
 	Tags                             []commonTerraform.Tag           `tfsdk:"tags"`
 	ServiceName                      types.String                    `tfsdk:"service_name"`
-	BackupSchedule                   *commonTerraform.BackupSchedule `tfsdk:"backup_schedule"`
+	BackupScheduleTime               types.String                    `tfsdk:"backup_schedule_time"`
 
 	Timeouts timeouts.Value `tfsdk:"timeouts"`
 }
@@ -576,7 +576,7 @@ func (c *clusterResource) Schema(ctx context.Context, req resource.SchemaRequest
 				Computed:      true,
 				PlanModifiers: []planmodifier.String{stringplanmodifier.UseStateForUnknown()},
 			},
-			"backup_schedule": resourceBackupSchedule,
+			"backup_schedule_time": ResourceBackupScheduleTime,
 		},
 	}
 }
@@ -858,6 +858,7 @@ func readCluster(ctx context.Context, client *api.ClusterClient, tfClusterResour
 	tfClusterResource.LogsUrl = responseCluster.LogsUrl
 	tfClusterResource.MetricsUrl = responseCluster.MetricsUrl
 	tfClusterResource.BackupRetentionPeriod = types.StringPointerValue(responseCluster.BackupRetentionPeriod)
+	tfClusterResource.BackupScheduleTime = types.StringPointerValue(responseCluster.BackupScheduleTime)
 	tfClusterResource.PgVersion = types.StringValue(responseCluster.PgVersion.PgVersionId)
 	tfClusterResource.PgType = types.StringValue(responseCluster.PgType.PgTypeId)
 	tfClusterResource.FarawayReplicaIds = StringSliceToSet(responseCluster.FarawayReplicaIds)
@@ -981,13 +982,6 @@ func readCluster(ctx context.Context, client *api.ClusterClient, tfClusterResour
 		tfClusterResource.TransparentDataEncryption.Status = types.StringValue(responseCluster.EncryptionKeyResp.Status)
 	}
 
-	if responseCluster.BackupSchedule != nil {
-		tfClusterResource.BackupSchedule = &commonTerraform.BackupSchedule{
-			StartDay:  types.StringValue(WeekdaysName[*responseCluster.BackupSchedule.StartDay]),
-			StartTime: types.StringPointerValue(responseCluster.BackupSchedule.StartTime),
-		}
-	}
-
 	return nil
 }
 
@@ -1107,6 +1101,7 @@ func (c *clusterResource) generateGenericClusterModel(ctx context.Context, clust
 		PrivateNetworking:     clusterResource.PrivateNetworking.ValueBoolPointer(),
 		ReadOnlyConnections:   clusterResource.ReadOnlyConnections.ValueBoolPointer(),
 		BackupRetentionPeriod: clusterResource.BackupRetentionPeriod.ValueStringPointer(),
+		BackupScheduleTime:    clusterResource.BackupScheduleTime.ValueStringPointer(),
 		SuperuserAccess:       clusterResource.SuperuserAccess.ValueBoolPointer(),
 		VolumeSnapshot:        clusterResource.VolumeSnapshot.ValueBoolPointer(),
 	}
@@ -1197,13 +1192,6 @@ func (c *clusterResource) generateGenericClusterModel(ctx context.Context, clust
 		}
 	}
 
-	if clusterResource.BackupSchedule != nil {
-		cluster.BackupSchedule = &commonApi.BackupSchedule{
-			StartDay:  utils.ToPointer(WeekdaysNumber[clusterResource.BackupSchedule.StartDay.ValueString()]),
-			StartTime: clusterResource.BackupSchedule.StartTime.ValueStringPointer(),
-		}
-	}
-
 	return cluster, nil
 }
 
diff --git a/pkg/provider/resource_fareplica.go b/pkg/provider/resource_fareplica.go
index 50ef2ac8..9f95e826 100644
--- a/pkg/provider/resource_fareplica.go
+++ b/pkg/provider/resource_fareplica.go
@@ -65,7 +65,7 @@ type FAReplicaResourceModel struct {
 	TransparentDataEncryptionAction types.String                    `tfsdk:"transparent_data_encryption_action"`
 	VolumeSnapshot                  types.Bool                      `tfsdk:"volume_snapshot_backup"`
 	Tags                            []commonTerraform.Tag           `tfsdk:"tags"`
-	BackupSchedule                  *commonTerraform.BackupSchedule `tfsdk:"backup_schedule"`
+	BackupScheduleTime              types.String                    `tfsdk:"backup_schedule_time"`
 
 	Timeouts timeouts.Value `tfsdk:"timeouts"`
 }
@@ -423,7 +423,7 @@ func (r *FAReplicaResource) Schema(ctx context.Context, req resource.SchemaReque
 					plan_modifier.CustomAssignTags(),
 				},
 			},
-			"backup_schedule": resourceBackupSchedule,
+			"backup_schedule_time": ResourceBackupScheduleTime,
 		},
 	}
 }
@@ -627,6 +627,7 @@ func readFAReplica(ctx context.Context, client *api.ClusterClient, fAReplicaReso
 	fAReplicaResourceModel.LogsUrl = responseCluster.LogsUrl
 	fAReplicaResourceModel.MetricsUrl = responseCluster.MetricsUrl
 	fAReplicaResourceModel.BackupRetentionPeriod = types.StringPointerValue(responseCluster.BackupRetentionPeriod)
+	fAReplicaResourceModel.BackupScheduleTime = types.StringPointerValue(responseCluster.BackupScheduleTime)
 	fAReplicaResourceModel.PrivateNetworking = types.BoolPointerValue(responseCluster.PrivateNetworking)
 	fAReplicaResourceModel.ClusterArchitecture = &ClusterArchitectureResourceModel{
 		Id: responseCluster.ClusterArchitecture.ClusterArchitectureId,
@@ -703,13 +704,6 @@ func readFAReplica(ctx context.Context, client *api.ClusterClient, fAReplicaReso
 		})
 	}
 
-	if responseCluster.BackupSchedule != nil {
-		fAReplicaResourceModel.BackupSchedule = &commonTerraform.BackupSchedule{
-			StartDay:  types.StringValue(WeekdaysName[*responseCluster.BackupSchedule.StartDay]),
-			StartTime: types.StringPointerValue(responseCluster.BackupSchedule.StartTime),
-		}
-	}
-
 	return nil
 }
 
@@ -777,6 +771,7 @@ func (r *FAReplicaResource) generateGenericFAReplicaModel(ctx context.Context, f
 		CSPAuth:               fAReplicaResourceModel.CspAuth.ValueBoolPointer(),
 		PrivateNetworking:     fAReplicaResourceModel.PrivateNetworking.ValueBoolPointer(),
 		BackupRetentionPeriod: fAReplicaResourceModel.BackupRetentionPeriod.ValueStringPointer(),
+		BackupScheduleTime:    fAReplicaResourceModel.BackupScheduleTime.ValueStringPointer(),
 	}
 
 	allowedIpRanges := []models.AllowedIpRange{}
@@ -821,13 +816,6 @@ func (r *FAReplicaResource) generateGenericFAReplicaModel(ctx context.Context, f
 	}
 	cluster.Tags = tags
 
-	if fAReplicaResourceModel.BackupSchedule != nil {
-		cluster.BackupSchedule = &commonApi.BackupSchedule{
-			StartDay:  utils.ToPointer(WeekdaysNumber[fAReplicaResourceModel.BackupSchedule.StartDay.ValueString()]),
-			StartTime: fAReplicaResourceModel.BackupSchedule.StartTime.ValueStringPointer(),
-		}
-	}
-
 	return cluster, nil
 }
 
diff --git a/pkg/provider/resource_pgd.go b/pkg/provider/resource_pgd.go
index 10886d7b..608e8e23 100644
--- a/pkg/provider/resource_pgd.go
+++ b/pkg/provider/resource_pgd.go
@@ -417,7 +417,7 @@ func PgdSchema(ctx context.Context) schema.Schema {
 						Optional: true,
 						Computed: true,
 					},
-					"backup_schedule": resourceBackupSchedule,
+					"backup_schedule_time": ResourceBackupScheduleTime,
 				},
 			},
 		},
@@ -690,6 +690,7 @@ func (p pgdResource) Create(ctx context.Context, req resource.CreateRequest, res
 			apiDGModel := pgdApi.DataGroup{
 				AllowedIpRanges:       buildRequestAllowedIpRanges(v.AllowedIpRanges),
 				BackupRetentionPeriod: v.BackupRetentionPeriod,
+				BackupScheduleTime:    v.BackupScheduleTime.ValueStringPointer(),
 				Provider:              v.Provider,
 				ClusterArchitecture:   clusterArch,
 				CspAuth:               v.CspAuth,
@@ -707,13 +708,6 @@ func (p pgdResource) Create(ctx context.Context, req resource.CreateRequest, res
 				ReadOnlyConnections:   v.ReadOnlyConnections,
 			}
 
-			if v.BackupSchedule != nil {
-				apiDGModel.BackupSchedule = &commonApi.BackupSchedule{
-					StartDay:  utils.ToPointer(WeekdaysNumber[v.BackupSchedule.StartDay.ValueString()]),
-					StartTime: v.BackupSchedule.StartTime.ValueStringPointer(),
-				}
-			}
-
 			*clusterReqBody.Groups = append(*clusterReqBody.Groups, apiDGModel)
 		}
 
@@ -978,6 +972,7 @@ func (p pgdResource) Update(ctx context.Context, req resource.UpdateRequest, res
 			ClusterType:           utils.ToPointer("data_group"),
 			AllowedIpRanges:       buildRequestAllowedIpRanges(v.AllowedIpRanges),
 			BackupRetentionPeriod: v.BackupRetentionPeriod,
+			BackupScheduleTime:    v.BackupScheduleTime.ValueStringPointer(),
 			CspAuth:               v.CspAuth,
 			InstanceType:          v.InstanceType,
 			PgConfig:              v.PgConfig,
@@ -1002,13 +997,6 @@ func (p pgdResource) Update(ctx context.Context, req resource.UpdateRequest, res
 				reqDg.PeAllowedPrincipalIds = principalIds
 			}
 
-			if v.BackupSchedule != nil {
-				reqDg.BackupSchedule = &commonApi.BackupSchedule{
-					StartDay:  utils.ToPointer(WeekdaysNumber[v.BackupSchedule.StartDay.ValueString()]),
-					StartTime: v.BackupSchedule.StartTime.ValueStringPointer(),
-				}
-			}
-
 			*clusterReqBody.Groups = append(*clusterReqBody.Groups, reqDg)
 		}
 
@@ -1410,18 +1398,11 @@ func buildTFGroupsAs(ctx context.Context, diags *diag.Diagnostics, state tfsdk.S
 		allwdIpRngsSet = types.SetValueMust(allwdIpRngsElemType, allowedIpRanges)
 	}
 
-	var backupSchedule *commonTerraform.BackupSchedule
-	if apiRespDgModel.BackupSchedule != nil {
-		backupSchedule = &commonTerraform.BackupSchedule{
-			StartDay:  types.StringValue(WeekdaysName[*apiRespDgModel.BackupSchedule.StartDay]),
-			StartTime: types.StringPointerValue(apiRespDgModel.BackupSchedule.StartTime),
-		}
-	}
-
 	tfDGModel := terraform.DataGroup{
 		GroupId:               types.StringPointerValue(apiRespDgModel.GroupId),
 		AllowedIpRanges:       allwdIpRngsSet,
 		BackupRetentionPeriod: apiRespDgModel.BackupRetentionPeriod,
+		BackupScheduleTime:    types.StringPointerValue(apiRespDgModel.BackupScheduleTime),
 		ClusterArchitecture:   clusterArch,
 		ClusterName:           types.StringPointerValue(apiRespDgModel.ClusterName),
 		ClusterType:           types.StringPointerValue(apiRespDgModel.ClusterType),
@@ -1445,7 +1426,6 @@ func buildTFGroupsAs(ctx context.Context, diags *diag.Diagnostics, state tfsdk.S
 		PeAllowedPrincipalIds: types.SetValueMust(types.StringType, principalIds),
 		RoConnectionUri:       types.StringPointerValue(apiRespDgModel.RoConnectionUri),
 		ReadOnlyConnections:   apiRespDgModel.ReadOnlyConnections,
-		BackupSchedule:        backupSchedule,
 	}
 
 	outPgdTFResource.DataGroups = append(outPgdTFResource.DataGroups, tfDGModel)
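
For reference, a minimal before/after usage sketch of the attribute this patch introduces (values are illustrative, not taken from the repository): backup_schedule_time is an optional, computed string on biganimal_cluster, biganimal_analytics_cluster, and biganimal_faraway_replica, and it replaces the removed backup_schedule object (start_day/start_time). The six-field cron value appears to be seconds-first, since the comment added to the examples reads "0 5 1 * * *" is 01:05.

  # before this change (removed attribute)
  backup_schedule = {
    start_day  = "monday"
    start_time = "01:05"
  }

  # after this change
  backup_schedule_time = "0 5 1 * * *" # 01:05, per the comment added to the examples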
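
The same attribute is also exposed per data group on biganimal_pgd. A sketch of how it would sit in one of the updated examples, with the line uncommented and the remaining data group arguments elided:

  resource "biganimal_pgd" "pgd_cluster" {
    # ... project_id, cluster_name, and the other arguments shown in the examples above ...
    data_groups = [
      {
        backup_retention_period = "6d"
        backup_schedule_time    = "0 5 1 * * *" # 01:05
        cluster_architecture = {
          cluster_architecture_id = "pgd"
          nodes                   = 3
        }
        # ... remaining data group arguments ...
      },
    ]
  }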