diff --git a/CHANGELOG.md b/CHANGELOG.md index 6d264ac0b8b..53411ffe311 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,18 @@ +Release v1.33.13 (2020-07-27) +=== + +### Service Client Updates +* `service/datasync`: Updates service API and documentation +* `service/dms`: Updates service API, documentation, and paginators + * Basic endpoint settings for relational databases, Preflight validation API. +* `service/ec2`: Updates service API + * m6gd, c6gd, r6gd instances are powered by AWS Graviton2 processors and support local NVMe instance storage +* `service/frauddetector`: Updates service API and documentation +* `service/glue`: Updates service API and documentation + * Add ability to manually resume workflows in AWS Glue providing customers further control over the orchestration of ETL workloads. +* `service/ssm`: Updates service documentation + * Assorted doc ticket-fix updates for Systems Manager. + Release v1.33.12 (2020-07-24) === diff --git a/aws/version.go b/aws/version.go index 1a859e6ba96..e4cde0db28b 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.33.12" +const SDKVersion = "1.33.13" diff --git a/models/apis/datasync/2018-11-09/api-2.json b/models/apis/datasync/2018-11-09/api-2.json index 5ca332364cc..555f087a1d8 100644 --- a/models/apis/datasync/2018-11-09/api-2.json +++ b/models/apis/datasync/2018-11-09/api-2.json @@ -79,6 +79,19 @@ {"shape":"InternalException"} ] }, + "CreateLocationObjectStorage":{ + "name":"CreateLocationObjectStorage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateLocationObjectStorageRequest"}, + "output":{"shape":"CreateLocationObjectStorageResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ] + }, "CreateLocationS3":{ "name":"CreateLocationS3", "http":{ @@ -209,6 +222,19 @@ {"shape":"InternalException"} ] }, + "DescribeLocationObjectStorage":{ + "name":"DescribeLocationObjectStorage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLocationObjectStorageRequest"}, + "output":{"shape":"DescribeLocationObjectStorageResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ] + }, "DescribeLocationS3":{ "name":"DescribeLocationS3", "http":{ @@ -406,7 +432,7 @@ "AgentArnList":{ "type":"list", "member":{"shape":"AgentArn"}, - "max":64, + "max":4, "min":1 }, "AgentList":{ @@ -457,7 +483,7 @@ "members":{ "ActivationKey":{"shape":"ActivationKey"}, "AgentName":{"shape":"TagValue"}, - "Tags":{"shape":"TagList"}, + "Tags":{"shape":"InputTagList"}, "VpcEndpointId":{"shape":"VpcEndpointId"}, "SubnetArns":{"shape":"PLSubnetArnList"}, "SecurityGroupArns":{"shape":"PLSecurityGroupArnList"} @@ -479,7 +505,7 @@ "Subdirectory":{"shape":"EfsSubdirectory"}, "EfsFilesystemArn":{"shape":"EfsFilesystemArn"}, "Ec2Config":{"shape":"Ec2Config"}, - "Tags":{"shape":"TagList"} + "Tags":{"shape":"InputTagList"} } }, "CreateLocationEfsResponse":{ @@ -500,7 +526,7 @@ "Subdirectory":{"shape":"FsxWindowsSubdirectory"}, "FsxFilesystemArn":{"shape":"FsxFilesystemArn"}, "SecurityGroupArns":{"shape":"Ec2SecurityGroupArnList"}, - "Tags":{"shape":"TagList"}, + "Tags":{"shape":"InputTagList"}, "User":{"shape":"SmbUser"}, "Domain":{"shape":"SmbDomain"}, "Password":{"shape":"SmbPassword"} @@ -524,7 +550,7 @@ "ServerHostname":{"shape":"ServerHostname"}, "OnPremConfig":{"shape":"OnPremConfig"}, 
"MountOptions":{"shape":"NfsMountOptions"}, - "Tags":{"shape":"TagList"} + "Tags":{"shape":"InputTagList"} } }, "CreateLocationNfsResponse":{ @@ -533,6 +559,31 @@ "LocationArn":{"shape":"LocationArn"} } }, + "CreateLocationObjectStorageRequest":{ + "type":"structure", + "required":[ + "ServerHostname", + "BucketName", + "AgentArns" + ], + "members":{ + "ServerHostname":{"shape":"ServerHostname"}, + "ServerPort":{"shape":"ObjectStorageServerPort"}, + "ServerProtocol":{"shape":"ObjectStorageServerProtocol"}, + "Subdirectory":{"shape":"S3Subdirectory"}, + "BucketName":{"shape":"ObjectStorageBucketName"}, + "AccessKey":{"shape":"ObjectStorageAccessKey"}, + "SecretKey":{"shape":"ObjectStorageSecretKey"}, + "AgentArns":{"shape":"AgentArnList"}, + "Tags":{"shape":"InputTagList"} + } + }, + "CreateLocationObjectStorageResponse":{ + "type":"structure", + "members":{ + "LocationArn":{"shape":"LocationArn"} + } + }, "CreateLocationS3Request":{ "type":"structure", "required":[ @@ -544,7 +595,7 @@ "S3BucketArn":{"shape":"S3BucketArn"}, "S3StorageClass":{"shape":"S3StorageClass"}, "S3Config":{"shape":"S3Config"}, - "Tags":{"shape":"TagList"} + "Tags":{"shape":"InputTagList"} } }, "CreateLocationS3Response":{ @@ -570,7 +621,7 @@ "Password":{"shape":"SmbPassword"}, "AgentArns":{"shape":"AgentArnList"}, "MountOptions":{"shape":"SmbMountOptions"}, - "Tags":{"shape":"TagList"} + "Tags":{"shape":"InputTagList"} } }, "CreateLocationSmbResponse":{ @@ -593,7 +644,7 @@ "Options":{"shape":"Options"}, "Excludes":{"shape":"FilterList"}, "Schedule":{"shape":"TaskSchedule"}, - "Tags":{"shape":"TagList"} + "Tags":{"shape":"InputTagList"} } }, "CreateTaskResponse":{ @@ -708,6 +759,25 @@ "CreationTime":{"shape":"Time"} } }, + "DescribeLocationObjectStorageRequest":{ + "type":"structure", + "required":["LocationArn"], + "members":{ + "LocationArn":{"shape":"LocationArn"} + } + }, + "DescribeLocationObjectStorageResponse":{ + "type":"structure", + "members":{ + "LocationArn":{"shape":"LocationArn"}, + "LocationUri":{"shape":"LocationUri"}, + "AccessKey":{"shape":"ObjectStorageAccessKey"}, + "ServerPort":{"shape":"ObjectStorageServerPort"}, + "ServerProtocol":{"shape":"ObjectStorageServerProtocol"}, + "AgentArns":{"shape":"AgentArnList"}, + "CreationTime":{"shape":"Time"} + } + }, "DescribeLocationS3Request":{ "type":"structure", "required":["LocationArn"], @@ -902,6 +972,12 @@ "max":2048, "pattern":"^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):iam::[0-9]{12}:role/.*$" }, + "InputTagList":{ + "type":"list", + "member":{"shape":"TagListEntry"}, + "max":50, + "min":0 + }, "InternalException":{ "type":"structure", "members":{ @@ -959,7 +1035,7 @@ "ListTagsForResourceResponse":{ "type":"structure", "members":{ - "Tags":{"shape":"TagList"}, + "Tags":{"shape":"OutputTagList"}, "NextToken":{"shape":"NextToken"} } }, @@ -1016,7 +1092,7 @@ "LogGroupArn":{ "type":"string", "max":562, - "pattern":"^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):logs:[a-z\\-0-9]*:[0-9]{12}:log-group:([^:\\*]*)$" + "pattern":"^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):logs:[a-z\\-0-9]*:[0-9]{12}:log-group:([^:\\*]*)(:\\*)?$" }, "LogLevel":{ "type":"string", @@ -1068,6 +1144,38 @@ "NFS4_1" ] }, + "ObjectStorageAccessKey":{ + "type":"string", + "max":200, + "min":8, + "pattern":"^.+$" + }, + "ObjectStorageBucketName":{ + "type":"string", + "max":63, + "min":3, + "pattern":"^[a-zA-Z0-9_\\-\\+\\./\\(\\)\\$\\p{Zs}]+$" + }, + "ObjectStorageSecretKey":{ + "type":"string", + "max":200, + "min":8, + "pattern":"^.+$", + "sensitive":true + }, + 
"ObjectStorageServerPort":{ + "type":"integer", + "box":true, + "max":65536, + "min":1 + }, + "ObjectStorageServerProtocol":{ + "type":"string", + "enum":[ + "HTTPS", + "HTTP" + ] + }, "OnPremConfig":{ "type":"structure", "required":["AgentArns"], @@ -1089,9 +1197,16 @@ "PosixPermissions":{"shape":"PosixPermissions"}, "BytesPerSecond":{"shape":"BytesPerSecond"}, "TaskQueueing":{"shape":"TaskQueueing"}, - "LogLevel":{"shape":"LogLevel"} + "LogLevel":{"shape":"LogLevel"}, + "TransferMode":{"shape":"TransferMode"} } }, + "OutputTagList":{ + "type":"list", + "member":{"shape":"TagListEntry"}, + "max":55, + "min":0 + }, "OverwriteMode":{ "type":"string", "enum":[ @@ -1253,12 +1368,6 @@ "max":50, "min":1 }, - "TagList":{ - "type":"list", - "member":{"shape":"TagListEntry"}, - "max":55, - "min":0 - }, "TagListEntry":{ "type":"structure", "required":["Key"], @@ -1275,7 +1384,7 @@ ], "members":{ "ResourceArn":{"shape":"TaggableResourceArn"}, - "Tags":{"shape":"TagList"} + "Tags":{"shape":"InputTagList"} } }, "TagResourceResponse":{ @@ -1378,6 +1487,13 @@ ] }, "Time":{"type":"timestamp"}, + "TransferMode":{ + "type":"string", + "enum":[ + "CHANGED", + "ALL" + ] + }, "Uid":{ "type":"string", "enum":[ diff --git a/models/apis/datasync/2018-11-09/docs-2.json b/models/apis/datasync/2018-11-09/docs-2.json index f41e68db338..ec83d9a4078 100644 --- a/models/apis/datasync/2018-11-09/docs-2.json +++ b/models/apis/datasync/2018-11-09/docs-2.json @@ -2,13 +2,14 @@ "version": "2.0", "service": "AWS DataSync

AWS DataSync is a managed data transfer service that makes it simpler for you to automate moving data between on-premises storage and Amazon Simple Storage Service (Amazon S3) or Amazon Elastic File System (Amazon EFS).

This API interface reference for AWS DataSync contains documentation for a programming interface that you can use to manage AWS DataSync.

", "operations": { - "CancelTaskExecution": "

Cancels execution of a task.

When you cancel a task execution, the transfer of some files are abruptly interrupted. The contents of files that are transferred to the destination might be incomplete or inconsistent with the source files. However, if you start a new task execution on the same task and you allow the task execution to complete, file content on the destination is complete and consistent. This applies to other unexpected failures that interrupt a task execution. In all of these cases, AWS DataSync successfully complete the transfer when you start the next task execution.

", - "CreateAgent": "

Activates an AWS DataSync agent that you have deployed on your host. The activation process associates your agent with your account. In the activation process, you specify information such as the AWS Region that you want to activate the agent in. You activate the agent in the AWS Region where your target locations (in Amazon S3 or Amazon EFS) reside. Your tasks are created in this AWS Region.

You can activate the agent in a VPC (Virtual private Cloud) or provide the agent access to a VPC endpoint so you can run tasks without going over the public Internet.

You can use an agent for more than one location. If a task uses multiple agents, all of them need to have status AVAILABLE for the task to run. If you use multiple agents for a source location, the status of all the agents must be AVAILABLE for the task to run.

Agents are automatically updated by AWS on a regular basis, using a mechanism that ensures minimal interruption to your tasks.

", + "CancelTaskExecution": "

Cancels execution of a task.

When you cancel a task execution, the transfer of some files is abruptly interrupted. The contents of files that are transferred to the destination might be incomplete or inconsistent with the source files. However, if you start a new task execution on the same task and you allow the task execution to complete, file content on the destination is complete and consistent. This applies to other unexpected failures that interrupt a task execution. In all of these cases, AWS DataSync successfully completes the transfer when you start the next task execution.

", + "CreateAgent": "

Activates an AWS DataSync agent that you have deployed on your host. The activation process associates your agent with your account. In the activation process, you specify information such as the AWS Region that you want to activate the agent in. You activate the agent in the AWS Region where your target locations (in Amazon S3 or Amazon EFS) reside. Your tasks are created in this AWS Region.

You can activate the agent in a VPC (virtual private cloud) or provide the agent access to a VPC endpoint so you can run tasks without going over the public Internet.

You can use an agent for more than one location. If a task uses multiple agents, all of them need to have status AVAILABLE for the task to run. If you use multiple agents for a source location, the status of all the agents must be AVAILABLE for the task to run.

Agents are automatically updated by AWS on a regular basis, using a mechanism that ensures minimal interruption to your tasks.

", "CreateLocationEfs": "

Creates an endpoint for an Amazon EFS file system.

", "CreateLocationFsxWindows": "

Creates an endpoint for an Amazon FSx for Windows file system.

", - "CreateLocationNfs": "

Defines a file system on a Network File System (NFS) server that can be read from or written to

", + "CreateLocationNfs": "

Defines a file system on a Network File System (NFS) server that can be read from or written to.

", + "CreateLocationObjectStorage": "

Creates an endpoint for a self-managed object storage bucket.

", "CreateLocationS3": "

Creates an endpoint for an Amazon S3 bucket.

For AWS DataSync to access a destination S3 bucket, it needs an AWS Identity and Access Management (IAM) role that has the required permissions. You can set up the required permissions by creating an IAM policy that grants the required permissions and attaching the policy to the role. An example of such a policy is shown in the examples section.

For more information, see https://docs.aws.amazon.com/datasync/latest/userguide/working-with-locations.html#create-s3-location in the AWS DataSync User Guide.

", - "CreateLocationSmb": "

Defines a file system on an Server Message Block (SMB) server that can be read from or written to.

", + "CreateLocationSmb": "

Defines a file system on a Server Message Block (SMB) server that can be read from or written to.

", "CreateTask": "

Creates a task. A task is a set of two locations (source and destination) and a set of Options that you use to control the behavior of a task. If you don't specify Options when you create a task, AWS DataSync populates them with service defaults.

When you create a task, it first enters the CREATING state. During CREATING AWS DataSync attempts to mount the on-premises Network File System (NFS) location. The task transitions to the AVAILABLE state without waiting for the AWS location to become mounted. If required, AWS DataSync mounts the AWS location before each task execution.

If an agent that is associated with a source (NFS) location goes offline, the task transitions to the UNAVAILABLE status. If the status of the task remains in the CREATING status for more than a few minutes, it means that your agent might be having trouble mounting the source NFS file system. Check the task's ErrorCode and ErrorDetail. Mount issues are often caused by either a misconfigured firewall or a mistyped NFS server host name.

", "DeleteAgent": "

Deletes an agent. To specify which agent to delete, use the Amazon Resource Name (ARN) of the agent in your request. The operation disassociates the agent from your AWS account. However, it doesn't delete the agent virtual machine (VM) from your on-premises environment.

", "DeleteLocation": "

Deletes the configuration of a location used by AWS DataSync.

", @@ -16,14 +17,15 @@ "DescribeAgent": "

Returns metadata such as the name, the network interfaces, and the status (that is, whether the agent is running or not) for an agent. To specify which agent to describe, use the Amazon Resource Name (ARN) of the agent in your request.

", "DescribeLocationEfs": "

Returns metadata, such as the path information about an Amazon EFS location.

", "DescribeLocationFsxWindows": "

Returns metadata, such as the path information about an Amazon FSx for Windows location.

", - "DescribeLocationNfs": "

Returns metadata, such as the path information, about a NFS location.

", + "DescribeLocationNfs": "

Returns metadata, such as the path information, about an NFS location.

", + "DescribeLocationObjectStorage": "

Returns metadata about a self-managed object storage server location.

", "DescribeLocationS3": "

Returns metadata, such as bucket name, about an Amazon S3 bucket location.

", - "DescribeLocationSmb": "

Returns metadata, such as the path and user information about a SMB location.

", + "DescribeLocationSmb": "

Returns metadata, such as the path and user information about an SMB location.

", "DescribeTask": "

Returns metadata about a task.

", "DescribeTaskExecution": "

Returns detailed metadata about a task that is being executed.

", "ListAgents": "

Returns a list of agents owned by an AWS account in the AWS Region specified in the request. The returned list is ordered by agent Amazon Resource Name (ARN).

By default, this operation returns a maximum of 100 agents. This operation supports pagination that enables you to optionally reduce the number of agents returned in a response.

If you have more agents than are returned in a response (that is, the response returns only a truncated list of your agents), the response contains a marker that you can specify in your next request to fetch the next page of agents.

", - "ListLocations": "

Returns a lists of source and destination locations.

If you have more locations than are returned in a response (that is, the response returns only a truncated list of your agents), the response contains a token that you can specify in your next request to fetch the next page of locations.

", - "ListTagsForResource": "

Returns all the tags associated with a specified resources.

", + "ListLocations": "

Returns a list of source and destination locations.

If you have more locations than are returned in a response (that is, the response returns only a truncated list of your locations), the response contains a token that you can specify in your next request to fetch the next page of locations.

", + "ListTagsForResource": "

Returns all the tags associated with a specified resource.

", "ListTaskExecutions": "

Returns a list of executed tasks.

", "ListTasks": "

Returns a list of all the tasks.

", "StartTaskExecution": "

Starts a specific invocation of a task. A TaskExecution value represents an individual run of a task. Each task can have at most one TaskExecution at a time.

TaskExecution has the following transition phases: INITIALIZING | PREPARING | TRANSFERRING | VERIFYING | SUCCESS/FAILURE.

For detailed information, see the Task Execution section in the Components and Terminology topic in the AWS DataSync User Guide.

", @@ -54,7 +56,9 @@ "AgentArnList": { "base": null, "refs": { + "CreateLocationObjectStorageRequest$AgentArns": "

The Amazon Resource Names (ARNs) of the agents associated with the self-managed object storage server location.

", "CreateLocationSmbRequest$AgentArns": "

The Amazon Resource Names (ARNs) of agents to use for a Server Message Block (SMB) location.

", + "DescribeLocationObjectStorageResponse$AgentArns": "

The Amazon Resource Names (ARNs) of the agents associated with the self-managed object storage server location.

", "DescribeLocationSmbResponse$AgentArns": "

The Amazon Resource Name (ARN) of the source SMB file system location that is created.

", "OnPremConfig$AgentArns": "

The Amazon Resource Names (ARNs) of the agents to use for an NFS location.

" } @@ -140,6 +144,16 @@ "refs": { } }, + "CreateLocationObjectStorageRequest": { + "base": "

CreateLocationObjectStorageRequest

", + "refs": { + } + }, + "CreateLocationObjectStorageResponse": { + "base": "

CreateLocationObjectStorageResponse

", + "refs": { + } + }, "CreateLocationS3Request": { "base": "

CreateLocationS3Request

", "refs": { @@ -240,6 +254,16 @@ "refs": { } }, + "DescribeLocationObjectStorageRequest": { + "base": "

DescribeLocationObjectStorageRequest

", + "refs": { + } + }, + "DescribeLocationObjectStorageResponse": { + "base": "

DescribeLocationObjectStorageResponse

", + "refs": { + } + }, "DescribeLocationS3Request": { "base": "

DescribeLocationS3Request

", "refs": { @@ -313,7 +337,7 @@ "base": null, "refs": { "CreateLocationFsxWindowsRequest$SecurityGroupArns": "

The Amazon Resource Names (ARNs) of the security groups that are used to configure the FSx for Windows file system.

", - "DescribeLocationFsxWindowsResponse$SecurityGroupArns": "

The Amazon Resource Names (ARNs) of the security groups that are configured for the for the FSx for Windows file system.

", + "DescribeLocationFsxWindowsResponse$SecurityGroupArns": "

The Amazon Resource Names (ARNs) of the security groups that are configured for the FSx for Windows file system.

", "Ec2Config$SecurityGroupArns": "

The Amazon Resource Names (ARNs) of the security groups that are configured for the Amazon EC2 resource.

" } }, @@ -333,19 +357,19 @@ "EfsSubdirectory": { "base": null, "refs": { - "CreateLocationEfsRequest$Subdirectory": "

A subdirectory in the location’s path. This subdirectory in the EFS file system is used to read data from the EFS source location or write data to the EFS destination. By default, AWS DataSync uses the root directory.

Subdirectory must be specified with forward slashes. For example /path/to/folder.

" + "CreateLocationEfsRequest$Subdirectory": "

A subdirectory in the location’s path. This subdirectory in the EFS file system is used to read data from the EFS source location or write data to the EFS destination. By default, AWS DataSync uses the root directory.

Subdirectory must be specified with forward slashes. For example, /path/to/folder.

" } }, "Endpoint": { "base": null, "refs": { - "PrivateLinkConfig$PrivateLinkEndpoint": "

The private endpoint that is configured for an agent that has access to IP addresses in a PrivateLink. An agent that is configured with this endpoint will not be accessible over the public Internet.

" + "PrivateLinkConfig$PrivateLinkEndpoint": "

The private endpoint that is configured for an agent that has access to IP addresses in a PrivateLink. An agent that is configured with this endpoint will not be accessible over the public internet.

" } }, "EndpointType": { "base": null, "refs": { - "DescribeAgentResponse$EndpointType": "

The type of endpoint that your agent is connected to. If the endpoint is a VPC endpoint, the agent is not accessible over the public Internet.

" + "DescribeAgentResponse$EndpointType": "

The type of endpoint that your agent is connected to. If the endpoint is a VPC endpoint, the agent is not accessible over the public internet.

" } }, "FilterList": { @@ -401,6 +425,20 @@ "S3Config$BucketAccessRoleArn": "

The Amazon S3 bucket to access. This bucket is used as a parameter in the CreateLocationS3 operation.

" } }, + "InputTagList": { + "base": null, + "refs": { + "CreateAgentRequest$Tags": "

The key-value pair that represents the tag that you want to associate with the agent. The value can be an empty string. This value helps you manage, filter, and search for your agents.

Valid characters for key and value are letters, spaces, and numbers representable in UTF-8 format, and the following special characters: + - = . _ : / @.

", + "CreateLocationEfsRequest$Tags": "

The key-value pair that represents a tag that you want to add to the resource. The value can be an empty string. This value helps you manage, filter, and search for your resources. We recommend that you create a name tag for your location.

", + "CreateLocationFsxWindowsRequest$Tags": "

The key-value pair that represents a tag that you want to add to the resource. The value can be an empty string. This value helps you manage, filter, and search for your resources. We recommend that you create a name tag for your location.

", + "CreateLocationNfsRequest$Tags": "

The key-value pair that represents the tag that you want to add to the location. The value can be an empty string. We recommend using tags to name your resources.

", + "CreateLocationObjectStorageRequest$Tags": "

The key-value pair that represents the tag that you want to add to the location. The value can be an empty string. We recommend using tags to name your resources.

", + "CreateLocationS3Request$Tags": "

The key-value pair that represents the tag that you want to add to the location. The value can be an empty string. We recommend using tags to name your resources.

", + "CreateLocationSmbRequest$Tags": "

The key-value pair that represents the tag that you want to add to the location. The value can be an empty string. We recommend using tags to name your resources.

", + "CreateTaskRequest$Tags": "

The key-value pair that represents the tag that you want to add to the resource. The value can be an empty string.

", + "TagResourceRequest$Tags": "

The tags to apply.

" + } + }, "InternalException": { "base": "

This exception is thrown when an error occurs in the AWS DataSync service.

", "refs": { @@ -467,21 +505,24 @@ "CreateLocationEfsResponse$LocationArn": "

The Amazon Resource Name (ARN) of the Amazon EFS file system location that is created.

", "CreateLocationFsxWindowsResponse$LocationArn": "

The Amazon Resource Name (ARN) of the FSx for Windows file system location that is created.

", "CreateLocationNfsResponse$LocationArn": "

The Amazon Resource Name (ARN) of the source NFS file system location that is created.

", + "CreateLocationObjectStorageResponse$LocationArn": "

The Amazon Resource Name (ARN) of the self-managed object storage server location that is created.

", "CreateLocationS3Response$LocationArn": "

The Amazon Resource Name (ARN) of the source Amazon S3 bucket location that is created.

", "CreateLocationSmbResponse$LocationArn": "

The Amazon Resource Name (ARN) of the source SMB file system location that is created.

", "CreateTaskRequest$SourceLocationArn": "

The Amazon Resource Name (ARN) of the source location for the task.

", "CreateTaskRequest$DestinationLocationArn": "

The Amazon Resource Name (ARN) of an AWS storage resource's location.

", "DeleteLocationRequest$LocationArn": "

The Amazon Resource Name (ARN) of the location to delete.

", "DescribeLocationEfsRequest$LocationArn": "

The Amazon Resource Name (ARN) of the EFS location to describe.

", - "DescribeLocationEfsResponse$LocationArn": "

The Amazon resource Name (ARN) of the EFS location that was described.

", + "DescribeLocationEfsResponse$LocationArn": "

The Amazon Resource Name (ARN) of the EFS location that was described.

", "DescribeLocationFsxWindowsRequest$LocationArn": "

The Amazon Resource Name (ARN) of the FSx for Windows location to describe.

", - "DescribeLocationFsxWindowsResponse$LocationArn": "

The Amazon resource Name (ARN) of the FSx for Windows location that was described.

", - "DescribeLocationNfsRequest$LocationArn": "

The Amazon resource Name (ARN) of the NFS location to describe.

", - "DescribeLocationNfsResponse$LocationArn": "

The Amazon resource Name (ARN) of the NFS location that was described.

", + "DescribeLocationFsxWindowsResponse$LocationArn": "

The Amazon Resource Name (ARN) of the FSx for Windows location that was described.

", + "DescribeLocationNfsRequest$LocationArn": "

The Amazon Resource Name (ARN) of the NFS location to describe.

", + "DescribeLocationNfsResponse$LocationArn": "

The Amazon Resource Name (ARN) of the NFS location that was described.

", + "DescribeLocationObjectStorageRequest$LocationArn": "

The Amazon Resource Name (ARN) of the self-managed object storage server location to describe.

", + "DescribeLocationObjectStorageResponse$LocationArn": "

The Amazon Resource Name (ARN) of the self-managed object storage server location that was described.

", "DescribeLocationS3Request$LocationArn": "

The Amazon Resource Name (ARN) of the Amazon S3 bucket location to describe.

", "DescribeLocationS3Response$LocationArn": "

The Amazon Resource Name (ARN) of the Amazon S3 bucket location.

", - "DescribeLocationSmbRequest$LocationArn": "

The Amazon resource Name (ARN) of the SMB location to describe.

", - "DescribeLocationSmbResponse$LocationArn": "

The Amazon resource Name (ARN) of the SMB location that was described.

", + "DescribeLocationSmbRequest$LocationArn": "

The Amazon Resource Name (ARN) of the SMB location to describe.

", + "DescribeLocationSmbResponse$LocationArn": "

The Amazon Resource Name (ARN) of the SMB location that was described.

", "DescribeTaskResponse$SourceLocationArn": "

The Amazon Resource Name (ARN) of the source file system's location.

", "DescribeTaskResponse$DestinationLocationArn": "

The Amazon Resource Name (ARN) of the AWS storage resource's location.

", "LocationListEntry$LocationArn": "

The Amazon Resource Name (ARN) of the location. For Network File System (NFS) or Amazon EFS, the location is the export path. For Amazon S3, the location is the prefix path that you want to mount and use as the root of the location.

" @@ -505,6 +546,7 @@ "DescribeLocationEfsResponse$LocationUri": "

The URL of the EFS location that was described.

", "DescribeLocationFsxWindowsResponse$LocationUri": "

The URL of the FSx for Windows location that was described.

", "DescribeLocationNfsResponse$LocationUri": "

The URL of the source NFS location that was described.

", + "DescribeLocationObjectStorageResponse$LocationUri": "

The URL of the source self-managed object storage server location that was described.

", "DescribeLocationS3Response$LocationUri": "

The URL of the Amazon S3 location that was described.

", "DescribeLocationSmbResponse$LocationUri": "

The URL of the source SMB location that was described.

", "LocationListEntry$LocationUri": "

Represents a list of URLs of a location. LocationUri returns an array that contains a list of locations when the ListLocations operation is called.

Format: TYPE://GLOBAL_ID/SUBDIR.

TYPE designates the type of location. Valid values: NFS | EFS | S3.

GLOBAL_ID is the globally unique identifier of the resource that backs the location. An example for EFS is us-east-2.fs-abcd1234. An example for Amazon S3 is the bucket name, such as myBucket. An example for NFS is a valid IPv4 address or a host name compliant with Domain Name Service (DNS).

SUBDIR is a valid file system path, delimited by forward slashes as is the *nix convention. For NFS and Amazon EFS, it's the export path to mount the location. For Amazon S3, it's the prefix path that you mount to and treat as the root of the location.

" @@ -513,7 +555,7 @@ "LogGroupArn": { "base": null, "refs": { - "CreateTaskRequest$CloudWatchLogGroupArn": "

The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that is used to monitor and log events in the task.

For more information on these groups, see Working with Log Groups and Log Streams in the Amazon CloudWatch User Guide.

For more information about how to use CloudWatch Logs with DataSync, see Monitoring Your Task in the AWS DataSync User Guide.

", + "CreateTaskRequest$CloudWatchLogGroupArn": "

The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that is used to monitor and log events in the task.

", "DescribeTaskResponse$CloudWatchLogGroupArn": "

The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that was used to monitor and log events in the task.

For more information on these groups, see Working with Log Groups and Log Streams in the Amazon CloudWatch User Guide.

", "UpdateTaskRequest$CloudWatchLogGroupArn": "

The Amazon Resource Name (ARN) of the Amazon CloudWatch log group.

" } @@ -521,7 +563,7 @@ "LogLevel": { "base": null, "refs": { - "Options$LogLevel": "

A value that determines the type of logs DataSync will deliver to your AWS CloudWatch Logs file. If set to OFF, no logs will be delivered. BASIC will deliver a few logs per transfer operation and TRANSFER will deliver a verbose log that contains logs for every file that is transferred.

" + "Options$LogLevel": "

A value that determines the type of logs that DataSync publishes to a log stream in the Amazon CloudWatch log group that you provide. For more information about providing a log group for DataSync, see CloudWatchLogGroupArn. If set to OFF, no logs are published. BASIC publishes logs on errors for individual files transferred, and TRANSFER publishes logs for every file or object that is transferred and integrity checked.

" } }, "MaxResults": { @@ -572,7 +614,7 @@ "NfsSubdirectory": { "base": null, "refs": { - "CreateLocationNfsRequest$Subdirectory": "

The subdirectory in the NFS file system that is used to read data from the NFS source location or write data to the NFS destination. The NFS path should be a path that's exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network.

To see all the paths exported by your NFS server. run \"showmount -e nfs-server-name\" from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication.

To transfer all the data in the folder you specified, DataSync needs to have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash, or ensure that the permissions for all of the files that you want DataSync allow read access for all users. Doing either enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access.

For information about NFS export configuration, see 18.7. The /etc/exports Configuration File in the Red Hat Enterprise Linux documentation.

" + "CreateLocationNfsRequest$Subdirectory": "

The subdirectory in the NFS file system that is used to read data from the NFS source location or write data to the NFS destination. The NFS path should be a path that's exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network.

To see all the paths exported by your NFS server, run \"showmount -e nfs-server-name\" from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication.

To transfer all the data in the folder you specified, DataSync needs to have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash, or ensure that the permissions for all of the files that you want DataSync allow read access for all users. Doing either enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access.

If you are copying data to or from your AWS Snowcone device, see NFS Server on AWS Snowcone for more information.

For information about NFS export configuration, see 18.7. The /etc/exports Configuration File in the Red Hat Enterprise Linux documentation.

" } }, "NfsVersion": { @@ -581,23 +623,62 @@ "NfsMountOptions$Version": "

The specific NFS version that you want DataSync to use to mount your NFS share. If the server refuses to use the version specified, the sync will fail. If you don't specify a version, DataSync defaults to AUTOMATIC. That is, DataSync automatically selects a version based on negotiation with the NFS server.

You can specify the following NFS versions:

" } }, + "ObjectStorageAccessKey": { + "base": null, + "refs": { + "CreateLocationObjectStorageRequest$AccessKey": "

Optional. The access key is used if credentials are required to access the self-managed object storage server.

", + "DescribeLocationObjectStorageResponse$AccessKey": "

Optional. The access key is used if credentials are required to access the self-managed object storage server.

" + } + }, + "ObjectStorageBucketName": { + "base": null, + "refs": { + "CreateLocationObjectStorageRequest$BucketName": "

The bucket on the self-managed object storage server that is used to read data from.

" + } + }, + "ObjectStorageSecretKey": { + "base": null, + "refs": { + "CreateLocationObjectStorageRequest$SecretKey": "

Optional. The secret key is used if credentials are required to access the self-managed object storage server.

" + } + }, + "ObjectStorageServerPort": { + "base": null, + "refs": { + "CreateLocationObjectStorageRequest$ServerPort": "

The port that your self-managed object storage server accepts inbound network traffic on. The server port is set by default to TCP 80 (HTTP) or TCP 443 (HTTPS). You can specify a custom port if your self-managed object storage server requires one.

", + "DescribeLocationObjectStorageResponse$ServerPort": "

The port that your self-managed object storage server accepts inbound network traffic on. The server port is set by default to TCP 80 (HTTP) or TCP 443 (HTTPS).

" + } + }, + "ObjectStorageServerProtocol": { + "base": null, + "refs": { + "CreateLocationObjectStorageRequest$ServerProtocol": "

The protocol that the object storage server uses to communicate. Valid values are HTTP or HTTPS.

", + "DescribeLocationObjectStorageResponse$ServerProtocol": "

The protocol that the object storage server uses to communicate. Valid values are HTTP or HTTPS.

" + } + }, "OnPremConfig": { "base": "

A list of Amazon Resource Names (ARNs) of agents to use for a Network File System (NFS) location.

", "refs": { - "CreateLocationNfsRequest$OnPremConfig": "

Contains a list of Amazon Resource Names (ARNs) of agents that are used to connect to an NFS server.

", + "CreateLocationNfsRequest$OnPremConfig": "

Contains a list of Amazon Resource Names (ARNs) of agents that are used to connect to an NFS server.

If you are copying data to or from your AWS Snowcone device, see NFS Server on AWS Snowcone for more information.

", "DescribeLocationNfsResponse$OnPremConfig": null } }, "Options": { "base": "

Represents the options that are available to control the behavior of a StartTaskExecution operation. Behavior includes preserving metadata such as user ID (UID), group ID (GID), and file permissions, and also overwriting files in the destination, data integrity verification, and so on.

A task has a set of default options associated with it. If you don't specify an option in StartTaskExecution, the default value is used. You can override the defaults options on each task execution by specifying an overriding Options value to StartTaskExecution.

", "refs": { - "CreateTaskRequest$Options": "

The set of configuration options that control the behavior of a single execution of the task that occurs when you call StartTaskExecution. You can configure these options to preserve metadata such as user ID (UID) and group ID (GID), file permissions, data integrity verification, and so on.

For each individual task execution, you can override these options by specifying the OverrideOptions before starting a the task execution. For more information, see the operation.

", + "CreateTaskRequest$Options": "

The set of configuration options that control the behavior of a single execution of the task that occurs when you call StartTaskExecution. You can configure these options to preserve metadata such as user ID (UID) and group ID (GID), file permissions, data integrity verification, and so on.

For each individual task execution, you can override these options by specifying the OverrideOptions before starting the task execution. For more information, see the operation.

", "DescribeTaskExecutionResponse$Options": null, "DescribeTaskResponse$Options": "

The set of configuration options that control the behavior of a single execution of the task that occurs when you call StartTaskExecution. You can configure these options to preserve metadata such as user ID (UID) and group (GID), file permissions, data integrity verification, and so on.

For each individual task execution, you can override these options by specifying the overriding OverrideOptions value to operation.

", "StartTaskExecutionRequest$OverrideOptions": null, "UpdateTaskRequest$Options": null } }, + "OutputTagList": { + "base": null, + "refs": { + "ListTagsForResourceResponse$Tags": "

Array of resource tags.

" + } + }, "OverwriteMode": { "base": null, "refs": { @@ -645,7 +726,7 @@ } }, "PrivateLinkConfig": { - "base": "

The VPC endpoint, subnet and security group that an agent uses to access IP addresses in a VPC (Virtual Private Cloud).

", + "base": "

The VPC endpoint, subnet, and security group that an agent uses to access IP addresses in a VPC (Virtual Private Cloud).

", "refs": { "DescribeAgentResponse$PrivateLinkConfig": "

The subnet and the security group that DataSync used to access a VPC endpoint.

" } @@ -673,6 +754,7 @@ "S3Subdirectory": { "base": null, "refs": { + "CreateLocationObjectStorageRequest$Subdirectory": "

The subdirectory in the self-managed object storage server that is used to read data from.

", "CreateLocationS3Request$Subdirectory": "

A subdirectory in the Amazon S3 bucket. This subdirectory in Amazon S3 is used to read data from the S3 source location or write data to the S3 destination.

" } }, @@ -685,7 +767,8 @@ "ServerHostname": { "base": null, "refs": { - "CreateLocationNfsRequest$ServerHostname": "

The name of the NFS server. This value is the IP address or Domain Name Service (DNS) name of the NFS server. An agent that is installed on-premises uses this host name to mount the NFS server in a network.

This name must either be DNS-compliant or must be an IP version 4 (IPv4) address.

", + "CreateLocationNfsRequest$ServerHostname": "

The name of the NFS server. This value is the IP address or Domain Name Service (DNS) name of the NFS server. An agent that is installed on-premises uses this host name to mount the NFS server in a network.

If you are copying data to or from your AWS Snowcone device, see NFS Server on AWS Snowcone for more information.

This name must either be DNS-compliant or must be an IP version 4 (IPv4) address.

", + "CreateLocationObjectStorageRequest$ServerHostname": "

The name of the self-managed object storage server. This value is the IP address or Domain Name Service (DNS) name of the object storage server. An agent uses this host name to mount the object storage server in a network.

", "CreateLocationSmbRequest$ServerHostname": "

The name of the SMB server. This value is the IP address or Domain Name Service (DNS) name of the SMB server. An agent that is installed on-premises uses this hostname to mount the SMB server in a network.

This name must either be DNS-compliant or must be an IP version 4 (IPv4) address.

" } }, @@ -715,7 +798,7 @@ "SmbSubdirectory": { "base": null, "refs": { - "CreateLocationSmbRequest$Subdirectory": "

The subdirectory in the SMB file system that is used to read data from the SMB source location or write data to the SMB destination. The SMB path should be a path that's exported by the SMB server, or a subdirectory of that path. The path should be such that it can be mounted by other SMB clients in your network.

Subdirectory must be specified with forward slashes. For example /path/to/folder.

To transfer all the data in the folder you specified, DataSync needs to have permissions to mount the SMB share, as well as to access all the data in that share. To ensure this, either ensure that the user/password specified belongs to the user who can mount the share, and who has the appropriate permissions for all of the files and directories that you want DataSync to access, or use credentials of a member of the Backup Operators group to mount the share. Doing either enables the agent to access the data. For the agent to access directories, you must additionally enable all execute access.

" + "CreateLocationSmbRequest$Subdirectory": "

The subdirectory in the SMB file system that is used to read data from the SMB source location or write data to the SMB destination. The SMB path should be a path that's exported by the SMB server, or a subdirectory of that path. The path should be such that it can be mounted by other SMB clients in your network.

Subdirectory must be specified with forward slashes. For example, /path/to/folder.

To transfer all the data in the folder you specified, DataSync needs to have permissions to mount the SMB share, as well as to access all the data in that share. To ensure this, either ensure that the user/password specified belongs to the user who can mount the share, and who has the appropriate permissions for all of the files and directories that you want DataSync to access, or use credentials of a member of the Backup Operators group to mount the share. Doing either enables the agent to access the data. For the agent to access directories, you must additionally enable all execute access.

" } }, "SmbUser": { @@ -762,24 +845,11 @@ "UntagResourceRequest$Keys": "

The keys in the key-value pair in the tag to remove.

" } }, - "TagList": { - "base": null, - "refs": { - "CreateAgentRequest$Tags": "

The key-value pair that represents the tag that you want to associate with the agent. The value can be an empty string. This value helps you manage, filter, and search for your agents.

Valid characters for key and value are letters, spaces, and numbers representable in UTF-8 format, and the following special characters: + - = . _ : / @.

", - "CreateLocationEfsRequest$Tags": "

The key-value pair that represents a tag that you want to add to the resource. The value can be an empty string. This value helps you manage, filter, and search for your resources. We recommend that you create a name tag for your location.

", - "CreateLocationFsxWindowsRequest$Tags": "

The key-value pair that represents a tag that you want to add to the resource. The value can be an empty string. This value helps you manage, filter, and search for your resources. We recommend that you create a name tag for your location.

", - "CreateLocationNfsRequest$Tags": "

The key-value pair that represents the tag that you want to add to the location. The value can be an empty string. We recommend using tags to name your resources.

", - "CreateLocationS3Request$Tags": "

The key-value pair that represents the tag that you want to add to the location. The value can be an empty string. We recommend using tags to name your resources.

", - "CreateLocationSmbRequest$Tags": "

The key-value pair that represents the tag that you want to add to the location. The value can be an empty string. We recommend using tags to name your resources.

", - "CreateTaskRequest$Tags": "

The key-value pair that represents the tag that you want to add to the resource. The value can be an empty string.

", - "ListTagsForResourceResponse$Tags": "

Array of resource tags.

", - "TagResourceRequest$Tags": "

The tags to apply.

" - } - }, "TagListEntry": { "base": "

Represents a single entry in a list of AWS resource tags. TagListEntry returns an array that contains a list of tags when the ListTagsForResource operation is called.

", "refs": { - "TagList$member": null + "InputTagList$member": null, + "OutputTagList$member": null } }, "TagResourceRequest": { @@ -878,7 +948,7 @@ "TaskQueueing": { "base": null, "refs": { - "Options$TaskQueueing": "

A value that determines whether tasks should be queued before executing the tasks. If set to ENABLED, the tasks will be queued. The default is ENABLED.

If you use the same agent to run multiple tasks you can enable the tasks to run in series. For more information see queue-task-execution.

" + "Options$TaskQueueing": "

A value that determines whether tasks should be queued before executing the tasks. If set to ENABLED, the tasks will be queued. The default is ENABLED.

If you use the same agent to run multiple tasks, you can enable the tasks to run in series. For more information, see queue-task-execution.

" } }, "TaskSchedule": { @@ -904,12 +974,19 @@ "DescribeLocationEfsResponse$CreationTime": "

The time that the EFS location was created.

", "DescribeLocationFsxWindowsResponse$CreationTime": "

The time that the FSx for Windows location was created.

", "DescribeLocationNfsResponse$CreationTime": "

The time that the NFS location was created.

", + "DescribeLocationObjectStorageResponse$CreationTime": "

The time that the self-managed object storage server location was created.

", "DescribeLocationS3Response$CreationTime": "

The time that the Amazon S3 bucket location was created.

", "DescribeLocationSmbResponse$CreationTime": "

The time that the SMB location was created.

", "DescribeTaskExecutionResponse$StartTime": "

The time that the task execution was started.

", "DescribeTaskResponse$CreationTime": "

The time that the task was created.

" } }, + "TransferMode": { + "base": null, + "refs": { + "Options$TransferMode": "

TransferMode has two values: CHANGED and ALL. CHANGED performs an \"incremental\" or \"delta sync\"; it compares file modification times between the source and the destination to determine which files need to be transferred. ALL skips the destination inventory and transfers all files discovered on the source.

" + } + }, "Uid": { "base": null, "refs": { @@ -949,14 +1026,14 @@ "VerifyMode": { "base": null, "refs": { - "Options$VerifyMode": "

A value that determines whether a data integrity verification should be performed at the end of a task execution after all data and metadata have been transferred.

Default value: POINT_IN_TIME_CONSISTENT.

POINT_IN_TIME_CONSISTENT: Perform verification (recommended).

ONLY_FILES_TRANSFERRED: Perform verification on only files that were transferred.

NONE: Skip verification.

" + "Options$VerifyMode": "

A value that determines whether a data integrity verification should be performed at the end of a task execution after all data and metadata have been transferred. For more information, see create-task

Default value: POINT_IN_TIME_CONSISTENT.

ONLY_FILES_TRANSFERRED (recommended): Perform verification only on files that were transferred.

POINT_IN_TIME_CONSISTENT: Scan the entire source and entire destination at the end of the transfer to verify that source and destination are fully synchronized. This option isn't supported when transferring to S3 Glacier or S3 Glacier Deep Archive storage classes.

NONE: No additional verification is done at the end of the transfer, but all data transmissions are integrity-checked with checksum verification during the transfer.

" } }, "VpcEndpointId": { "base": null, "refs": { - "CreateAgentRequest$VpcEndpointId": "

The ID of the VPC (Virtual Private Cloud) endpoint that the agent has access to. This is the client-side VPC endpoint, also called a PrivateLink. If you don't have a PrivateLink VPC endpoint, see Creating a VPC Endpoint Service Configuration in the AWS VPC User Guide.

VPC endpoint ID looks like this: vpce-01234d5aff67890e1.

", - "PrivateLinkConfig$VpcEndpointId": "

The ID of the VPC endpoint that is configured for an agent. An agent that is configured with a VPC endpoint will not be accessible over the public Internet.

" + "CreateAgentRequest$VpcEndpointId": "

The ID of the VPC (virtual private cloud) endpoint that the agent has access to. This is the client-side VPC endpoint, also called a PrivateLink. If you don't have a PrivateLink VPC endpoint, see Creating a VPC Endpoint Service Configuration in the Amazon VPC User Guide.

VPC endpoint ID looks like this: vpce-01234d5aff67890e1.

", + "PrivateLinkConfig$VpcEndpointId": "

The ID of the VPC endpoint that is configured for an agent. An agent that is configured with a VPC endpoint will not be accessible over the public internet.

" } }, "long": { diff --git a/models/apis/dms/2016-01-01/api-2.json b/models/apis/dms/2016-01-01/api-2.json index 3b3f9a05be5..747d51190e6 100644 --- a/models/apis/dms/2016-01-01/api-2.json +++ b/models/apis/dms/2016-01-01/api-2.json @@ -36,6 +36,20 @@ {"shape":"ResourceNotFoundFault"} ] }, + "CancelReplicationTaskAssessmentRun":{ + "name":"CancelReplicationTaskAssessmentRun", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelReplicationTaskAssessmentRunMessage"}, + "output":{"shape":"CancelReplicationTaskAssessmentRunResponse"}, + "errors":[ + {"shape":"AccessDeniedFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidResourceStateFault"} + ] + }, "CreateEndpoint":{ "name":"CreateEndpoint", "http":{ @@ -221,6 +235,20 @@ {"shape":"InvalidResourceStateFault"} ] }, + "DeleteReplicationTaskAssessmentRun":{ + "name":"DeleteReplicationTaskAssessmentRun", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteReplicationTaskAssessmentRunMessage"}, + "output":{"shape":"DeleteReplicationTaskAssessmentRunResponse"}, + "errors":[ + {"shape":"AccessDeniedFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidResourceStateFault"} + ] + }, "DescribeAccountAttributes":{ "name":"DescribeAccountAttributes", "http":{ @@ -230,6 +258,20 @@ "input":{"shape":"DescribeAccountAttributesMessage"}, "output":{"shape":"DescribeAccountAttributesResponse"} }, + "DescribeApplicableIndividualAssessments":{ + "name":"DescribeApplicableIndividualAssessments", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeApplicableIndividualAssessmentsMessage"}, + "output":{"shape":"DescribeApplicableIndividualAssessmentsResponse"}, + "errors":[ + {"shape":"AccessDeniedFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidResourceStateFault"} + ] + }, "DescribeCertificates":{ "name":"DescribeCertificates", "http":{ @@ -388,6 +430,30 @@ {"shape":"ResourceNotFoundFault"} ] }, + "DescribeReplicationTaskAssessmentRuns":{ + "name":"DescribeReplicationTaskAssessmentRuns", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReplicationTaskAssessmentRunsMessage"}, + "output":{"shape":"DescribeReplicationTaskAssessmentRunsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ] + }, + "DescribeReplicationTaskIndividualAssessments":{ + "name":"DescribeReplicationTaskIndividualAssessments", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReplicationTaskIndividualAssessmentsMessage"}, + "output":{"shape":"DescribeReplicationTaskIndividualAssessmentsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundFault"} + ] + }, "DescribeReplicationTasks":{ "name":"DescribeReplicationTasks", "http":{ @@ -618,6 +684,29 @@ {"shape":"ResourceNotFoundFault"} ] }, + "StartReplicationTaskAssessmentRun":{ + "name":"StartReplicationTaskAssessmentRun", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartReplicationTaskAssessmentRunMessage"}, + "output":{"shape":"StartReplicationTaskAssessmentRunResponse"}, + "errors":[ + {"shape":"AccessDeniedFault"}, + {"shape":"ResourceNotFoundFault"}, + {"shape":"InvalidResourceStateFault"}, + {"shape":"KMSAccessDeniedFault"}, + {"shape":"KMSDisabledFault"}, + {"shape":"KMSFault"}, + {"shape":"KMSInvalidStateFault"}, + {"shape":"KMSNotFoundFault"}, + {"shape":"KMSKeyNotAccessibleFault"}, + {"shape":"S3AccessDeniedFault"}, + {"shape":"S3ResourceNotFoundFault"}, + {"shape":"ResourceAlreadyExistsFault"} + ] + }, 
"StopReplicationTask":{ "name":"StopReplicationTask", "http":{ @@ -729,6 +818,19 @@ }, "Boolean":{"type":"boolean"}, "BooleanOptional":{"type":"boolean"}, + "CancelReplicationTaskAssessmentRunMessage":{ + "type":"structure", + "required":["ReplicationTaskAssessmentRunArn"], + "members":{ + "ReplicationTaskAssessmentRunArn":{"shape":"String"} + } + }, + "CancelReplicationTaskAssessmentRunResponse":{ + "type":"structure", + "members":{ + "ReplicationTaskAssessmentRun":{"shape":"ReplicationTaskAssessmentRun"} + } + }, "Certificate":{ "type":"structure", "members":{ @@ -802,7 +904,13 @@ "KafkaSettings":{"shape":"KafkaSettings"}, "ElasticsearchSettings":{"shape":"ElasticsearchSettings"}, "NeptuneSettings":{"shape":"NeptuneSettings"}, - "RedshiftSettings":{"shape":"RedshiftSettings"} + "RedshiftSettings":{"shape":"RedshiftSettings"}, + "PostgreSQLSettings":{"shape":"PostgreSQLSettings"}, + "MySQLSettings":{"shape":"MySQLSettings"}, + "OracleSettings":{"shape":"OracleSettings"}, + "SybaseSettings":{"shape":"SybaseSettings"}, + "MicrosoftSQLServerSettings":{"shape":"MicrosoftSQLServerSettings"}, + "IBMDb2Settings":{"shape":"IBMDb2Settings"} } }, "CreateEndpointResponse":{ @@ -1001,6 +1109,19 @@ "members":{ } }, + "DeleteReplicationTaskAssessmentRunMessage":{ + "type":"structure", + "required":["ReplicationTaskAssessmentRunArn"], + "members":{ + "ReplicationTaskAssessmentRunArn":{"shape":"String"} + } + }, + "DeleteReplicationTaskAssessmentRunResponse":{ + "type":"structure", + "members":{ + "ReplicationTaskAssessmentRun":{"shape":"ReplicationTaskAssessmentRun"} + } + }, "DeleteReplicationTaskMessage":{ "type":"structure", "required":["ReplicationTaskArn"], @@ -1026,6 +1147,25 @@ "UniqueAccountIdentifier":{"shape":"String"} } }, + "DescribeApplicableIndividualAssessmentsMessage":{ + "type":"structure", + "members":{ + "ReplicationTaskArn":{"shape":"String"}, + "ReplicationInstanceArn":{"shape":"String"}, + "SourceEngineName":{"shape":"String"}, + "TargetEngineName":{"shape":"String"}, + "MigrationType":{"shape":"MigrationTypeValue"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeApplicableIndividualAssessmentsResponse":{ + "type":"structure", + "members":{ + "IndividualAssessmentNames":{"shape":"IndividualAssessmentNameList"}, + "Marker":{"shape":"String"} + } + }, "DescribeCertificatesMessage":{ "type":"structure", "members":{ @@ -1242,6 +1382,36 @@ "ReplicationTaskAssessmentResults":{"shape":"ReplicationTaskAssessmentResultList"} } }, + "DescribeReplicationTaskAssessmentRunsMessage":{ + "type":"structure", + "members":{ + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReplicationTaskAssessmentRunsResponse":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReplicationTaskAssessmentRuns":{"shape":"ReplicationTaskAssessmentRunList"} + } + }, + "DescribeReplicationTaskIndividualAssessmentsMessage":{ + "type":"structure", + "members":{ + "Filters":{"shape":"FilterList"}, + "MaxRecords":{"shape":"IntegerOptional"}, + "Marker":{"shape":"String"} + } + }, + "DescribeReplicationTaskIndividualAssessmentsResponse":{ + "type":"structure", + "members":{ + "Marker":{"shape":"String"}, + "ReplicationTaskIndividualAssessments":{"shape":"ReplicationTaskIndividualAssessmentList"} + } + }, "DescribeReplicationTasksMessage":{ "type":"structure", "members":{ @@ -1371,7 +1541,13 @@ "KafkaSettings":{"shape":"KafkaSettings"}, 
"ElasticsearchSettings":{"shape":"ElasticsearchSettings"}, "NeptuneSettings":{"shape":"NeptuneSettings"}, - "RedshiftSettings":{"shape":"RedshiftSettings"} + "RedshiftSettings":{"shape":"RedshiftSettings"}, + "PostgreSQLSettings":{"shape":"PostgreSQLSettings"}, + "MySQLSettings":{"shape":"MySQLSettings"}, + "OracleSettings":{"shape":"OracleSettings"}, + "SybaseSettings":{"shape":"SybaseSettings"}, + "MicrosoftSQLServerSettings":{"shape":"MicrosoftSQLServerSettings"}, + "IBMDb2Settings":{"shape":"IBMDb2Settings"} } }, "EndpointList":{ @@ -1426,6 +1602,10 @@ "member":{"shape":"EventSubscription"} }, "ExceptionMessage":{"type":"string"}, + "ExcludeTestList":{ + "type":"list", + "member":{"shape":"String"} + }, "Filter":{ "type":"structure", "required":[ @@ -1445,6 +1625,16 @@ "type":"list", "member":{"shape":"String"} }, + "IBMDb2Settings":{ + "type":"structure", + "members":{ + "DatabaseName":{"shape":"String"}, + "Password":{"shape":"SecretString"}, + "Port":{"shape":"IntegerOptional"}, + "ServerName":{"shape":"String"}, + "Username":{"shape":"String"} + } + }, "ImportCertificateMessage":{ "type":"structure", "required":["CertificateIdentifier"], @@ -1461,6 +1651,14 @@ "Certificate":{"shape":"Certificate"} } }, + "IncludeTestList":{ + "type":"list", + "member":{"shape":"String"} + }, + "IndividualAssessmentNameList":{ + "type":"list", + "member":{"shape":"String"} + }, "InsufficientResourceCapacityFault":{ "type":"structure", "members":{ @@ -1505,6 +1703,13 @@ }, "exception":true }, + "KMSFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, "KMSInvalidStateFault":{ "type":"structure", "members":{ @@ -1537,7 +1742,13 @@ "type":"structure", "members":{ "Broker":{"shape":"String"}, - "Topic":{"shape":"String"} + "Topic":{"shape":"String"}, + "MessageFormat":{"shape":"MessageFormatValue"}, + "IncludeTransactionDetails":{"shape":"BooleanOptional"}, + "IncludePartitionValue":{"shape":"BooleanOptional"}, + "PartitionIncludeSchemaTable":{"shape":"BooleanOptional"}, + "IncludeTableAlterOperations":{"shape":"BooleanOptional"}, + "IncludeControlDetails":{"shape":"BooleanOptional"} } }, "KeyList":{ @@ -1578,6 +1789,16 @@ "json-unformatted" ] }, + "MicrosoftSQLServerSettings":{ + "type":"structure", + "members":{ + "Port":{"shape":"IntegerOptional"}, + "DatabaseName":{"shape":"String"}, + "Password":{"shape":"SecretString"}, + "ServerName":{"shape":"String"}, + "Username":{"shape":"String"} + } + }, "MigrationTypeValue":{ "type":"string", "enum":[ @@ -1612,7 +1833,13 @@ "KafkaSettings":{"shape":"KafkaSettings"}, "ElasticsearchSettings":{"shape":"ElasticsearchSettings"}, "NeptuneSettings":{"shape":"NeptuneSettings"}, - "RedshiftSettings":{"shape":"RedshiftSettings"} + "RedshiftSettings":{"shape":"RedshiftSettings"}, + "PostgreSQLSettings":{"shape":"PostgreSQLSettings"}, + "MySQLSettings":{"shape":"MySQLSettings"}, + "OracleSettings":{"shape":"OracleSettings"}, + "SybaseSettings":{"shape":"SybaseSettings"}, + "MicrosoftSQLServerSettings":{"shape":"MicrosoftSQLServerSettings"}, + "IBMDb2Settings":{"shape":"IBMDb2Settings"} } }, "ModifyEndpointResponse":{ @@ -1717,6 +1944,16 @@ "KmsKeyId":{"shape":"String"} } }, + "MySQLSettings":{ + "type":"structure", + "members":{ + "DatabaseName":{"shape":"String"}, + "Password":{"shape":"SecretString"}, + "Port":{"shape":"IntegerOptional"}, + "ServerName":{"shape":"String"}, + "Username":{"shape":"String"} + } + }, "NeptuneSettings":{ "type":"structure", "required":[ @@ -1740,6 +1977,21 @@ "one" ] }, + 
"OracleSettings":{ + "type":"structure", + "members":{ + "AsmPassword":{"shape":"SecretString"}, + "AsmServer":{"shape":"String"}, + "AsmUser":{"shape":"String"}, + "DatabaseName":{"shape":"String"}, + "Password":{"shape":"SecretString"}, + "Port":{"shape":"IntegerOptional"}, + "SecurityDbEncryption":{"shape":"SecretString"}, + "SecurityDbEncryptionName":{"shape":"String"}, + "ServerName":{"shape":"String"}, + "Username":{"shape":"String"} + } + }, "OrderableReplicationInstance":{ "type":"structure", "members":{ @@ -1784,6 +2036,16 @@ "type":"list", "member":{"shape":"ResourcePendingMaintenanceActions"} }, + "PostgreSQLSettings":{ + "type":"structure", + "members":{ + "DatabaseName":{"shape":"String"}, + "Password":{"shape":"SecretString"}, + "Port":{"shape":"IntegerOptional"}, + "ServerName":{"shape":"String"}, + "Username":{"shape":"String"} + } + }, "RebootReplicationInstanceMessage":{ "type":"structure", "required":["ReplicationInstanceArn"], @@ -2042,6 +2304,48 @@ "type":"list", "member":{"shape":"ReplicationTaskAssessmentResult"} }, + "ReplicationTaskAssessmentRun":{ + "type":"structure", + "members":{ + "ReplicationTaskAssessmentRunArn":{"shape":"String"}, + "ReplicationTaskArn":{"shape":"String"}, + "Status":{"shape":"String"}, + "ReplicationTaskAssessmentRunCreationDate":{"shape":"TStamp"}, + "AssessmentProgress":{"shape":"ReplicationTaskAssessmentRunProgress"}, + "LastFailureMessage":{"shape":"String"}, + "ServiceAccessRoleArn":{"shape":"String"}, + "ResultLocationBucket":{"shape":"String"}, + "ResultLocationFolder":{"shape":"String"}, + "ResultEncryptionMode":{"shape":"String"}, + "ResultKmsKeyArn":{"shape":"String"}, + "AssessmentRunName":{"shape":"String"} + } + }, + "ReplicationTaskAssessmentRunList":{ + "type":"list", + "member":{"shape":"ReplicationTaskAssessmentRun"} + }, + "ReplicationTaskAssessmentRunProgress":{ + "type":"structure", + "members":{ + "IndividualAssessmentCount":{"shape":"Integer"}, + "IndividualAssessmentCompletedCount":{"shape":"Integer"} + } + }, + "ReplicationTaskIndividualAssessment":{ + "type":"structure", + "members":{ + "ReplicationTaskIndividualAssessmentArn":{"shape":"String"}, + "ReplicationTaskAssessmentRunArn":{"shape":"String"}, + "IndividualAssessmentName":{"shape":"String"}, + "Status":{"shape":"String"}, + "ReplicationTaskIndividualAssessmentStartDate":{"shape":"TStamp"} + } + }, + "ReplicationTaskIndividualAssessmentList":{ + "type":"list", + "member":{"shape":"ReplicationTaskIndividualAssessment"} + }, "ReplicationTaskList":{ "type":"list", "member":{"shape":"ReplicationTask"} @@ -2092,6 +2396,20 @@ }, "exception":true }, + "S3AccessDeniedFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, + "S3ResourceNotFoundFault":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, "S3Settings":{ "type":"structure", "members":{ @@ -2161,6 +2479,32 @@ "ReplicationTask":{"shape":"ReplicationTask"} } }, + "StartReplicationTaskAssessmentRunMessage":{ + "type":"structure", + "required":[ + "ReplicationTaskArn", + "ServiceAccessRoleArn", + "ResultLocationBucket", + "AssessmentRunName" + ], + "members":{ + "ReplicationTaskArn":{"shape":"String"}, + "ServiceAccessRoleArn":{"shape":"String"}, + "ResultLocationBucket":{"shape":"String"}, + "ResultLocationFolder":{"shape":"String"}, + "ResultEncryptionMode":{"shape":"String"}, + "ResultKmsKeyArn":{"shape":"String"}, + "AssessmentRunName":{"shape":"String"}, + 
"IncludeOnly":{"shape":"IncludeTestList"}, + "Exclude":{"shape":"ExcludeTestList"} + } + }, + "StartReplicationTaskAssessmentRunResponse":{ + "type":"structure", + "members":{ + "ReplicationTaskAssessmentRun":{"shape":"ReplicationTaskAssessmentRun"} + } + }, "StartReplicationTaskMessage":{ "type":"structure", "required":[ @@ -2247,6 +2591,16 @@ "type":"list", "member":{"shape":"SupportedEndpointType"} }, + "SybaseSettings":{ + "type":"structure", + "members":{ + "DatabaseName":{"shape":"String"}, + "Password":{"shape":"SecretString"}, + "Port":{"shape":"IntegerOptional"}, + "ServerName":{"shape":"String"}, + "Username":{"shape":"String"} + } + }, "TStamp":{"type":"timestamp"}, "TableListToReload":{ "type":"list", @@ -2282,6 +2636,10 @@ }, "TableToReload":{ "type":"structure", + "required":[ + "SchemaName", + "TableName" + ], "members":{ "SchemaName":{"shape":"String"}, "TableName":{"shape":"String"} diff --git a/models/apis/dms/2016-01-01/docs-2.json b/models/apis/dms/2016-01-01/docs-2.json index 2f7109754f8..df4ac73eaa2 100644 --- a/models/apis/dms/2016-01-01/docs-2.json +++ b/models/apis/dms/2016-01-01/docs-2.json @@ -4,6 +4,7 @@ "operations": { "AddTagsToResource": "

Adds metadata tags to an AWS DMS resource, including replication instance, endpoint, security group, and migration task. These tags can also be used with cost allocation reporting to track cost associated with DMS resources, or used in a Condition statement in an IAM policy for DMS.

", "ApplyPendingMaintenanceAction": "

Applies a pending maintenance action to a resource (for example, to a replication instance).

", + "CancelReplicationTaskAssessmentRun": "

Cancels a single premigration assessment run.

This operation prevents any individual assessments from running if they haven't started running. It also attempts to cancel any individual assessments that are currently running.
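For illustration, a minimal sketch of calling this new operation through the regenerated aws-sdk-go databasemigrationservice client, assuming the standard generated method and input names; the assessment-run ARN is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/databasemigrationservice"
)

func main() {
	svc := databasemigrationservice.New(session.Must(session.NewSession()))

	// Placeholder ARN; use the ARN returned by StartReplicationTaskAssessmentRun
	// or DescribeReplicationTaskAssessmentRuns.
	out, err := svc.CancelReplicationTaskAssessmentRun(&databasemigrationservice.CancelReplicationTaskAssessmentRunInput{
		ReplicationTaskAssessmentRunArn: aws.String("arn:aws:dms:us-east-1:123456789012:assessment-run:EXAMPLE"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// The response echoes the assessment run, including its updated status.
	fmt.Println(out.ReplicationTaskAssessmentRun)
}
```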

", "CreateEndpoint": "

Creates an endpoint using the provided settings.

", "CreateEventSubscription": "

Creates an AWS DMS event notification subscription.

You can specify the type of source (SourceType) you want to be notified of, provide a list of AWS DMS source IDs (SourceIds) that trigger the events, and provide a list of event categories (EventCategories) for events you want to be notified of. If you specify both the SourceType and SourceIds, such as SourceType = replication-instance and SourceIdentifier = my-replinstance, you will be notified of all the replication instance events for the specified source. If you specify a SourceType but don't specify a SourceIdentifier, you receive notice of the events for that source type for all your AWS DMS sources. If you don't specify either SourceType or SourceIdentifier, you will be notified of events generated from all AWS DMS sources belonging to your customer account.

For more information about AWS DMS events, see Working with Events and Notifications in the AWS Database Migration Service User Guide.

", "CreateReplicationInstance": "

Creates the replication instance using the specified parameters.

AWS DMS requires that your account have certain roles with appropriate permissions before you can create a replication instance. For information on the required roles, see Creating the IAM Roles to Use With the AWS CLI and AWS DMS API. For information on the required permissions, see IAM Permissions Needed to Use AWS DMS.

", @@ -16,7 +17,9 @@ "DeleteReplicationInstance": "

Deletes the specified replication instance.

You must delete any migration tasks that are associated with the replication instance before you can delete it.

", "DeleteReplicationSubnetGroup": "

Deletes a subnet group.

", "DeleteReplicationTask": "

Deletes the specified replication task.

", + "DeleteReplicationTaskAssessmentRun": "

Deletes the record of a single premigration assessment run.

This operation removes all metadata that AWS DMS maintains about this assessment run. However, the operation leaves untouched all information about this assessment run that is stored in your Amazon S3 bucket.

", "DescribeAccountAttributes": "

Lists all of the AWS DMS attributes for a customer account. These attributes include AWS DMS quotas for the account and a unique account identifier in a particular DMS region. DMS quotas include a list of resource quotas supported by the account, such as the number of replication instances allowed. The description for each resource quota includes the quota name, current usage toward that quota, and the quota's maximum value. DMS uses the unique account identifier to name each artifact used by DMS in the given region.

This command does not take any parameters.

", + "DescribeApplicableIndividualAssessments": "

Provides a list of individual assessments that you can specify for a new premigration assessment run, given one or more parameters.

If you specify an existing migration task, this operation provides the default individual assessments you can specify for that task. Otherwise, the specified parameters model elements of a possible migration task on which to base a premigration assessment run.

To use these migration task modeling parameters, you must specify an existing replication instance, a source database engine, a target database engine, and a migration type. This combination of parameters potentially limits the default individual assessments available for an assessment run created for a corresponding migration task.

If you specify no parameters, this operation provides a list of all possible individual assessments that you can specify for an assessment run. If you specify any one of the task modeling parameters, you must specify all of them or the operation cannot provide a list of individual assessments. The only parameter that you can specify alone is for an existing migration task. The specified task definition then determines the default list of individual assessments that you can specify in an assessment run for the task.
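A hedged sketch of the task-based usage mode with the Go client follows; the task ARN is a placeholder, and the task-modeling alternative is noted only in comments:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/databasemigrationservice"
)

func main() {
	svc := databasemigrationservice.New(session.Must(session.NewSession()))

	// Base the list on an existing task (placeholder ARN). Alternatively, omit
	// ReplicationTaskArn and supply ReplicationInstanceArn, SourceEngineName,
	// TargetEngineName, and MigrationType together to model a prospective task.
	out, err := svc.DescribeApplicableIndividualAssessments(&databasemigrationservice.DescribeApplicableIndividualAssessmentsInput{
		ReplicationTaskArn: aws.String("arn:aws:dms:us-east-1:123456789012:task:EXAMPLE"),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, name := range out.IndividualAssessmentNames {
		fmt.Println(aws.StringValue(name))
	}
}
```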

", "DescribeCertificates": "

Provides a description of the certificate.

", "DescribeConnections": "

Describes the status of the connections that have been made between the replication instance and an endpoint. Connections are created when you test an endpoint.

", "DescribeEndpointTypes": "

Returns information about the type of endpoints available.

", @@ -31,6 +34,8 @@ "DescribeReplicationInstances": "

Returns information about replication instances for your account in the current region.

", "DescribeReplicationSubnetGroups": "

Returns information about the replication subnet groups.

", "DescribeReplicationTaskAssessmentResults": "

Returns the task assessment results from Amazon S3. This action always returns the latest results.

", + "DescribeReplicationTaskAssessmentRuns": "

Returns a paginated list of premigration assessment runs based on filter settings.

These filter settings can specify a combination of premigration assessment runs, migration tasks, replication instances, and assessment run status values.

This operation doesn't return information about individual assessments. For this information, see the DescribeReplicationTaskIndividualAssessments operation.
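An illustrative sketch of filtering assessment runs by task ARN with the Go client (placeholder ARN, standard generated types assumed):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/databasemigrationservice"
)

func main() {
	svc := databasemigrationservice.New(session.Must(session.NewSession()))

	// Filter by task ARN (placeholder); other valid filter names are
	// replication-task-assessment-run-arn, replication-instance-arn, and status.
	out, err := svc.DescribeReplicationTaskAssessmentRuns(&databasemigrationservice.DescribeReplicationTaskAssessmentRunsInput{
		Filters: []*databasemigrationservice.Filter{{
			Name:   aws.String("replication-task-arn"),
			Values: []*string{aws.String("arn:aws:dms:us-east-1:123456789012:task:EXAMPLE")},
		}},
		MaxRecords: aws.Int64(20),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, run := range out.ReplicationTaskAssessmentRuns {
		fmt.Println(aws.StringValue(run.AssessmentRunName), aws.StringValue(run.Status))
	}
}
```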

", + "DescribeReplicationTaskIndividualAssessments": "

Returns a paginated list of individual assessments based on filter settings.

These filter settings can specify a combination of premigration assessment runs, migration tasks, and assessment status values.

", "DescribeReplicationTasks": "

Returns information about replication tasks for your account in the current region.

", "DescribeSchemas": "

Returns information about the schema for the specified endpoint.

", "DescribeTableStatistics": "

Returns table statistics on the database migration task, including table name, rows inserted, rows updated, and rows deleted.

Note that the \"last updated\" column the DMS console only indicates the time that AWS DMS last updated the table statistics record for a table. It does not indicate the time of the last update to the table.

", @@ -47,7 +52,8 @@ "RemoveTagsFromResource": "

Removes metadata tags from a DMS resource.

", "StartReplicationTask": "

Starts the replication task.

For more information about AWS DMS tasks, see Working with Migration Tasks in the AWS Database Migration Service User Guide.

", "StartReplicationTaskAssessment": "

Starts the replication task assessment for unsupported data types in the source database.

", - "StopReplicationTask": "

Stops the replication task.

", + "StartReplicationTaskAssessmentRun": "

Starts a new premigration assessment run for one or more individual assessments of a migration task.

The assessments that you can specify depend on the source and target database engine and the migration type defined for the given task. To run this operation, your migration task must already be created. After you run this operation, you can review the status of each individual assessment. You can also run the migration task manually after the assessment run and its individual assessments complete.
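A minimal sketch of starting an assessment run with the Go client; all ARNs, the results bucket, folder, and run name below are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/databasemigrationservice"
)

func main() {
	svc := databasemigrationservice.New(session.Must(session.NewSession()))

	out, err := svc.StartReplicationTaskAssessmentRun(&databasemigrationservice.StartReplicationTaskAssessmentRunInput{
		ReplicationTaskArn:   aws.String("arn:aws:dms:us-east-1:123456789012:task:EXAMPLE"),
		ServiceAccessRoleArn: aws.String("arn:aws:iam::123456789012:role/dms-assessment-role"),
		ResultLocationBucket: aws.String("my-dms-assessment-results"),
		ResultLocationFolder: aws.String("premigration"),
		AssessmentRunName:    aws.String("premigration-run-1"),
		// Optionally narrow the run with IncludeOnly or Exclude (mutually
		// exclusive); valid names come from DescribeApplicableIndividualAssessments.
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.ReplicationTaskAssessmentRun)
}
```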

", + "StopReplicationTask": "

Stops the replication task.

", "TestConnection": "

Tests the connection between the replication instance and the endpoint.

" }, "shapes": { @@ -132,6 +138,11 @@ "CreateReplicationInstanceMessage$AutoMinorVersionUpgrade": "

A value that indicates whether minor engine upgrades are applied automatically to the replication instance during the maintenance window. This parameter defaults to true.

Default: true

", "CreateReplicationInstanceMessage$PubliclyAccessible": "

Specifies the accessibility options for the replication instance. A value of true represents an instance with a public IP address. A value of false represents an instance with a private IP address. The default value is true.

", "DescribeReplicationTasksMessage$WithoutSettings": "

An option to avoid returning information about settings. Use this to reduce overhead when the setting information is too large. To use this option, choose true; otherwise, choose false (the default).

", + "KafkaSettings$IncludeTransactionDetails": "

Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for transaction_id, previous transaction_id, and transaction_record_id (the record offset within a transaction). The default is False.

", + "KafkaSettings$IncludePartitionValue": "

Shows the partition value within the Kafka message output, unless the partition type is schema-table-type. The default is False.

", + "KafkaSettings$PartitionIncludeSchemaTable": "

Prefixes schema and table names to partition values, when the partition type is primary-key-type. Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same partition, which causes throttling. The default is False.

", + "KafkaSettings$IncludeTableAlterOperations": "

Includes any data definition language (DDL) operations that change the table in the control data, such as rename-table, drop-table, add-column, drop-column, and rename-column. The default is False.

", + "KafkaSettings$IncludeControlDetails": "

Shows detailed control information for table definition, column definition, and table and column changes in the Kafka message output. The default is False.
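Taken together, the new KafkaSettings members map onto the generated Go struct roughly as sketched below; the endpoint ARN, broker address, and topic are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/databasemigrationservice"
)

func main() {
	svc := databasemigrationservice.New(session.Must(session.NewSession()))

	// Enable the new CDC metadata options on an existing Kafka target endpoint.
	_, err := svc.ModifyEndpoint(&databasemigrationservice.ModifyEndpointInput{
		EndpointArn: aws.String("arn:aws:dms:us-east-1:123456789012:endpoint:EXAMPLE"),
		KafkaSettings: &databasemigrationservice.KafkaSettings{
			Broker:                      aws.String("broker-host.example.com:9092"),
			Topic:                       aws.String("dms-cdc-topic"),
			MessageFormat:               aws.String("json-unformatted"),
			IncludeTransactionDetails:   aws.Bool(true),
			IncludePartitionValue:       aws.Bool(true),
			PartitionIncludeSchemaTable: aws.Bool(true),
			IncludeTableAlterOperations: aws.Bool(true),
			IncludeControlDetails:       aws.Bool(true),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```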

", "KinesisSettings$IncludeTransactionDetails": "

Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for transaction_id, previous transaction_id, and transaction_record_id (the record offset within a transaction). The default is False.

", "KinesisSettings$IncludePartitionValue": "

Shows the partition value within the Kinesis message output, unless the partition type is schema-table-type. The default is False.

", "KinesisSettings$PartitionIncludeSchemaTable": "

Prefixes schema and table names to partition values, when the partition type is primary-key-type. Doing this increases data distribution among Kinesis shards. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same shard, which causes throttling. The default is False.

", @@ -156,6 +167,16 @@ "TableStatistics$FullLoadReloaded": "

A value that indicates if the table was reloaded (true) or loaded as part of a new full load operation (false).

" } }, + "CancelReplicationTaskAssessmentRunMessage": { + "base": "

", + "refs": { + } + }, + "CancelReplicationTaskAssessmentRunResponse": { + "base": "

", + "refs": { + } + }, "Certificate": { "base": "

The SSL certificate that can be used to encrypt connections between the endpoints and the replication instance.

", "refs": { @@ -313,6 +334,16 @@ "refs": { } }, + "DeleteReplicationTaskAssessmentRunMessage": { + "base": "

", + "refs": { + } + }, + "DeleteReplicationTaskAssessmentRunResponse": { + "base": "

", + "refs": { + } + }, "DeleteReplicationTaskMessage": { "base": "

", "refs": { @@ -333,6 +364,16 @@ "refs": { } }, + "DescribeApplicableIndividualAssessmentsMessage": { + "base": "

", + "refs": { + } + }, + "DescribeApplicableIndividualAssessmentsResponse": { + "base": "

", + "refs": { + } + }, "DescribeCertificatesMessage": { "base": null, "refs": { @@ -473,6 +514,26 @@ "refs": { } }, + "DescribeReplicationTaskAssessmentRunsMessage": { + "base": "

", + "refs": { + } + }, + "DescribeReplicationTaskAssessmentRunsResponse": { + "base": "

", + "refs": { + } + }, + "DescribeReplicationTaskIndividualAssessmentsMessage": { + "base": "

", + "refs": { + } + }, + "DescribeReplicationTaskIndividualAssessmentsResponse": { + "base": "

", + "refs": { + } + }, "DescribeReplicationTasksMessage": { "base": "

", "refs": { @@ -523,7 +584,7 @@ "base": "

Provides the Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role used to define an Amazon DynamoDB target endpoint.

", "refs": { "CreateEndpointMessage$DynamoDbSettings": "

Settings in JSON format for the target Amazon DynamoDB endpoint. For information about other available settings, see Using Object Mapping to Migrate Data to DynamoDB in the AWS Database Migration Service User Guide.

", - "Endpoint$DynamoDbSettings": "

The settings for the target DynamoDB database. For more information, see the DynamoDBSettings structure.

", + "Endpoint$DynamoDbSettings": "

The settings for the DynamoDB target endpoint. For more information, see the DynamoDBSettings structure.

", "ModifyEndpointMessage$DynamoDbSettings": "

Settings in JSON format for the target Amazon DynamoDB endpoint. For information about other available settings, see Using Object Mapping to Migrate Data to DynamoDB in the AWS Database Migration Service User Guide.

" } }, @@ -623,6 +684,7 @@ "InvalidSubnet$message": "

", "KMSAccessDeniedFault$message": null, "KMSDisabledFault$message": null, + "KMSFault$message": null, "KMSInvalidStateFault$message": null, "KMSKeyNotAccessibleFault$message": "

", "KMSNotFoundFault$message": null, @@ -631,6 +693,8 @@ "ResourceAlreadyExistsFault$message": "

", "ResourceNotFoundFault$message": "

", "ResourceQuotaExceededFault$message": "

", + "S3AccessDeniedFault$message": null, + "S3ResourceNotFoundFault$message": null, "SNSInvalidTopicFault$message": "

", "SNSNoAuthorizationFault$message": "

", "StorageQuotaExceededFault$message": "

", @@ -638,8 +702,14 @@ "UpgradeDependencyFailureFault$message": "

" } }, + "ExcludeTestList": { + "base": null, + "refs": { + "StartReplicationTaskAssessmentRunMessage$Exclude": "

Space-separated list of names for specific individual assessments that you want to exclude. These names come from the default list of individual assessments that AWS DMS supports for the associated migration task. This task is specified by ReplicationTaskArn.

You can't set a value for Exclude if you also set a value for IncludeOnly in the API operation.

To identify the names of the default individual assessments that AWS DMS supports for the associated migration task, run the DescribeApplicableIndividualAssessments operation using its own ReplicationTaskArn request parameter.

" + } + }, "Filter": { - "base": "

Identifies the name and value of a source filter object used to limit the number and type of records transferred from your source to your target.

", + "base": "

Identifies the name and value of a filter object. This filter is used to limit the number and type of AWS DMS objects that are returned for a particular Describe* or similar operation.

", "refs": { "FilterList$member": null } @@ -647,24 +717,34 @@ "FilterList": { "base": null, "refs": { - "DescribeCertificatesMessage$Filters": "

Filters applied to the certificate described in the form of key-value pairs.

", + "DescribeCertificatesMessage$Filters": "

Filters applied to the certificates described in the form of key-value pairs.

", "DescribeConnectionsMessage$Filters": "

The filters applied to the connection.

Valid filter names: endpoint-arn | replication-instance-arn

", - "DescribeEndpointTypesMessage$Filters": "

Filters applied to the describe action.

Valid filter names: engine-name | endpoint-type

", - "DescribeEndpointsMessage$Filters": "

Filters applied to the describe action.

Valid filter names: endpoint-arn | endpoint-type | endpoint-id | engine-name

", - "DescribeEventCategoriesMessage$Filters": "

Filters applied to the action.

", - "DescribeEventSubscriptionsMessage$Filters": "

Filters applied to the action.

", - "DescribeEventsMessage$Filters": "

Filters applied to the action.

", + "DescribeEndpointTypesMessage$Filters": "

Filters applied to the endpoint types.

Valid filter names: engine-name | endpoint-type

", + "DescribeEndpointsMessage$Filters": "

Filters applied to the endpoints.

Valid filter names: endpoint-arn | endpoint-type | endpoint-id | engine-name

", + "DescribeEventCategoriesMessage$Filters": "

Filters applied to the event categories.

", + "DescribeEventSubscriptionsMessage$Filters": "

Filters applied to event subscriptions.

", + "DescribeEventsMessage$Filters": "

Filters applied to events.

", "DescribePendingMaintenanceActionsMessage$Filters": "

", - "DescribeReplicationInstancesMessage$Filters": "

Filters applied to the describe action.

Valid filter names: replication-instance-arn | replication-instance-id | replication-instance-class | engine-version

", - "DescribeReplicationSubnetGroupsMessage$Filters": "

Filters applied to the describe action.

Valid filter names: replication-subnet-group-id

", - "DescribeReplicationTasksMessage$Filters": "

Filters applied to the describe action.

Valid filter names: replication-task-arn | replication-task-id | migration-type | endpoint-arn | replication-instance-arn

", - "DescribeTableStatisticsMessage$Filters": "

Filters applied to the describe table statistics action.

Valid filter names: schema-name | table-name | table-state

A combination of filters creates an AND condition where each record matches all specified filters.

" + "DescribeReplicationInstancesMessage$Filters": "

Filters applied to replication instances.

Valid filter names: replication-instance-arn | replication-instance-id | replication-instance-class | engine-version

", + "DescribeReplicationSubnetGroupsMessage$Filters": "

Filters applied to replication subnet groups.

Valid filter names: replication-subnet-group-id

", + "DescribeReplicationTaskAssessmentRunsMessage$Filters": "

Filters applied to the premigration assessment runs described in the form of key-value pairs.

Valid filter names: replication-task-assessment-run-arn, replication-task-arn, replication-instance-arn, status

", + "DescribeReplicationTaskIndividualAssessmentsMessage$Filters": "

Filters applied to the individual assessments described in the form of key-value pairs.

Valid filter names: replication-task-assessment-run-arn, replication-task-arn, status

", + "DescribeReplicationTasksMessage$Filters": "

Filters applied to replication tasks.

Valid filter names: replication-task-arn | replication-task-id | migration-type | endpoint-arn | replication-instance-arn

", + "DescribeTableStatisticsMessage$Filters": "

Filters applied to table statistics.

Valid filter names: schema-name | table-name | table-state

A combination of filters creates an AND condition where each record matches all specified filters.

" } }, "FilterValueList": { "base": null, "refs": { - "Filter$Values": "

The filter value.

" + "Filter$Values": "

The filter value, which can specify one or more values used to narrow the returned results.

" + } + }, + "IBMDb2Settings": { + "base": "

Provides information that defines an IBM Db2 LUW endpoint.

", + "refs": { + "CreateEndpointMessage$IBMDb2Settings": "

Settings in JSON format for the source IBM Db2 LUW endpoint. For information about other available settings, see Extra connection attributes when using Db2 LUW as a source for AWS DMS in the AWS Database Migration Service User Guide.

", + "Endpoint$IBMDb2Settings": "

The settings for the IBM Db2 LUW source endpoint. For more information, see the IBMDb2Settings structure.

", + "ModifyEndpointMessage$IBMDb2Settings": "

Settings in JSON format for the source IBM Db2 LUW endpoint. For information about other available settings, see Extra connection attributes when using Db2 LUW as a source for AWS DMS in the AWS Database Migration Service User Guide.

" } }, "ImportCertificateMessage": { @@ -677,6 +757,18 @@ "refs": { } }, + "IncludeTestList": { + "base": null, + "refs": { + "StartReplicationTaskAssessmentRunMessage$IncludeOnly": "

Space-separated list of names for specific individual assessments that you want to include. These names come from the default list of individual assessments that AWS DMS supports for the associated migration task. This task is specified by ReplicationTaskArn.

You can't set a value for IncludeOnly if you also set a value for Exclude in the API operation.

To identify the names of the default individual assessments that AWS DMS supports for the associated migration task, run the DescribeApplicableIndividualAssessments operation using its own ReplicationTaskArn request parameter.

" + } + }, + "IndividualAssessmentNameList": { + "base": null, + "refs": { + "DescribeApplicableIndividualAssessmentsResponse$IndividualAssessmentNames": "

List of names for the individual assessments supported by the premigration assessment run that you start based on the specified request parameters. For more information on the available individual assessments, including compatibility with different migration task configurations, see Working with premigration assessment runs in the AWS Database Migration Service User Guide.

" + } + }, "InsufficientResourceCapacityFault": { "base": "

There are not enough resources allocated to the database migration.

", "refs": { @@ -690,6 +782,8 @@ "OrderableReplicationInstance$DefaultAllocatedStorage": "

The default amount of storage (in gigabytes) that is allocated for the replication instance.

", "OrderableReplicationInstance$IncludedAllocatedStorage": "

The amount of storage (in gigabytes) that is allocated for the replication instance.

", "ReplicationInstance$AllocatedStorage": "

The amount of storage (in gigabytes) that is allocated for the replication instance.

", + "ReplicationTaskAssessmentRunProgress$IndividualAssessmentCount": "

The number of individual assessments that are specified to run.

", + "ReplicationTaskAssessmentRunProgress$IndividualAssessmentCompletedCount": "

The number of individual assessments that have completed, successfully or not.

", "ReplicationTaskStats$FullLoadProgressPercent": "

The percent complete for the full load migration task.

", "ReplicationTaskStats$TablesLoaded": "

The number of tables loaded for this task.

", "ReplicationTaskStats$TablesLoading": "

The number of tables currently loading for this task.

", @@ -703,6 +797,7 @@ "Certificate$KeyLength": "

The key length of the cryptographic algorithm being used.

", "CreateEndpointMessage$Port": "

The port used by the endpoint database.

", "CreateReplicationInstanceMessage$AllocatedStorage": "

The amount of storage (in gigabytes) to be initially allocated for the replication instance.

", + "DescribeApplicableIndividualAssessmentsMessage$MaxRecords": "

Maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

", "DescribeCertificatesMessage$MaxRecords": "

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 10

", "DescribeConnectionsMessage$MaxRecords": "

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

", "DescribeEndpointTypesMessage$MaxRecords": "

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

", @@ -716,18 +811,25 @@ "DescribeReplicationInstancesMessage$MaxRecords": "

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

", "DescribeReplicationSubnetGroupsMessage$MaxRecords": "

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

", "DescribeReplicationTaskAssessmentResultsMessage$MaxRecords": "

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

", + "DescribeReplicationTaskAssessmentRunsMessage$MaxRecords": "

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

", + "DescribeReplicationTaskIndividualAssessmentsMessage$MaxRecords": "

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

", "DescribeReplicationTasksMessage$MaxRecords": "

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

", "DescribeSchemasMessage$MaxRecords": "

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 100.

", "DescribeTableStatisticsMessage$MaxRecords": "

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved.

Default: 100

Constraints: Minimum 20, maximum 500.

", "ElasticsearchSettings$FullLoadErrorPercentage": "

The maximum percentage of records that can fail to be written before a full load operation stops.

", "ElasticsearchSettings$ErrorRetryDuration": "

The maximum number of seconds for which DMS retries failed API requests to the Elasticsearch cluster.

", "Endpoint$Port": "

The port value used to access the endpoint.

", + "IBMDb2Settings$Port": "

Endpoint TCP port.

", + "MicrosoftSQLServerSettings$Port": "

Endpoint TCP port.

", "ModifyEndpointMessage$Port": "

The port used by the endpoint database.

", "ModifyReplicationInstanceMessage$AllocatedStorage": "

The amount of storage (in gigabytes) to be allocated for the replication instance.

", "MongoDbSettings$Port": "

The port value for the MongoDB source endpoint.

", + "MySQLSettings$Port": "

Endpoint TCP port.

", "NeptuneSettings$ErrorRetryDuration": "

The number of milliseconds for AWS DMS to wait to retry a bulk-load of migrated graph data to the Neptune target database before raising an error. The default is 250.

", "NeptuneSettings$MaxFileSize": "

The maximum size in kilobytes of migrated graph data stored in a .csv file before AWS DMS bulk-loads the data to the Neptune target database. The default is 1,048,576 KB. If the bulk load is successful, AWS DMS clears the bucket, ready to store the next batch of migrated graph data.

", "NeptuneSettings$MaxRetryCount": "

The number of times for AWS DMS to retry a bulk load of migrated graph data to the Neptune target database before raising an error. The default is 5.

", + "OracleSettings$Port": "

Endpoint TCP port.

", + "PostgreSQLSettings$Port": "

Endpoint TCP port.

", "RedshiftSettings$ConnectionTimeout": "

A value that sets the amount of time to wait (in milliseconds) before timing out, beginning from when you initially establish a connection.

", "RedshiftSettings$FileTransferUploadStreams": "

The number of threads used to upload a single file. This parameter accepts a value from 1 through 64. It defaults to 10.

", "RedshiftSettings$LoadTimeout": "

The amount of time to wait (in milliseconds) before timing out, beginning from when you begin loading.

", @@ -737,7 +839,8 @@ "ReplicationPendingModifiedValues$AllocatedStorage": "

The amount of storage (in gigabytes) that is allocated for the replication instance.

", "S3Settings$DictPageSizeLimit": "

The maximum size of an encoded dictionary page of a column. If the dictionary page exceeds this, this column is stored using an encoding type of PLAIN. This parameter defaults to 1024 * 1024 bytes (1 MiB), the maximum size of a dictionary page before it reverts to PLAIN encoding. This size is used for .parquet file format only.

", "S3Settings$RowGroupLength": "

The number of rows in a row group. A smaller row group size provides faster reads. But as the number of row groups grows, writes become slower. This parameter defaults to 10,000 rows. This number is used for .parquet file format only.

If you choose a value larger than the maximum, RowGroupLength is set to the max row group length in bytes (64 * 1024 * 1024).

", - "S3Settings$DataPageSize": "

The size of one data page in bytes. This parameter defaults to 1024 * 1024 bytes (1 MiB). This number is used for .parquet file format only.

" + "S3Settings$DataPageSize": "

The size of one data page in bytes. This parameter defaults to 1024 * 1024 bytes (1 MiB). This number is used for .parquet file format only.

", + "SybaseSettings$Port": "

Endpoint TCP port.

" } }, "InvalidCertificateFault": { @@ -765,6 +868,11 @@ "refs": { } }, + "KMSFault": { + "base": "

An AWS Key Management Service (AWS KMS) error is preventing access to AWS KMS.

", + "refs": { + } + }, "KMSInvalidStateFault": { "base": "

The state of the specified AWS KMS resource isn't valid for this request.

", "refs": { @@ -839,13 +947,23 @@ "MessageFormatValue": { "base": null, "refs": { + "KafkaSettings$MessageFormat": "

The output format for the records created on the endpoint. The message format is JSON (default) or JSON_UNFORMATTED (a single line with no tab).

", "KinesisSettings$MessageFormat": "

The output format for the records created on the endpoint. The message format is JSON (default) or JSON_UNFORMATTED (a single line with no tab).

" } }, + "MicrosoftSQLServerSettings": { + "base": "

Provides information that defines a Microsoft SQL Server endpoint.

", + "refs": { + "CreateEndpointMessage$MicrosoftSQLServerSettings": "

Settings in JSON format for the source and target Microsoft SQL Server endpoint. For information about other available settings, see Extra connection attributes when using SQL Server as a source for AWS DMS and Extra connection attributes when using SQL Server as a target for AWS DMS in the AWS Database Migration Service User Guide.

", + "Endpoint$MicrosoftSQLServerSettings": "

The settings for the Microsoft SQL Server source and target endpoint. For more information, see the MicrosoftSQLServerSettings structure.

", + "ModifyEndpointMessage$MicrosoftSQLServerSettings": "

Settings in JSON format for the source and target Microsoft SQL Server endpoint. For information about other available settings, see Extra connection attributes when using SQL Server as a source for AWS DMS and Extra connection attributes when using SQL Server as a target for AWS DMS in the AWS Database Migration Service User Guide.

" + } + }, "MigrationTypeValue": { "base": null, "refs": { "CreateReplicationTaskMessage$MigrationType": "

The migration type. Valid values: full-load | cdc | full-load-and-cdc

", + "DescribeApplicableIndividualAssessmentsMessage$MigrationType": "

Name of the migration type that each provided individual assessment must support.

", "ModifyReplicationTaskMessage$MigrationType": "

The migration type. Valid values: full-load | cdc | full-load-and-cdc

", "ReplicationTask$MigrationType": "

The type of migration.

" } @@ -908,6 +1026,14 @@ "ModifyEndpointMessage$MongoDbSettings": "

Settings in JSON format for the source MongoDB endpoint. For more information about the available settings, see the configuration properties section in Using MongoDB as a Target for AWS Database Migration Service in the AWS Database Migration Service User Guide.

" } }, + "MySQLSettings": { + "base": "

Provides information that defines a MySQL endpoint.

", + "refs": { + "CreateEndpointMessage$MySQLSettings": "

Settings in JSON format for the source and target MySQL endpoint. For information about other available settings, see Extra connection attributes when using MySQL as a source for AWS DMS and Extra connection attributes when using a MySQL-compatible database as a target for AWS DMS in the AWS Database Migration Service User Guide.
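As a sketch of how the new per-engine settings surface in the generated Go client, a hypothetical MySQL source endpoint might be created as follows; the host, database, and credentials are placeholders, and secrets should be read from a secure source rather than hard-coded:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/databasemigrationservice"
)

func main() {
	svc := databasemigrationservice.New(session.Must(session.NewSession()))

	out, err := svc.CreateEndpoint(&databasemigrationservice.CreateEndpointInput{
		EndpointIdentifier: aws.String("mysql-source-1"),
		EndpointType:       aws.String("source"),
		EngineName:         aws.String("mysql"),
		MySQLSettings: &databasemigrationservice.MySQLSettings{
			ServerName:   aws.String("mysql.example.com"),
			Port:         aws.Int64(3306),
			DatabaseName: aws.String("sales"),
			Username:     aws.String("dms_user"),
			Password:     aws.String("example-password"), // placeholder only
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Endpoint)
}
```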

", + "Endpoint$MySQLSettings": "

The settings for the MySQL source and target endpoint. For more information, see the MySQLSettings structure.

", + "ModifyEndpointMessage$MySQLSettings": "

Settings in JSON format for the source and target MySQL endpoint. For information about other available settings, see Extra connection attributes when using MySQL as a source for AWS DMS and Extra connection attributes when using a MySQL-compatible database as a target for AWS DMS in the AWS Database Migration Service User Guide.

" + } + }, "NeptuneSettings": { "base": "

Provides information that defines an Amazon Neptune endpoint.

", "refs": { @@ -922,6 +1048,14 @@ "MongoDbSettings$NestingLevel": "

Specifies either document or table mode.

Default value is \"none\". Specify \"none\" to use document mode. Specify \"one\" to use table mode.

" } }, + "OracleSettings": { + "base": "

Provides information that defines an Oracle endpoint.

", + "refs": { + "CreateEndpointMessage$OracleSettings": "

Settings in JSON format for the source and target Oracle endpoint. For information about other available settings, see Extra connection attributes when using Oracle as a source for AWS DMS and Extra connection attributes when using Oracle as a target for AWS DMS in the AWS Database Migration Service User Guide.

", + "Endpoint$OracleSettings": "

The settings for the Oracle source and target endpoint. For more information, see the OracleSettings structure.

", + "ModifyEndpointMessage$OracleSettings": "

Settings in JSON format for the source and target Oracle endpoint. For information about other available settings, see Extra connection attributes when using Oracle as a source for AWS DMS and Extra connection attributes when using Oracle as a target for AWS DMS in the AWS Database Migration Service User Guide.

" + } + }, "OrderableReplicationInstance": { "base": "

In response to the DescribeOrderableReplicationInstances operation, this object describes an available replication instance. This description includes the replication instance's type, engine version, and allocated storage.

", "refs": { @@ -958,6 +1092,14 @@ "DescribePendingMaintenanceActionsResponse$PendingMaintenanceActions": "

The pending maintenance action.

" } }, + "PostgreSQLSettings": { + "base": "

Provides information that defines a PostgreSQL endpoint.

", + "refs": { + "CreateEndpointMessage$PostgreSQLSettings": "

Settings in JSON format for the source and target PostgreSQL endpoint. For information about other available settings, see Extra connection attributes when using PostgreSQL as a source for AWS DMS and Extra connection attributes when using PostgreSQL as a target for AWS DMS in the AWS Database Migration Service User Guide.

", + "Endpoint$PostgreSQLSettings": "

The settings for the PostgreSQL source and target endpoint. For more information, see the PostgreSQLSettings structure.

", + "ModifyEndpointMessage$PostgreSQLSettings": "

Settings in JSON format for the source and target PostgreSQL endpoint. For information about other available settings, see Extra connection attributes when using PostgreSQL as a source for AWS DMS and Extra connection attributes when using PostgreSQL as a target for AWS DMS in the AWS Database Migration Service User Guide.

" + } + }, "RebootReplicationInstanceMessage": { "base": null, "refs": { @@ -1130,6 +1272,39 @@ "DescribeReplicationTaskAssessmentResultsResponse$ReplicationTaskAssessmentResults": "

The task assessment report.

" } }, + "ReplicationTaskAssessmentRun": { + "base": "

Provides information that describes a premigration assessment run that you have started using the StartReplicationTaskAssessmentRun operation.

Some of the information appears based on other operations that can return the ReplicationTaskAssessmentRun object.

", + "refs": { + "CancelReplicationTaskAssessmentRunResponse$ReplicationTaskAssessmentRun": "

The ReplicationTaskAssessmentRun object for the canceled assessment run.

", + "DeleteReplicationTaskAssessmentRunResponse$ReplicationTaskAssessmentRun": "

The ReplicationTaskAssessmentRun object for the deleted assessment run.

", + "ReplicationTaskAssessmentRunList$member": null, + "StartReplicationTaskAssessmentRunResponse$ReplicationTaskAssessmentRun": "

The premigration assessment run that was started.

" + } + }, + "ReplicationTaskAssessmentRunList": { + "base": null, + "refs": { + "DescribeReplicationTaskAssessmentRunsResponse$ReplicationTaskAssessmentRuns": "

One or more premigration assessment runs as specified by Filters.

" + } + }, + "ReplicationTaskAssessmentRunProgress": { + "base": "

The progress values reported by the AssessmentProgress response element.

", + "refs": { + "ReplicationTaskAssessmentRun$AssessmentProgress": "

Indication of the completion progress for the individual assessments specified to run.

" + } + }, + "ReplicationTaskIndividualAssessment": { + "base": "

Provides information that describes an individual assessment from a premigration assessment run.

", + "refs": { + "ReplicationTaskIndividualAssessmentList$member": null + } + }, + "ReplicationTaskIndividualAssessmentList": { + "base": null, + "refs": { + "DescribeReplicationTaskIndividualAssessmentsResponse$ReplicationTaskIndividualAssessments": "

One or more individual assessments as specified by Filters.

" + } + }, "ReplicationTaskList": { "base": null, "refs": { @@ -1170,6 +1345,16 @@ "refs": { } }, + "S3AccessDeniedFault": { + "base": "

Insufficient privileges are preventing access to an Amazon S3 object.

", + "refs": { + } + }, + "S3ResourceNotFoundFault": { + "base": "

A specified Amazon S3 bucket, bucket folder, or other object can't be found.

", + "refs": { + } + }, "S3Settings": { "base": "

Settings for exporting data to Amazon S3.

", "refs": { @@ -1198,9 +1383,17 @@ "base": null, "refs": { "CreateEndpointMessage$Password": "

The password to be used to log in to the endpoint database.

", + "IBMDb2Settings$Password": "

Endpoint connection password.

", + "MicrosoftSQLServerSettings$Password": "

Endpoint connection password.

", "ModifyEndpointMessage$Password": "

The password to be used to log in to the endpoint database.

", "MongoDbSettings$Password": "

The password for the user account you use to access the MongoDB source endpoint.

", - "RedshiftSettings$Password": "

The password for the user named in the username property.

" + "MySQLSettings$Password": "

Endpoint connection password.

", + "OracleSettings$AsmPassword": "

For an Oracle source endpoint, your Oracle Automatic Storage Management (ASM) password. You can set this value from the asm_user_password value. You set this value as part of the comma-separated value that you set to the Password request parameter when you create the endpoint to access transaction logs using Binary Reader. For more information, see Configuration for change data capture (CDC) on an Oracle source database.

", + "OracleSettings$Password": "

Endpoint connection password.

", + "OracleSettings$SecurityDbEncryption": "

For an Oracle source endpoint, the transparent data encryption (TDE) password required by AWS DMS to access Oracle redo logs encrypted by TDE using Binary Reader. It is also the TDE_Password part of the comma-separated value you set to the Password request parameter when you create the endpoint. The SecurityDbEncryption setting is related to this SecurityDbEncryptionName setting. For more information, see Supported encryption methods for using Oracle as a source for AWS DMS in the AWS Database Migration Service User Guide.

", + "PostgreSQLSettings$Password": "

Endpoint connection password.

", + "RedshiftSettings$Password": "

The password for the user named in the username property.

", + "SybaseSettings$Password": "

Endpoint connection password.

" } }, "SourceIdsList": { @@ -1227,6 +1420,16 @@ "refs": { } }, + "StartReplicationTaskAssessmentRunMessage": { + "base": "

", + "refs": { + } + }, + "StartReplicationTaskAssessmentRunResponse": { + "base": "

", + "refs": { + } + }, "StartReplicationTaskMessage": { "base": "

", "refs": { @@ -1268,6 +1471,7 @@ "ApplyPendingMaintenanceActionMessage$OptInType": "

A value that specifies the type of opt-in request, or undoes an opt-in request. You can't undo an opt-in request of type immediate.

Valid values:

", "AvailabilityZone$Name": "

The name of the Availability Zone.

", "AvailabilityZonesList$member": null, + "CancelReplicationTaskAssessmentRunMessage$ReplicationTaskAssessmentRunArn": "

Amazon Resource Name (ARN) of the premigration assessment run to be canceled.

", "Certificate$CertificateIdentifier": "

A customer-assigned name for the certificate. Identifiers must begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with a hyphen or contain two consecutive hyphens.

", "Certificate$CertificatePem": "

The contents of a .pem file, which contains an X.509 certificate.

", "Certificate$CertificateArn": "

The Amazon Resource Name (ARN) for the certificate.

", @@ -1318,8 +1522,15 @@ "DeleteEventSubscriptionMessage$SubscriptionName": "

The name of the DMS event notification subscription to be deleted.

", "DeleteReplicationInstanceMessage$ReplicationInstanceArn": "

The Amazon Resource Name (ARN) of the replication instance to be deleted.

", "DeleteReplicationSubnetGroupMessage$ReplicationSubnetGroupIdentifier": "

The subnet group name of the replication instance.

", + "DeleteReplicationTaskAssessmentRunMessage$ReplicationTaskAssessmentRunArn": "

Amazon Resource Name (ARN) of the premigration assessment run to be deleted.

", "DeleteReplicationTaskMessage$ReplicationTaskArn": "

The Amazon Resource Name (ARN) of the replication task to be deleted.

", "DescribeAccountAttributesResponse$UniqueAccountIdentifier": "

A unique AWS DMS identifier for an account in a particular AWS Region. The value of this identifier has the following format: c99999999999. DMS uses this identifier to name artifacts. For example, DMS uses this identifier to name the default Amazon S3 bucket for storing task assessment reports in a given AWS Region. The format of this S3 bucket name is the following: dms-AccountNumber-UniqueAccountIdentifier. Here is an example name for this default S3 bucket: dms-111122223333-c44445555666.

AWS DMS supports the UniqueAccountIdentifier parameter in versions 3.1.4 and later.

", + "DescribeApplicableIndividualAssessmentsMessage$ReplicationTaskArn": "

Amazon Resource Name (ARN) of a migration task on which you want to base the default list of individual assessments.

", + "DescribeApplicableIndividualAssessmentsMessage$ReplicationInstanceArn": "

ARN of a replication instance on which you want to base the default list of individual assessments.

", + "DescribeApplicableIndividualAssessmentsMessage$SourceEngineName": "

Name of a database engine that the specified replication instance supports as a source.

", + "DescribeApplicableIndividualAssessmentsMessage$TargetEngineName": "

Name of a database engine that the specified replication instance supports as a target.

", + "DescribeApplicableIndividualAssessmentsMessage$Marker": "

Optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", + "DescribeApplicableIndividualAssessmentsResponse$Marker": "

Pagination token returned for you to pass to a subsequent request. If you pass this token as the Marker value in a subsequent request, the response includes only records beyond the marker, up to the value specified in the request by MaxRecords.

", "DescribeCertificatesMessage$Marker": "

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", "DescribeCertificatesResponse$Marker": "

The pagination token.

", "DescribeConnectionsMessage$Marker": "

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", @@ -1353,6 +1564,10 @@ "DescribeReplicationTaskAssessmentResultsMessage$Marker": "

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", "DescribeReplicationTaskAssessmentResultsResponse$Marker": "

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", "DescribeReplicationTaskAssessmentResultsResponse$BucketName": "

The Amazon S3 bucket where the task assessment report is located.

", + "DescribeReplicationTaskAssessmentRunsMessage$Marker": "

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", + "DescribeReplicationTaskAssessmentRunsResponse$Marker": "

A pagination token returned for you to pass to a subsequent request. If you pass this token as the Marker value in a subsequent request, the response includes only records beyond the marker, up to the value specified in the request by MaxRecords.

", + "DescribeReplicationTaskIndividualAssessmentsMessage$Marker": "

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", + "DescribeReplicationTaskIndividualAssessmentsResponse$Marker": "

A pagination token returned for you to pass to a subsequent request. If you pass this token as the Marker value in a subsequent request, the response includes only records beyond the marker, up to the value specified in the request by MaxRecords.

", "DescribeReplicationTasksMessage$Marker": "

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", "DescribeReplicationTasksResponse$Marker": "

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", "DescribeSchemasMessage$EndpointArn": "

The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.

", @@ -1391,16 +1606,25 @@ "EventSubscription$Status": "

The status of the AWS DMS event notification subscription.

Constraints:

Can be one of the following: creating | modifying | deleting | active | no-permission | topic-not-exist

The status \"no-permission\" indicates that AWS DMS no longer has permission to post to the SNS topic. The status \"topic-not-exist\" indicates that the topic was deleted after the subscription was created.

", "EventSubscription$SubscriptionCreationTime": "

The time the AWS DMS event notification subscription was created.

", "EventSubscription$SourceType": "

The type of AWS DMS resource that generates events.

Valid values: replication-instance | replication-server | security-group | replication-task

", - "Filter$Name": "

The name of the filter.

", + "ExcludeTestList$member": null, + "Filter$Name": "

The name of the filter as specified for a Describe* or similar operation.

", "FilterValueList$member": null, + "IBMDb2Settings$DatabaseName": "

Database name for the endpoint.

", + "IBMDb2Settings$ServerName": "

Fully qualified domain name of the endpoint.

", + "IBMDb2Settings$Username": "

Endpoint connection user name.

", "ImportCertificateMessage$CertificateIdentifier": "

A customer-assigned name for the certificate. Identifiers must begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with a hyphen or contain two consecutive hyphens.

", "ImportCertificateMessage$CertificatePem": "

The contents of a .pem file, which contains an X.509 certificate.

", + "IncludeTestList$member": null, + "IndividualAssessmentNameList$member": null, "KafkaSettings$Broker": "

The broker location and port of the Kafka broker that hosts your Kafka instance. Specify the broker in the form broker-hostname-or-ip:port . For example, \"ec2-12-345-678-901.compute-1.amazonaws.com:2345\".

", "KafkaSettings$Topic": "

The topic to which you migrate the data. If you don't specify a topic, AWS DMS specifies \"kafka-default-topic\" as the migration topic.

", "KeyList$member": null, "KinesisSettings$StreamArn": "

The Amazon Resource Name (ARN) for the Amazon Kinesis Data Streams endpoint.

", "KinesisSettings$ServiceAccessRoleArn": "

The Amazon Resource Name (ARN) for the AWS Identity and Access Management (IAM) role that AWS DMS uses to write to the Kinesis data stream.

", "ListTagsForResourceMessage$ResourceArn": "

The Amazon Resource Name (ARN) string that uniquely identifies the AWS DMS resource.

", + "MicrosoftSQLServerSettings$DatabaseName": "

Database name for the endpoint.

", + "MicrosoftSQLServerSettings$ServerName": "

Fully qualified domain name of the endpoint.

", + "MicrosoftSQLServerSettings$Username": "

Endpoint connection user name.

", "ModifyEndpointMessage$EndpointArn": "

The Amazon Resource Name (ARN) string that uniquely identifies the endpoint.

", "ModifyEndpointMessage$EndpointIdentifier": "

The database endpoint identifier. Identifiers must begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with a hyphen or contain two consecutive hyphens.

", "ModifyEndpointMessage$EngineName": "

The type of engine for the endpoint. Valid values, depending on the EndpointType, include \"mysql\", \"oracle\", \"postgres\", \"mariadb\", \"aurora\", \"aurora-postgresql\", \"redshift\", \"s3\", \"db2\", \"azuredb\", \"sybase\", \"dynamodb\", \"mongodb\", \"kinesis\", \"kafka\", \"elasticsearch\", \"documentdb\", \"sqlserver\", and \"neptune\".

", @@ -1435,15 +1659,27 @@ "MongoDbSettings$DocsToInvestigate": "

Indicates the number of documents to preview to determine the document organization. Use this setting when NestingLevel is set to \"one\".

Must be a positive value greater than 0. Default value is 1000.

", "MongoDbSettings$AuthSource": "

The MongoDB database name. This setting isn't used when AuthType is set to \"no\".

The default is \"admin\".

", "MongoDbSettings$KmsKeyId": "

The AWS KMS key identifier that is used to encrypt the content on the replication instance. If you don't specify a value for the KmsKeyId parameter, then AWS DMS uses your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.

", + "MySQLSettings$DatabaseName": "

Database name for the endpoint.

", + "MySQLSettings$ServerName": "

Fully qualified domain name of the endpoint.

", + "MySQLSettings$Username": "

Endpoint connection user name.

", "NeptuneSettings$ServiceAccessRoleArn": "

The Amazon Resource Name (ARN) of the service role that you created for the Neptune target endpoint. For more information, see Creating an IAM Service Role for Accessing Amazon Neptune as a Target in the AWS Database Migration Service User Guide.

", "NeptuneSettings$S3BucketName": "

The name of the Amazon S3 bucket where AWS DMS can temporarily store migrated graph data in .csv files before bulk-loading it to the Neptune target database. AWS DMS maps the SQL source data to graph data before storing it in these .csv files.

", "NeptuneSettings$S3BucketFolder": "

A folder path where you want AWS DMS to store migrated graph data in the S3 bucket specified by S3BucketName

", + "OracleSettings$AsmServer": "

For an Oracle source endpoint, your ASM server address. You can set this value from the asm_server value. You set asm_server as part of the extra connection attribute string to access an Oracle server with Binary Reader that uses ASM. For more information, see Configuration for change data capture (CDC) on an Oracle source database.

", + "OracleSettings$AsmUser": "

For an Oracle source endpoint, your ASM user name. You can set this value from the asm_user value. You set asm_user as part of the extra connection attribute string to access an Oracle server with Binary Reader that uses ASM. For more information, see Configuration for change data capture (CDC) on an Oracle source database.

", + "OracleSettings$DatabaseName": "

Database name for the endpoint.

", + "OracleSettings$SecurityDbEncryptionName": "

For an Oracle source endpoint, the name of a key used for the transparent data encryption (TDE) of the columns and tablespaces in an Oracle source database that is encrypted using TDE. The key value is the value of the SecurityDbEncryption setting. For more information on setting the key name value of SecurityDbEncryptionName, see the information and example for setting the securityDbEncryptionName extra connection attribute in Supported encryption methods for using Oracle as a source for AWS DMS in the AWS Database Migration Service User Guide.

", + "OracleSettings$ServerName": "

Fully qualified domain name of the endpoint.

", + "OracleSettings$Username": "

Endpoint connection user name.

", "OrderableReplicationInstance$EngineVersion": "

The version of the replication engine.

", "OrderableReplicationInstance$ReplicationInstanceClass": "

The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example, to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\".

For more information on the settings and capacities for the available replication instance classes, see Selecting the right AWS DMS replication instance for your migration.

", "OrderableReplicationInstance$StorageType": "

The type of storage used by the replication instance.

", "PendingMaintenanceAction$Action": "

The type of pending maintenance action that is available for the resource.

", "PendingMaintenanceAction$OptInStatus": "

The type of opt-in request that has been received for the resource.

", "PendingMaintenanceAction$Description": "

A description providing more detail about the maintenance action.

", + "PostgreSQLSettings$DatabaseName": "

Database name for the endpoint.

", + "PostgreSQLSettings$ServerName": "

Fully qualified domain name of the endpoint.

", + "PostgreSQLSettings$Username": "

Endpoint connection user name.

", "RebootReplicationInstanceMessage$ReplicationInstanceArn": "

The Amazon Resource Name (ARN) of the replication instance.

", "RedshiftSettings$AfterConnectScript": "

Code to run after connecting. This parameter should contain the code itself, not the name of a file containing the code.

", "RedshiftSettings$BucketFolder": "

The location where the comma-separated value (.csv) files are stored before being uploaded to the S3 bucket.

", @@ -1507,6 +1743,20 @@ "ReplicationTaskAssessmentResult$AssessmentResultsFile": "

The file containing the results of the task assessment.

", "ReplicationTaskAssessmentResult$AssessmentResults": "

The task assessment results in JSON format.

", "ReplicationTaskAssessmentResult$S3ObjectUrl": "

The URL of the S3 object containing the task assessment results.

", + "ReplicationTaskAssessmentRun$ReplicationTaskAssessmentRunArn": "

Amazon Resource Name (ARN) of this assessment run.

", + "ReplicationTaskAssessmentRun$ReplicationTaskArn": "

ARN of the migration task associated with this premigration assessment run.

", + "ReplicationTaskAssessmentRun$Status": "

Assessment run status.

This status can have one of the following values:

", + "ReplicationTaskAssessmentRun$LastFailureMessage": "

Last message generated by an individual assessment failure.

", + "ReplicationTaskAssessmentRun$ServiceAccessRoleArn": "

ARN of the service role used to start the assessment run using the StartReplicationTaskAssessmentRun operation.

", + "ReplicationTaskAssessmentRun$ResultLocationBucket": "

Amazon S3 bucket where AWS DMS stores the results of this assessment run.

", + "ReplicationTaskAssessmentRun$ResultLocationFolder": "

Folder in an Amazon S3 bucket where AWS DMS stores the results of this assessment run.

", + "ReplicationTaskAssessmentRun$ResultEncryptionMode": "

Encryption mode used to encrypt the assessment run results.

", + "ReplicationTaskAssessmentRun$ResultKmsKeyArn": "

ARN of the AWS KMS encryption key used to encrypt the assessment run results.

", + "ReplicationTaskAssessmentRun$AssessmentRunName": "

Unique name of the assessment run.

", + "ReplicationTaskIndividualAssessment$ReplicationTaskIndividualAssessmentArn": "

Amazon Resource Name (ARN) of this individual assessment.

", + "ReplicationTaskIndividualAssessment$ReplicationTaskAssessmentRunArn": "

ARN of the premigration assessment run that is created to run this individual assessment.

", + "ReplicationTaskIndividualAssessment$IndividualAssessmentName": "

Name of this individual assessment.

", + "ReplicationTaskIndividualAssessment$Status": "

Individual assessment status.

This status can have one of the following values:

", "ResourcePendingMaintenanceActions$ResourceIdentifier": "

The Amazon Resource Name (ARN) of the DMS resource that the pending maintenance action applies to. For information about creating an ARN, see Constructing an Amazon Resource Name (ARN) for AWS DMS in the DMS documentation.

", "S3Settings$ServiceAccessRoleArn": "

The Amazon Resource Name (ARN) used by the service access IAM role.

", "S3Settings$ExternalTableDefinition": "

The external table definition.

", @@ -1519,6 +1769,13 @@ "SchemaList$member": null, "SourceIdsList$member": null, "StartReplicationTaskAssessmentMessage$ReplicationTaskArn": "

The Amazon Resource Name (ARN) of the replication task.

", + "StartReplicationTaskAssessmentRunMessage$ReplicationTaskArn": "

Amazon Resource Name (ARN) of the migration task associated with the premigration assessment run that you want to start.

", + "StartReplicationTaskAssessmentRunMessage$ServiceAccessRoleArn": "

ARN of a service role needed to start the assessment run.

", + "StartReplicationTaskAssessmentRunMessage$ResultLocationBucket": "

Amazon S3 bucket where you want AWS DMS to store the results of this assessment run.

", + "StartReplicationTaskAssessmentRunMessage$ResultLocationFolder": "

Folder within an Amazon S3 bucket where you want AWS DMS to store the results of this assessment run.

", + "StartReplicationTaskAssessmentRunMessage$ResultEncryptionMode": "

Encryption mode that you can specify to encrypt the results of this assessment run. If you don't specify this request parameter, AWS DMS stores the assessment run results without encryption. You can specify one of the following options:

", + "StartReplicationTaskAssessmentRunMessage$ResultKmsKeyArn": "

ARN of a custom KMS encryption key that you specify when you set ResultEncryptionMode to \"SSE_KMS\".

", + "StartReplicationTaskAssessmentRunMessage$AssessmentRunName": "

Unique name to identify the assessment run.
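As a rough illustration of how the StartReplicationTaskAssessmentRun parameters documented above fit together, the following Go sketch starts a premigration assessment run with the new client operation. All ARNs, the bucket, folder, and run names are hypothetical, and the "SSE_KMS" encryption mode value is taken from the ResultKmsKeyArn note in this model.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	dms "github.com/aws/aws-sdk-go/service/databasemigrationservice"
)

func main() {
	sess := session.Must(session.NewSession())
	client := dms.New(sess)

	out, err := client.StartReplicationTaskAssessmentRun(&dms.StartReplicationTaskAssessmentRunInput{
		ReplicationTaskArn:   aws.String("arn:aws:dms:us-east-1:123456789012:task:EXAMPLETASK"),   // hypothetical
		ServiceAccessRoleArn: aws.String("arn:aws:iam::123456789012:role/dms-assessment-role"),    // hypothetical
		ResultLocationBucket: aws.String("my-dms-assessment-results"),                             // S3 bucket for results
		ResultLocationFolder: aws.String("premigration/"),
		ResultEncryptionMode: aws.String("SSE_KMS"),
		ResultKmsKeyArn:      aws.String("arn:aws:kms:us-east-1:123456789012:key/EXAMPLE-KEY-ID"), // hypothetical
		AssessmentRunName:    aws.String("pre-cutover-check"),
	})
	if err != nil {
		fmt.Println("start failed:", err)
		return
	}
	fmt.Println("assessment run ARN:", aws.StringValue(out.ReplicationTaskAssessmentRun.ReplicationTaskAssessmentRunArn))
}
```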

", "StartReplicationTaskMessage$ReplicationTaskArn": "

The Amazon Resource Name (ARN) of the replication task to be started.

", "StartReplicationTaskMessage$CdcStartPosition": "

Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want a CDC operation to start. Specifying both values results in an error.

The value can be in date, checkpoint, or LSN/SCN format.

Date Example: --cdc-start-position “2018-03-08T12:12:12”

Checkpoint Example: --cdc-start-position \"checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93\"

LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373”

When you use this task setting with a source PostgreSQL database, a logical replication slot should already be created and associated with the source endpoint. You can verify this by setting the slotName extra connection attribute to the name of this logical replication slot. For more information, see Extra Connection Attributes When Using PostgreSQL as a Source for AWS DMS.

", "StartReplicationTaskMessage$CdcStopPosition": "

Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time.

Server time example: --cdc-stop-position “server_time:3018-02-09T12:12:12”

Commit time example: --cdc-stop-position “commit_time: 3018-02-09T12:12:12 “
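To make the position formats above concrete, here is a hedged Go sketch that starts a replication task with a date-style CdcStartPosition and a server-time CdcStopPosition. The task ARN and timestamps are placeholders, and "start-replication" is used as the required StartReplicationTaskType.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	dms "github.com/aws/aws-sdk-go/service/databasemigrationservice"
)

func main() {
	sess := session.Must(session.NewSession())
	client := dms.New(sess)

	out, err := client.StartReplicationTask(&dms.StartReplicationTaskInput{
		ReplicationTaskArn:       aws.String("arn:aws:dms:us-east-1:123456789012:task:EXAMPLETASK"), // hypothetical
		StartReplicationTaskType: aws.String("start-replication"),
		CdcStartPosition:         aws.String("2018-03-08T12:12:12"),             // date format
		CdcStopPosition:          aws.String("server_time:2018-03-09T12:12:12"), // server-time format
	})
	if err != nil {
		fmt.Println("start failed:", err)
		return
	}
	fmt.Println("task status:", aws.StringValue(out.ReplicationTask.Status))
}
```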

", @@ -1529,6 +1786,9 @@ "SupportedEndpointType$EngineName": "

The database engine name. Valid values, depending on the EndpointType, include \"mysql\", \"oracle\", \"postgres\", \"mariadb\", \"aurora\", \"aurora-postgresql\", \"redshift\", \"s3\", \"db2\", \"azuredb\", \"sybase\", \"dynamodb\", \"mongodb\", \"kinesis\", \"kafka\", \"elasticsearch\", \"documentdb\", \"sqlserver\", and \"neptune\".

", "SupportedEndpointType$ReplicationInstanceEngineMinimumVersion": "

The earliest AWS DMS engine version that supports this endpoint engine. Note that endpoint engines released with AWS DMS versions earlier than 3.1.1 do not return a value for this parameter.

", "SupportedEndpointType$EngineDisplayName": "

The expanded name for the engine name. For example, if the EngineName parameter is \"aurora,\" this value would be \"Amazon Aurora MySQL.\"

", + "SybaseSettings$DatabaseName": "

Database name for the endpoint.

", + "SybaseSettings$ServerName": "

Fully qualified domain name of the endpoint.

", + "SybaseSettings$Username": "

Endpoint connection user name.

", "TableStatistics$SchemaName": "

The schema name.

", "TableStatistics$TableName": "

The name of the table.

", "TableStatistics$TableState": "

The state of the tables described.

Valid states: Table does not exist | Before load | Full load | Table completed | Table cancelled | Table error | Table all | Table updates | Table is being reloaded

", @@ -1581,6 +1841,14 @@ "DescribeEndpointTypesResponse$SupportedEndpointTypes": "

The types of endpoints that are supported.

" } }, + "SybaseSettings": { + "base": "

Provides information that defines a SAP ASE endpoint.

", + "refs": { + "CreateEndpointMessage$SybaseSettings": "

Settings in JSON format for the source and target SAP ASE endpoint. For information about other available settings, see Extra connection attributes when using SAP ASE as a source for AWS DMS and Extra connection attributes when using SAP ASE as a target for AWS DMS in the AWS Database Migration Service User Guide.

", + "Endpoint$SybaseSettings": "

The settings for the SAP ASE source and target endpoint. For more information, see the SybaseSettings structure.

", + "ModifyEndpointMessage$SybaseSettings": "

Settings in JSON format for the source and target SAP ASE endpoint. For information about other available settings, see Extra connection attributes when using SAP ASE as a source for AWS DMS and Extra connection attributes when using SAP ASE as a target for AWS DMS in the AWS Database Migration Service User Guide.
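A minimal Go sketch of creating a SAP ASE source endpoint with the new SybaseSettings structure follows; the host, credentials, and database names are placeholders, and the field set on SybaseSettings (server, port, database, user, password) is assumed from this model update rather than confirmed by the changelog text.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	dms "github.com/aws/aws-sdk-go/service/databasemigrationservice"
)

func main() {
	sess := session.Must(session.NewSession())
	client := dms.New(sess)

	out, err := client.CreateEndpoint(&dms.CreateEndpointInput{
		EndpointIdentifier: aws.String("sap-ase-source"), // hypothetical identifier
		EndpointType:       aws.String("source"),
		EngineName:         aws.String("sybase"),
		SybaseSettings: &dms.SybaseSettings{
			ServerName:   aws.String("ase.example.internal"), // fully qualified domain name of the endpoint
			Port:         aws.Int64(5000),
			DatabaseName: aws.String("salesdb"),
			Username:     aws.String("dms_user"),
			Password:     aws.String("example-password"), // use a secret store in real code
		},
	})
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	fmt.Println("endpoint ARN:", aws.StringValue(out.Endpoint.EndpointArn))
}
```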

" + } + }, "TStamp": { "base": null, "refs": { @@ -1601,6 +1869,8 @@ "ReplicationTask$ReplicationTaskCreationDate": "

The date the replication task was created.

", "ReplicationTask$ReplicationTaskStartDate": "

The date the replication task is scheduled to start.

", "ReplicationTaskAssessmentResult$ReplicationTaskLastAssessmentDate": "

The date the task assessment was completed.

", + "ReplicationTaskAssessmentRun$ReplicationTaskAssessmentRunCreationDate": "

Date on which the assessment run was created using the StartReplicationTaskAssessmentRun operation.

", + "ReplicationTaskIndividualAssessment$ReplicationTaskIndividualAssessmentStartDate": "

Date when this individual assessment was started as part of running the StartReplicationTaskAssessmentRun operation.

", "ReplicationTaskStats$FreshStartDate": "

The date the replication task was started either with a fresh start or a target reload.

", "ReplicationTaskStats$StartDate": "

The date the replication task was started either with a fresh start or a resume. For more information, see StartReplicationTaskType.

", "ReplicationTaskStats$StopDate": "

The date the replication task was stopped.

", diff --git a/models/apis/dms/2016-01-01/paginators-1.json b/models/apis/dms/2016-01-01/paginators-1.json index 755ab793006..1cd57271f0b 100644 --- a/models/apis/dms/2016-01-01/paginators-1.json +++ b/models/apis/dms/2016-01-01/paginators-1.json @@ -1,5 +1,10 @@ { "pagination": { + "DescribeApplicableIndividualAssessments": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords" + }, "DescribeCertificates": { "input_token": "Marker", "output_token": "Marker", @@ -60,6 +65,16 @@ "output_token": "Marker", "limit_key": "MaxRecords" }, + "DescribeReplicationTaskAssessmentRuns": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords" + }, + "DescribeReplicationTaskIndividualAssessments": { + "input_token": "Marker", + "output_token": "Marker", + "limit_key": "MaxRecords" + }, "DescribeReplicationTasks": { "input_token": "Marker", "output_token": "Marker", diff --git a/models/apis/ec2/2016-11-15/api-2.json b/models/apis/ec2/2016-11-15/api-2.json index 33379458cd4..e937f286a15 100755 --- a/models/apis/ec2/2016-11-15/api-2.json +++ b/models/apis/ec2/2016-11-15/api-2.json @@ -18253,6 +18253,15 @@ "r6g.8xlarge", "r6g.12xlarge", "r6g.16xlarge", + "r6gd.metal", + "r6gd.medium", + "r6gd.large", + "r6gd.xlarge", + "r6gd.2xlarge", + "r6gd.4xlarge", + "r6gd.8xlarge", + "r6gd.12xlarge", + "r6gd.16xlarge", "x1.16xlarge", "x1.32xlarge", "x1e.xlarge", @@ -18335,6 +18344,15 @@ "c6g.8xlarge", "c6g.12xlarge", "c6g.16xlarge", + "c6gd.metal", + "c6gd.medium", + "c6gd.large", + "c6gd.xlarge", + "c6gd.2xlarge", + "c6gd.4xlarge", + "c6gd.8xlarge", + "c6gd.12xlarge", + "c6gd.16xlarge", "cc1.4xlarge", "cc2.8xlarge", "g2.2xlarge", @@ -18465,7 +18483,16 @@ "m6g.4xlarge", "m6g.8xlarge", "m6g.12xlarge", - "m6g.16xlarge" + "m6g.16xlarge", + "m6gd.metal", + "m6gd.medium", + "m6gd.large", + "m6gd.xlarge", + "m6gd.2xlarge", + "m6gd.4xlarge", + "m6gd.8xlarge", + "m6gd.12xlarge", + "m6gd.16xlarge" ] }, "InstanceTypeHypervisor":{ diff --git a/models/apis/frauddetector/2019-11-15/api-2.json b/models/apis/frauddetector/2019-11-15/api-2.json index 8bdaa1dc0ab..4395b450dae 100644 --- a/models/apis/frauddetector/2019-11-15/api-2.json +++ b/models/apis/frauddetector/2019-11-15/api-2.json @@ -797,7 +797,7 @@ "eventTypeName" ], "members":{ - "modelId":{"shape":"identifier"}, + "modelId":{"shape":"modelIdentifier"}, "modelType":{"shape":"ModelTypeEnum"}, "description":{"shape":"description"}, "eventTypeName":{"shape":"string"}, @@ -818,7 +818,7 @@ "trainingDataSchema" ], "members":{ - "modelId":{"shape":"identifier"}, + "modelId":{"shape":"modelIdentifier"}, "modelType":{"shape":"ModelTypeEnum"}, "trainingDataSource":{"shape":"TrainingDataSourceEnum"}, "trainingDataSchema":{"shape":"TrainingDataSchema"}, @@ -829,7 +829,7 @@ "CreateModelVersionResult":{ "type":"structure", "members":{ - "modelId":{"shape":"identifier"}, + "modelId":{"shape":"modelIdentifier"}, "modelType":{"shape":"ModelTypeEnum"}, "modelVersionNumber":{"shape":"nonEmptyString"}, "status":{"shape":"string"} @@ -989,7 +989,7 @@ "DescribeModelVersionsRequest":{ "type":"structure", "members":{ - "modelId":{"shape":"identifier"}, + "modelId":{"shape":"modelIdentifier"}, "modelVersionNumber":{"shape":"floatVersionString"}, "modelType":{"shape":"ModelTypeEnum"}, "nextToken":{"shape":"string"}, @@ -1053,6 +1053,10 @@ }, "Entity":{ "type":"structure", + "required":[ + "entityType", + "entityId" + ], "members":{ "entityType":{"shape":"string"}, "entityId":{"shape":"identifier"} @@ -1102,7 +1106,6 @@ "type":"structure", 
"members":{ "modelEndpoint":{"shape":"string"}, - "eventTypeName":{"shape":"identifier"}, "modelSource":{"shape":"ModelSource"}, "invokeModelEndpointRoleArn":{"shape":"string"}, "inputConfiguration":{"shape":"ModelInputConfiguration"}, @@ -1291,7 +1294,7 @@ "modelVersionNumber" ], "members":{ - "modelId":{"shape":"identifier"}, + "modelId":{"shape":"modelIdentifier"}, "modelType":{"shape":"ModelTypeEnum"}, "modelVersionNumber":{"shape":"floatVersionString"} } @@ -1299,7 +1302,7 @@ "GetModelVersionResult":{ "type":"structure", "members":{ - "modelId":{"shape":"identifier"}, + "modelId":{"shape":"modelIdentifier"}, "modelType":{"shape":"ModelTypeEnum"}, "modelVersionNumber":{"shape":"floatVersionString"}, "trainingDataSource":{"shape":"TrainingDataSourceEnum"}, @@ -1312,7 +1315,7 @@ "GetModelsRequest":{ "type":"structure", "members":{ - "modelId":{"shape":"identifier"}, + "modelId":{"shape":"modelIdentifier"}, "modelType":{"shape":"ModelTypeEnum"}, "nextToken":{"shape":"string"}, "maxResults":{"shape":"modelsMaxPageSize"} @@ -1464,7 +1467,7 @@ "Model":{ "type":"structure", "members":{ - "modelId":{"shape":"identifier"}, + "modelId":{"shape":"modelIdentifier"}, "modelType":{"shape":"ModelTypeEnum"}, "description":{"shape":"description"}, "eventTypeName":{"shape":"string"}, @@ -1491,6 +1494,7 @@ "type":"structure", "required":["useEventVariables"], "members":{ + "eventTypeName":{"shape":"identifier"}, "format":{"shape":"ModelInputDataFormat"}, "useEventVariables":{"shape":"UseEventVariables"}, "jsonInputTemplate":{"shape":"string"}, @@ -1548,7 +1552,7 @@ "modelVersionNumber" ], "members":{ - "modelId":{"shape":"identifier"}, + "modelId":{"shape":"modelIdentifier"}, "modelType":{"shape":"ModelTypeEnum"}, "modelVersionNumber":{"shape":"nonEmptyString"}, "arn":{"shape":"fraudDetectorArn"} @@ -1557,7 +1561,7 @@ "ModelVersionDetail":{ "type":"structure", "members":{ - "modelId":{"shape":"identifier"}, + "modelId":{"shape":"modelIdentifier"}, "modelType":{"shape":"ModelTypeEnum"}, "modelVersionNumber":{"shape":"floatVersionString"}, "status":{"shape":"string"}, @@ -1673,7 +1677,6 @@ ], "members":{ "modelEndpoint":{"shape":"sageMakerEndpointIdentifier"}, - "eventTypeName":{"shape":"identifier"}, "modelSource":{"shape":"ModelSource"}, "invokeModelEndpointRoleArn":{"shape":"string"}, "inputConfiguration":{"shape":"ModelInputConfiguration"}, @@ -1943,7 +1946,7 @@ "modelType" ], "members":{ - "modelId":{"shape":"identifier"}, + "modelId":{"shape":"modelIdentifier"}, "modelType":{"shape":"ModelTypeEnum"}, "description":{"shape":"description"} } @@ -1961,7 +1964,7 @@ "majorVersionNumber" ], "members":{ - "modelId":{"shape":"identifier"}, + "modelId":{"shape":"modelIdentifier"}, "modelType":{"shape":"ModelTypeEnum"}, "majorVersionNumber":{"shape":"wholeNumberVersionString"}, "externalEventsDetail":{"shape":"ExternalEventsDetail"}, @@ -1971,7 +1974,7 @@ "UpdateModelVersionResult":{ "type":"structure", "members":{ - "modelId":{"shape":"identifier"}, + "modelId":{"shape":"modelIdentifier"}, "modelType":{"shape":"ModelTypeEnum"}, "modelVersionNumber":{"shape":"floatVersionString"}, "status":{"shape":"string"} @@ -1986,7 +1989,7 @@ "status" ], "members":{ - "modelId":{"shape":"identifier"}, + "modelId":{"shape":"modelIdentifier"}, "modelType":{"shape":"ModelTypeEnum"}, "modelVersionNumber":{"shape":"floatVersionString"}, "status":{"shape":"ModelVersionStatus"} @@ -2189,6 +2192,12 @@ "type":"list", "member":{"shape":"MetricDataPoint"} }, + "modelIdentifier":{ + "type":"string", + "max":64, + "min":1, + 
"pattern":"^[0-9a-z_]+$" + }, "modelList":{ "type":"list", "member":{"shape":"Model"} diff --git a/models/apis/frauddetector/2019-11-15/docs-2.json b/models/apis/frauddetector/2019-11-15/docs-2.json index bc7197673be..024e7aab226 100644 --- a/models/apis/frauddetector/2019-11-15/docs-2.json +++ b/models/apis/frauddetector/2019-11-15/docs-2.json @@ -1162,41 +1162,30 @@ "refs": { "CreateDetectorVersionRequest$detectorId": "

The ID of the detector under which you want to create a new version.

", "CreateDetectorVersionResult$detectorId": "

The ID for the created version's parent detector.

", - "CreateModelRequest$modelId": "

The model ID.

", - "CreateModelVersionRequest$modelId": "

The model ID.

", - "CreateModelVersionResult$modelId": "

The model ID.

", "CreateRuleRequest$ruleId": "

The rule ID.

", "CreateRuleRequest$detectorId": "

The detector ID for the rule's parent detector.

", "DeleteDetectorRequest$detectorId": "

The ID of the detector to delete.

", "DeleteDetectorVersionRequest$detectorId": "

The ID of the parent detector for the detector version to delete.

", "DescribeDetectorRequest$detectorId": "

The detector ID.

", "DescribeDetectorResult$detectorId": "

The detector ID.

", - "DescribeModelVersionsRequest$modelId": "

The model ID.

", "Detector$detectorId": "

The detector ID.

", "Detector$eventTypeName": "

The name of the event type.

", "Entity$entityId": "

The entity ID. If you do not know the entityId, you can pass unknown, which is a reserved string literal.

", - "ExternalModel$eventTypeName": "

The event type names.

", "GetDetectorVersionRequest$detectorId": "

The detector ID.

", "GetDetectorVersionResult$detectorId": "

The detector ID.

", "GetDetectorsRequest$detectorId": "

The detector ID.

", "GetEntityTypesRequest$name": "

The name.

", "GetEventTypesRequest$name": "

The name.

", "GetLabelsRequest$name": "

The name of the label or labels to get.

", - "GetModelVersionRequest$modelId": "

The model ID.

", - "GetModelVersionResult$modelId": "

The model ID.

", - "GetModelsRequest$modelId": "

The model ID.

", "GetOutcomesRequest$name": "

The name of the outcome or outcomes to get.

", "GetRulesRequest$ruleId": "

The rule ID.

", "GetRulesRequest$detectorId": "

The detector ID.

", - "Model$modelId": "

The model ID.

", - "ModelVersion$modelId": "

The model ID.

", - "ModelVersionDetail$modelId": "

The model ID.

", + "ModelInputConfiguration$eventTypeName": "

The event type name.

", "Outcome$name": "

The outcome name.

", "PutDetectorRequest$detectorId": "

The detector ID.

", "PutDetectorRequest$eventTypeName": "

The name of the event type.

", "PutEntityTypeRequest$name": "

The name of the entity type.

", "PutEventTypeRequest$name": "

The name.

", - "PutExternalModelRequest$eventTypeName": "

The event type name.

", "PutLabelRequest$name": "

The label name.

", "PutOutcomeRequest$name": "

The name of the outcome.

", "Rule$detectorId": "

The detector for which the rule is associated.

", @@ -1205,11 +1194,7 @@ "RuleDetail$detectorId": "

The detector for which the rule is associated.

", "UpdateDetectorVersionMetadataRequest$detectorId": "

The detector ID.

", "UpdateDetectorVersionRequest$detectorId": "

The parent detector ID for the detector version you want to update.

", - "UpdateDetectorVersionStatusRequest$detectorId": "

The detector ID.

", - "UpdateModelRequest$modelId": "

The model ID.

", - "UpdateModelVersionRequest$modelId": "

The model ID.

", - "UpdateModelVersionResult$modelId": "

The model ID.

", - "UpdateModelVersionStatusRequest$modelId": "

The model ID of the model version to update.

" + "UpdateDetectorVersionStatusRequest$detectorId": "

The detector ID.

" } }, "integer": { @@ -1249,6 +1234,25 @@ "TrainingMetrics$metricDataPoints": "

The data points details.

" } }, + "modelIdentifier": { + "base": null, + "refs": { + "CreateModelRequest$modelId": "

The model ID.

", + "CreateModelVersionRequest$modelId": "

The model ID.

", + "CreateModelVersionResult$modelId": "

The model ID.

", + "DescribeModelVersionsRequest$modelId": "

The model ID.

", + "GetModelVersionRequest$modelId": "

The model ID.

", + "GetModelVersionResult$modelId": "

The model ID.

", + "GetModelsRequest$modelId": "

The model ID.

", + "Model$modelId": "

The model ID.

", + "ModelVersion$modelId": "

The model ID.

", + "ModelVersionDetail$modelId": "

The model ID.

", + "UpdateModelRequest$modelId": "

The model ID.

", + "UpdateModelVersionRequest$modelId": "

The model ID.

", + "UpdateModelVersionResult$modelId": "

The model ID.

", + "UpdateModelVersionStatusRequest$modelId": "

The model ID of the model version to update.
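Since this update moves modelId to the new modelIdentifier shape (1 to 64 characters matching ^[0-9a-z_]+$), here is an illustrative Go call that supplies a conforming ID; the model type string, event type name, and description are placeholders for whatever your account actually defines.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/frauddetector"
)

func main() {
	sess := session.Must(session.NewSession())
	client := frauddetector.New(sess)

	_, err := client.CreateModel(&frauddetector.CreateModelInput{
		ModelId:       aws.String("sample_fraud_model"), // lowercase letters, digits, and underscores only
		ModelType:     aws.String("ONLINE_FRAUD_INSIGHTS"),
		EventTypeName: aws.String("sample_registration"), // hypothetical event type
		Description:   aws.String("example model created from the SDK"),
	})
	if err != nil {
		fmt.Println("create model failed:", err)
		return
	}
	fmt.Println("model created")
}
```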

" + } + }, "modelList": { "base": null, "refs": { diff --git a/models/apis/glue/2017-03-31/api-2.json b/models/apis/glue/2017-03-31/api-2.json index 3424fee6548..531ec633973 100644 --- a/models/apis/glue/2017-03-31/api-2.json +++ b/models/apis/glue/2017-03-31/api-2.json @@ -1539,6 +1539,23 @@ {"shape":"OperationTimeoutException"} ] }, + "ResumeWorkflowRun":{ + "name":"ResumeWorkflowRun", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ResumeWorkflowRunRequest"}, + "output":{"shape":"ResumeWorkflowRunResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"ConcurrentRunsExceededException"}, + {"shape":"IllegalWorkflowStateException"} + ] + }, "SearchTables":{ "name":"SearchTables", "http":{ @@ -5267,6 +5284,10 @@ "CrawlerDetails":{"shape":"CrawlerNodeDetails"} } }, + "NodeIdList":{ + "type":"list", + "member":{"shape":"NameString"} + }, "NodeList":{ "type":"list", "member":{"shape":"Node"} @@ -5607,6 +5628,26 @@ "max":1000, "min":0 }, + "ResumeWorkflowRunRequest":{ + "type":"structure", + "required":[ + "Name", + "RunId", + "NodeIds" + ], + "members":{ + "Name":{"shape":"NameString"}, + "RunId":{"shape":"IdString"}, + "NodeIds":{"shape":"NodeIdList"} + } + }, + "ResumeWorkflowRunResponse":{ + "type":"structure", + "members":{ + "RunId":{"shape":"IdString"}, + "NodeIds":{"shape":"NodeIdList"} + } + }, "Role":{"type":"string"}, "RoleArn":{ "type":"string", @@ -6831,6 +6872,7 @@ "members":{ "Name":{"shape":"NameString"}, "WorkflowRunId":{"shape":"IdString"}, + "PreviousRunId":{"shape":"IdString"}, "WorkflowRunProperties":{"shape":"WorkflowRunProperties"}, "StartedOn":{"shape":"TimestampValue"}, "CompletedOn":{"shape":"TimestampValue"}, diff --git a/models/apis/glue/2017-03-31/docs-2.json b/models/apis/glue/2017-03-31/docs-2.json index aef03c873a0..576beff8749 100644 --- a/models/apis/glue/2017-03-31/docs-2.json +++ b/models/apis/glue/2017-03-31/docs-2.json @@ -103,6 +103,7 @@ "PutResourcePolicy": "

Sets the Data Catalog resource policy for access control.

", "PutWorkflowRunProperties": "

Puts the specified workflow run properties for the given workflow run. If a property already exists for the specified run, then it overrides the value; otherwise, it adds the property to the existing properties.

", "ResetJobBookmark": "

Resets a bookmark entry.

", + "ResumeWorkflowRun": "

Restarts any completed nodes in a workflow run and resumes the run execution.

", "SearchTables": "

Searches a set of tables based on properties in the table metadata as well as on the parent database. You can search against text or filter conditions.

You can only get tables that you have access to based on the security policies defined in Lake Formation. You need at least read-only access to the table for it to be returned. If you do not have access to all the columns in the table, these columns will not be searched against when returning the list of tables back to you. If you have access to the columns but not the data in the columns, those columns and the associated metadata for those columns will be included in the search.

", "StartCrawler": "

Starts a crawl using the specified crawler, regardless of what is scheduled. If the crawler is already running, returns a CrawlerRunningException.

", "StartCrawlerSchedule": "

Changes the schedule state of the specified crawler to SCHEDULED, unless the crawler is already running or the schedule state is already SCHEDULED.

", @@ -1487,7 +1488,7 @@ } }, "Edge": { - "base": "

An edge represents a directed connection between two AWS Glue components which are part of the workflow the edge belongs to.

", + "base": "

An edge represents a directed connection between two AWS Glue components that are part of the workflow the edge belongs to.

", "refs": { "EdgeList$member": null } @@ -1697,7 +1698,7 @@ "GetTriggersResponse$NextToken": "

A continuation token, if not all the requested triggers have yet been returned.

", "GetWorkflowRunsRequest$NextToken": "

A continuation token, if this is a continuation request.

", "GetWorkflowRunsResponse$NextToken": "

A continuation token, if not all requested workflow runs have been returned.

", - "JobCommand$Name": "

The name of the job command. For an Apache Spark ETL job, this must be glueetl. For a Python shell job, it must be pythonshell.

", + "JobCommand$Name": "

The name of the job command. For an Apache Spark ETL job, this must be glueetl. For a Python shell job, it must be pythonshell. For an Apache Spark streaming ETL job, this must be gluestreaming.
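As a sketch of how the new gluestreaming command name is used from Go, the call below creates a streaming ETL job; the role ARN, script location, Glue version, and worker sizing are placeholders rather than recommended values.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	sess := session.Must(session.NewSession())
	client := glue.New(sess)

	out, err := client.CreateJob(&glue.CreateJobInput{
		Name:        aws.String("example-streaming-job"),
		Role:        aws.String("arn:aws:iam::123456789012:role/GlueJobRole"), // hypothetical role
		GlueVersion: aws.String("1.0"),
		Command: &glue.JobCommand{
			Name:           aws.String("gluestreaming"), // glueetl for batch ETL, pythonshell for Python shell jobs
			ScriptLocation: aws.String("s3://my-glue-scripts/stream_job.py"),
		},
		WorkerType:      aws.String("G.1X"),
		NumberOfWorkers: aws.Int64(2),
	})
	if err != nil {
		fmt.Println("create job failed:", err)
		return
	}
	fmt.Println("created job:", aws.StringValue(out.Name))
}
```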

", "JobRun$LogGroupName": "

The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using AWS KMS. This name can be /aws-glue/jobs/, in which case the default encryption is NONE. If you add a role name and SecurityConfiguration name (in other words, /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/), then that security configuration is used to encrypt the log group.

", "ListDevEndpointsRequest$NextToken": "

A continuation token, if this is a continuation request.

", "ListDevEndpointsResponse$NextToken": "

A continuation token, if the returned list does not contain the last metric available.

", @@ -2328,12 +2329,15 @@ "JobRun$PreviousRunId": "

The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action.

", "Predecessor$RunId": "

The job-run ID of the predecessor job run.

", "PutWorkflowRunPropertiesRequest$RunId": "

The ID of the workflow run for which the run properties should be updated.

", + "ResumeWorkflowRunRequest$RunId": "

The ID of the workflow run to resume.

", + "ResumeWorkflowRunResponse$RunId": "

The new ID assigned to the resumed workflow run. Each resume of a workflow run will have a new run ID.

", "StartJobRunRequest$JobRunId": "

The ID of a previous JobRun to retry.

", "StartJobRunResponse$JobRunId": "

The ID assigned to this job run.

", "StartWorkflowRunResponse$RunId": "

The ID assigned to the new run.

", "StopWorkflowRunRequest$RunId": "

The ID of the workflow run to stop.

", "Trigger$Id": "

Reserved for future use.

", "WorkflowRun$WorkflowRunId": "

The ID of this workflow run.

", + "WorkflowRun$PreviousRunId": "

The ID of the previous workflow run.

", "WorkflowRunProperties$key": null } }, @@ -2394,10 +2398,10 @@ "JobUpdate$AllocatedCapacity": "

This field is deprecated. Use MaxCapacity instead.

The number of AWS Glue data processing units (DPUs) to allocate to this job. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

", "StartJobRunRequest$AllocatedCapacity": "

This field is deprecated. Use MaxCapacity instead.

The number of AWS Glue data processing units (DPUs) to allocate to this JobRun. From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

", "WorkflowRunStatistics$TotalActions": "

Total number of Actions in the workflow run.

", - "WorkflowRunStatistics$TimeoutActions": "

Total number of Actions which timed out.

", - "WorkflowRunStatistics$FailedActions": "

Total number of Actions which have failed.

", - "WorkflowRunStatistics$StoppedActions": "

Total number of Actions which have stopped.

", - "WorkflowRunStatistics$SucceededActions": "

Total number of Actions which have succeeded.

", + "WorkflowRunStatistics$TimeoutActions": "

Total number of Actions that timed out.

", + "WorkflowRunStatistics$FailedActions": "

Total number of Actions that have failed.

", + "WorkflowRunStatistics$StoppedActions": "

Total number of Actions that have stopped.

", + "WorkflowRunStatistics$SucceededActions": "

Total number of Actions that have succeeded.

", "WorkflowRunStatistics$RunningActions": "

Total number of Actions in the running state.

" } }, @@ -2503,8 +2507,8 @@ "JobRunState": { "base": null, "refs": { - "Condition$State": "

The condition state. Currently, the only job states that a trigger can listen for are SUCCEEDED, STOPPED, FAILED, and TIMEOUT. The only crawler states that a trigger can listen for are SUCCEEDED, FAILED, and CANCELLED.

", - "JobRun$JobRunState": "

The current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see AWS Glue Job Run Statuses.

" + "Condition$State": "

The condition state. Currently, the values supported are SUCCEEDED, STOPPED, TIMEOUT, and FAILED.

", + "JobRun$JobRunState": "

The current state of the job run.

" } }, "JobUpdate": { @@ -2944,6 +2948,7 @@ "NameStringList$member": null, "Node$Name": "

The name of the AWS Glue component represented by the node.

", "Node$UniqueId": "

The unique Id assigned to the node within the workflow.

", + "NodeIdList$member": null, "Order$Column": "

The name of the column.

", "Partition$DatabaseName": "

The name of the catalog database in which to create the partition.

", "Partition$TableName": "

The name of the database table in which to create the partition.

", @@ -2951,6 +2956,7 @@ "PhysicalConnectionRequirements$AvailabilityZone": "

The connection's Availability Zone. This field is redundant because the specified subnet implies the Availability Zone to be used. Currently the field must be populated, but it will be deprecated in the future.

", "Predecessor$JobName": "

The name of the job definition used by the predecessor job run.

", "PutWorkflowRunPropertiesRequest$Name": "

Name of the workflow that was run.

", + "ResumeWorkflowRunRequest$Name": "

The name of the workflow to resume.

", "SecurityConfiguration$Name": "

The name of the security configuration.

", "SecurityGroupIdList$member": null, "SerDeInfo$Name": "

Name of the SerDe.

", @@ -3014,7 +3020,7 @@ "UserDefinedFunctionInput$OwnerName": "

The owner of the function.

", "Workflow$Name": "

The name of the workflow representing the flow.

", "WorkflowNames$member": null, - "WorkflowRun$Name": "

Name of the workflow which was executed.

", + "WorkflowRun$Name": "

Name of the workflow that was executed.

", "XMLClassifier$Name": "

The name of the classifier.

" } }, @@ -3032,11 +3038,18 @@ } }, "Node": { - "base": "

A node represents an AWS Glue component like Trigger, Job etc. which is part of a workflow.

", + "base": "

A node represents an AWS Glue component, such as a trigger, job, or crawler, that is part of a workflow.

", "refs": { "NodeList$member": null } }, + "NodeIdList": { + "base": null, + "refs": { + "ResumeWorkflowRunRequest$NodeIds": "

A list of the node IDs for the nodes you want to restart. The nodes that are to be restarted must have an execution attempt in the original run.

", + "ResumeWorkflowRunResponse$NodeIds": "

A list of the node IDs for the nodes that were actually restarted.
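The following Go sketch resumes a partially completed workflow run by restarting two nodes; the workflow name, run ID, and node IDs are placeholders. As documented above, each resume produces a new run ID.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	sess := session.Must(session.NewSession())
	client := glue.New(sess)

	out, err := client.ResumeWorkflowRun(&glue.ResumeWorkflowRunInput{
		Name:    aws.String("nightly-etl"),                         // workflow name (hypothetical)
		RunId:   aws.String("wr_0123456789abcdef0123456789abcdef"), // ID of the original run (hypothetical)
		NodeIds: aws.StringSlice([]string{"node_123", "node_456"}), // nodes that ran in the original attempt
	})
	if err != nil {
		fmt.Println("resume failed:", err)
		return
	}
	fmt.Println("new run ID:", aws.StringValue(out.RunId))
	fmt.Println("restarted nodes:", aws.StringValueSlice(out.NodeIds))
}
```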

" + } + }, "NodeList": { "base": null, "refs": { @@ -3128,13 +3141,13 @@ "NullableDouble": { "base": null, "refs": { - "CreateJobRequest$MaxCapacity": "

The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

Do not set Max Capacity if using WorkerType and NumberOfWorkers.

The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:

", + "CreateJobRequest$MaxCapacity": "

The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

Do not set Max Capacity if using WorkerType and NumberOfWorkers.

The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:

", "CreateMLTransformRequest$MaxCapacity": "

The number of AWS Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

MaxCapacity is a mutually exclusive option with NumberOfWorkers and WorkerType.

When the WorkerType field is set to a value other than Standard, the MaxCapacity field is set automatically and becomes read-only.

When the WorkerType field is set to a value other than Standard, the MaxCapacity field is set automatically and becomes read-only.

", "DynamoDBTarget$scanRate": "

The percentage of the configured read capacity units to use by the AWS Glue crawler. Read capacity units is a term defined by DynamoDB, and is a numeric value that acts as a rate limiter for the number of reads that can be performed on that table per second.

The valid values are null or a value between 0.1 and 1.5. A null value is used when the user does not provide a value, and defaults to 0.5 of the configured Read Capacity Unit (for provisioned tables), or 0.25 of the max configured Read Capacity Unit (for tables using on-demand mode).

", "GetMLTransformResponse$MaxCapacity": "

The number of AWS Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

When the WorkerType field is set to a value other than Standard, the MaxCapacity field is set automatically and becomes read-only.

", - "Job$MaxCapacity": "

The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

Do not set Max Capacity if using WorkerType and NumberOfWorkers.

The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:

", + "Job$MaxCapacity": "

The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

Do not set Max Capacity if using WorkerType and NumberOfWorkers.

The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job:

", "JobRun$MaxCapacity": "

The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

Do not set Max Capacity if using WorkerType and NumberOfWorkers.

The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:

", - "JobUpdate$MaxCapacity": "

The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

Do not set Max Capacity if using WorkerType and NumberOfWorkers.

The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:

", + "JobUpdate$MaxCapacity": "

The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

Do not set Max Capacity if using WorkerType and NumberOfWorkers.

The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job:

", "MLTransform$MaxCapacity": "

The number of AWS Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

MaxCapacity is a mutually exclusive option with NumberOfWorkers and WorkerType.

When the WorkerType field is set to a value other than Standard, the MaxCapacity field is set automatically and becomes read-only.

", "StartJobRunRequest$MaxCapacity": "

The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

Do not set Max Capacity if using WorkerType and NumberOfWorkers.

The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, or an Apache Spark ETL job:

", "UpdateMLTransformRequest$MaxCapacity": "

The number of AWS Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page.

When the WorkerType field is set to a value other than Standard, the MaxCapacity field is set automatically and becomes read-only.

" @@ -3498,6 +3511,16 @@ "UserDefinedFunctionInput$ResourceUris": "

The resource URIs for the function.

" } }, + "ResumeWorkflowRunRequest": { + "base": null, + "refs": { + } + }, + "ResumeWorkflowRunResponse": { + "base": null, + "refs": { + } + }, "Role": { "base": null, "refs": { diff --git a/models/apis/ssm/2014-11-06/docs-2.json b/models/apis/ssm/2014-11-06/docs-2.json index d1647181c3c..27229185bc3 100644 --- a/models/apis/ssm/2014-11-06/docs-2.json +++ b/models/apis/ssm/2014-11-06/docs-2.json @@ -954,7 +954,7 @@ } }, "CommandFilter": { - "base": "

Describes a command filter.

", + "base": "

Describes a command filter.

An instance ID can't be specified when a command status is Pending because the command hasn't run on the instance yet.

", "refs": { "CommandFilterList$member": null } @@ -1038,7 +1038,7 @@ "base": null, "refs": { "CommandPlugin$Name": "

The name of the plugin. Must be one of the following: aws:updateAgent, aws:domainjoin, aws:applications, aws:runPowerShellScript, aws:psmodule, aws:cloudWatch, aws:runShellScript, or aws:updateSSMAgent.

", - "GetCommandInvocationRequest$PluginName": "

(Optional) The name of the plugin for which you want detailed results. If the document contains only one plugin, the name can be omitted and the details will be returned.

", + "GetCommandInvocationRequest$PluginName": "

(Optional) The name of the plugin for which you want detailed results. If the document contains only one plugin, the name can be omitted and the details will be returned.

Plugin names are also referred to as step names in Systems Manager documents.
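To show where PluginName fits, here is a hedged Go sketch that fetches detailed results for one step of a multi-plugin document; the command ID and instance ID are placeholders.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func main() {
	sess := session.Must(session.NewSession())
	client := ssm.New(sess)

	out, err := client.GetCommandInvocation(&ssm.GetCommandInvocationInput{
		CommandId:  aws.String("11111111-2222-3333-4444-555555555555"), // hypothetical command ID
		InstanceId: aws.String("i-0123456789abcdef0"),
		PluginName: aws.String("aws:runShellScript"), // step name; optional when the document has a single plugin
	})
	if err != nil {
		fmt.Println("get invocation failed:", err)
		return
	}
	fmt.Println(aws.StringValue(out.Status))
	fmt.Println(aws.StringValue(out.StandardOutputContent))
}
```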

", "GetCommandInvocationResult$PluginName": "

The name of the plugin for which you want detailed results. For example, aws:RunShellScript is a plugin.

" } }, @@ -2687,7 +2687,7 @@ "refs": { "Activation$IamRole": "

The Amazon Identity and Access Management (IAM) role to assign to the managed instance.

", "CreateActivationRequest$IamRole": "

The Amazon Identity and Access Management (IAM) role that you want to assign to the managed instance. This IAM role must provide AssumeRole permissions for the Systems Manager service principal ssm.amazonaws.com. For more information, see Create an IAM service role for a hybrid environment in the AWS Systems Manager User Guide.

", - "InstanceInformation$IamRole": "

The Amazon Identity and Access Management (IAM) role assigned to the on-premises Systems Manager managed instances. This call does not return the IAM role for EC2 instances.

", + "InstanceInformation$IamRole": "

The Amazon Identity and Access Management (IAM) role assigned to the on-premises Systems Manager managed instance. This call does not return the IAM role for EC2 instances. To retrieve the IAM role for an EC2 instance, use the Amazon EC2 DescribeInstances action. For information, see DescribeInstances in the Amazon EC2 API Reference or describe-instances in the AWS CLI Command Reference.

", "UpdateManagedInstanceRoleRequest$IamRole": "

The IAM role you want to assign or change.

" } }, @@ -2801,7 +2801,7 @@ "InstanceInformation$InstanceId": "

The instance ID.

", "InstancePatchState$InstanceId": "

The ID of the managed instance the high-level patch compliance information was collected for.

", "ListCommandInvocationsRequest$InstanceId": "

(Optional) The command execution details for a specific instance ID.

", - "ListCommandsRequest$InstanceId": "

(Optional) Lists commands issued against this instance ID.

", + "ListCommandsRequest$InstanceId": "

(Optional) Lists commands issued against this instance ID.

You can't specify an instance ID in the same command that you specify Status = Pending. This is because the command has not reached the instance yet.
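The constraint above can be seen in a short Go sketch: filtering ListCommands by instance ID only makes sense for commands that have already reached the instance, so the example filters on a terminal status rather than Pending. The instance ID is a placeholder.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func main() {
	sess := session.Must(session.NewSession())
	client := ssm.New(sess)

	out, err := client.ListCommands(&ssm.ListCommandsInput{
		InstanceId: aws.String("i-0123456789abcdef0"), // hypothetical instance
		Filters: []*ssm.CommandFilter{{
			Key:   aws.String("Status"),
			Value: aws.String("Success"), // don't combine an instance ID with Status=Pending
		}},
	})
	if err != nil {
		fmt.Println("list commands failed:", err)
		return
	}
	for _, cmd := range out.Commands {
		fmt.Println(aws.StringValue(cmd.CommandId), aws.StringValue(cmd.Status))
	}
}
```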

", "ListInventoryEntriesRequest$InstanceId": "

The instance ID for which you want inventory information.

", "ListInventoryEntriesResult$InstanceId": "

The instance ID targeted by the request to query inventory information.

", "PutInventoryRequest$InstanceId": "

An instance ID where you want to add or update inventory items.

", @@ -2814,7 +2814,7 @@ "CancelCommandRequest$InstanceIds": "

(Optional) A list of instance IDs on which you want to cancel the command. If not provided, the command is canceled on every instance on which it was requested.

", "Command$InstanceIds": "

The instance IDs against which this command was requested.

", "DescribeInstancePatchStatesRequest$InstanceIds": "

The ID of the instance whose patch state information should be retrieved.

", - "SendCommandRequest$InstanceIds": "

The instance IDs where the command should run. You can specify a maximum of 50 IDs. If you prefer not to list individual instance IDs, you can instead send commands to a fleet of instances using the Targets parameter, which accepts EC2 tags. For more information about how to use targets, see Using targets and rate controls to send commands to a fleet in the AWS Systems Manager User Guide.

" + "SendCommandRequest$InstanceIds": "

The IDs of the instances where the command should run. Specifying instance IDs is most useful when you are targeting a limited number of instances, though you can specify up to 50 IDs.

To target a larger number of instances, or if you prefer not to list individual instance IDs, we recommend using the Targets option instead. Using Targets, which accepts tag key-value pairs to identify the instances to send commands to, you can send a command to tens, hundreds, or thousands of instances at once.

For more information about how to use targets, see Using targets and rate controls to send commands to a fleet in the AWS Systems Manager User Guide.

" } }, "InstanceInformation": { @@ -5499,7 +5499,7 @@ "DescribeInstancePatchesRequest$Filters": "

An array of structures. Each entry in the array is a structure containing a Key, Value combination. Valid values for Key are Classification | KBId | Severity | State.

", "DescribeMaintenanceWindowScheduleRequest$Filters": "

Filters used to limit the range of results. For example, you can limit maintenance window executions to only those scheduled before or after a certain date and time.

", "DescribePatchBaselinesRequest$Filters": "

Each element in the array is a structure containing:

Key: (string, \"NAME_PREFIX\" or \"OWNER\")

Value: (array of strings, exactly 1 entry, between 1 and 255 characters)

", - "DescribePatchGroupsRequest$Filters": "

One or more filters. Use a filter to return a more specific list of results.

" + "DescribePatchGroupsRequest$Filters": "

One or more filters. Use a filter to return a more specific list of results.

For DescribePatchGroups, valid filter keys include the following:

" } }, "PatchOrchestratorFilterValue": { @@ -6609,7 +6609,7 @@ "IncompatiblePolicyException$message": null, "InstanceInformation$PlatformName": "

The name of the operating system platform running on your instance.

", "InstanceInformation$PlatformVersion": "

The version of the OS platform running on your instance.

", - "InstanceInformation$Name": "

The name of the managed instance.

", + "InstanceInformation$Name": "

The name assigned to an on-premises server or virtual machine (VM) when it is activated as a Systems Manager managed instance. The name is specified as the DefaultInstanceName property using the CreateActivation command. It is applied to the managed instance by specifying the Activation Code and Activation ID when you install SSM Agent on the instance, as explained in Install SSM Agent for a hybrid environment (Linux) and Install SSM Agent for a hybrid environment (Windows). To retrieve the Name tag of an EC2 instance, use the Amazon EC2 DescribeInstances action. For information, see DescribeInstances in the Amazon EC2 API Reference or describe-instances in the AWS CLI Command Reference.

", "InternalServerError$Message": null, "InvalidActivation$Message": null, "InvalidActivationId$Message": null, @@ -6774,7 +6774,7 @@ } }, "Target": { - "base": "

An array of search criteria that targets instances using a Key,Value combination that you specify.

Supported formats include the following.

For example:

For information about how to send commands that target instances using Key,Value parameters, see Targeting multiple instances in the AWS Systems Manager User Guide.

", + "base": "

An array of search criteria that targets instances using a Key,Value combination that you specify.

Supported formats include the following.

For example:

For more information about how to send commands that target instances using Key,Value parameters, see Targeting multiple instances in the AWS Systems Manager User Guide.

", "refs": { "Targets$member": null } @@ -6892,7 +6892,7 @@ "MaintenanceWindowTask$Targets": "

The targets (either instances or tags). Instances are specified using Key=instanceids,Values=<instanceid1>,<instanceid2>. Tags are specified using Key=<tag name>,Values=<tag value>.

", "RegisterTargetWithMaintenanceWindowRequest$Targets": "

The targets to register with the maintenance window. In other words, the instances to run commands on when the maintenance window runs.

You can specify targets using instance IDs, resource group names, or tags that have been applied to instances.

Example 1: Specify instance IDs

Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3

Example 2: Use tag key-pairs applied to instances

Key=tag:my-tag-key,Values=my-tag-value-1,my-tag-value-2

Example 3: Use tag-keys applied to instances

Key=tag-key,Values=my-tag-key-1,my-tag-key-2

Example 4: Use resource group names

Key=resource-groups:Name,Values=resource-group-name

Example 5: Use filters for resource group types

Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2

For Key=resource-groups:ResourceTypeFilters, specify resource types in the following format

Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC

For more information about these example formats, including the best use case for each one, see Examples: Register targets with a maintenance window in the AWS Systems Manager User Guide.
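A hedged sketch of Example 2 above with the aws-sdk-go SSM client; the window ID and tag values are placeholders, not taken from the service documentation.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func main() {
	svc := ssm.New(session.Must(session.NewSession()))

	// Register instances that carry a given tag key-value pair with the window.
	out, err := svc.RegisterTargetWithMaintenanceWindow(&ssm.RegisterTargetWithMaintenanceWindowInput{
		WindowId:     aws.String("mw-0123456789abcdef0"), // placeholder window ID
		ResourceType: aws.String("INSTANCE"),
		Targets: []*ssm.Target{
			{
				Key:    aws.String("tag:my-tag-key"),
				Values: []*string{aws.String("my-tag-value-1"), aws.String("my-tag-value-2")},
			},
		},
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(aws.StringValue(out.WindowTargetId))
}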

", "RegisterTaskWithMaintenanceWindowRequest$Targets": "

The targets (either instances or maintenance window targets).

Specify instances using the following format:

Key=InstanceIds,Values=<instance-id-1>,<instance-id-2>

Specify maintenance window targets using the following format:

Key=WindowTargetIds,Values=<window-target-id-1>,<window-target-id-2>

", - "SendCommandRequest$Targets": "

(Optional) An array of search criteria that targets instances using a Key,Value combination that you specify. Targets is required if you don't provide one or more instance IDs in the call. For more information about how to use targets, see Sending commands to a fleet in the AWS Systems Manager User Guide.

", + "SendCommandRequest$Targets": "

An array of search criteria that targets instances using a Key,Value combination that you specify. Specifying targets is most useful when you want to send a command to a large number of instances at once. Using Targets, which accepts tag key-value pairs to identify instances, you can send a single command to tens, hundreds, or thousands of instances.

To send a command to a smaller number of instances, you can use the InstanceIds option instead.

For more information about how to use targets, see Sending commands to a fleet in the AWS Systems Manager User Guide.
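A hedged sketch of tag-based targeting with SendCommand in aws-sdk-go; the document name, tag, and command below are illustrative placeholders.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func main() {
	svc := ssm.New(session.Must(session.NewSession()))

	// Target every instance carrying the (placeholder) tag my-tag-key=my-tag-value
	// instead of listing individual instance IDs.
	out, err := svc.SendCommand(&ssm.SendCommandInput{
		DocumentName: aws.String("AWS-RunShellScript"),
		Targets: []*ssm.Target{
			{Key: aws.String("tag:my-tag-key"), Values: []*string{aws.String("my-tag-value")}},
		},
		Parameters: map[string][]*string{
			"commands": {aws.String("uptime")},
		},
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(aws.StringValue(out.Command.CommandId))
}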

", "StartAutomationExecutionRequest$Targets": "

A key-value mapping to target resources. Required if you specify TargetParameterName.

", "StepExecution$Targets": "

The targets for the step execution.

", "UpdateAssociationRequest$Targets": "

The targets of the association.

", diff --git a/service/databasemigrationservice/api.go b/service/databasemigrationservice/api.go index e7dc80655ec..6a926afa588 100644 --- a/service/databasemigrationservice/api.go +++ b/service/databasemigrationservice/api.go @@ -176,6 +176,97 @@ func (c *DatabaseMigrationService) ApplyPendingMaintenanceActionWithContext(ctx return out, req.Send() } +const opCancelReplicationTaskAssessmentRun = "CancelReplicationTaskAssessmentRun" + +// CancelReplicationTaskAssessmentRunRequest generates a "aws/request.Request" representing the +// client's request for the CancelReplicationTaskAssessmentRun operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CancelReplicationTaskAssessmentRun for more information on using the CancelReplicationTaskAssessmentRun +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CancelReplicationTaskAssessmentRunRequest method. +// req, resp := client.CancelReplicationTaskAssessmentRunRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/CancelReplicationTaskAssessmentRun +func (c *DatabaseMigrationService) CancelReplicationTaskAssessmentRunRequest(input *CancelReplicationTaskAssessmentRunInput) (req *request.Request, output *CancelReplicationTaskAssessmentRunOutput) { + op := &request.Operation{ + Name: opCancelReplicationTaskAssessmentRun, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelReplicationTaskAssessmentRunInput{} + } + + output = &CancelReplicationTaskAssessmentRunOutput{} + req = c.newRequest(op, input, output) + return +} + +// CancelReplicationTaskAssessmentRun API operation for AWS Database Migration Service. +// +// Cancels a single premigration assessment run. +// +// This operation prevents any individual assessments from running if they haven't +// started running. It also attempts to cancel any individual assessments that +// are currently running. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Database Migration Service's +// API operation CancelReplicationTaskAssessmentRun for usage and error information. +// +// Returned Error Types: +// * AccessDeniedFault +// AWS DMS was denied access to the endpoint. Check that the role is correctly +// configured. +// +// * ResourceNotFoundFault +// The resource could not be found. +// +// * InvalidResourceStateFault +// The resource is in a state that prevents it from being used for database +// migration. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/CancelReplicationTaskAssessmentRun +func (c *DatabaseMigrationService) CancelReplicationTaskAssessmentRun(input *CancelReplicationTaskAssessmentRunInput) (*CancelReplicationTaskAssessmentRunOutput, error) { + req, out := c.CancelReplicationTaskAssessmentRunRequest(input) + return out, req.Send() +} + +// CancelReplicationTaskAssessmentRunWithContext is the same as CancelReplicationTaskAssessmentRun with the addition of +// the ability to pass a context and additional request options. +// +// See CancelReplicationTaskAssessmentRun for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DatabaseMigrationService) CancelReplicationTaskAssessmentRunWithContext(ctx aws.Context, input *CancelReplicationTaskAssessmentRunInput, opts ...request.Option) (*CancelReplicationTaskAssessmentRunOutput, error) { + req, out := c.CancelReplicationTaskAssessmentRunRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateEndpoint = "CreateEndpoint" // CreateEndpointRequest generates a "aws/request.Request" representing the @@ -1295,6 +1386,97 @@ func (c *DatabaseMigrationService) DeleteReplicationTaskWithContext(ctx aws.Cont return out, req.Send() } +const opDeleteReplicationTaskAssessmentRun = "DeleteReplicationTaskAssessmentRun" + +// DeleteReplicationTaskAssessmentRunRequest generates a "aws/request.Request" representing the +// client's request for the DeleteReplicationTaskAssessmentRun operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteReplicationTaskAssessmentRun for more information on using the DeleteReplicationTaskAssessmentRun +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteReplicationTaskAssessmentRunRequest method. +// req, resp := client.DeleteReplicationTaskAssessmentRunRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/DeleteReplicationTaskAssessmentRun +func (c *DatabaseMigrationService) DeleteReplicationTaskAssessmentRunRequest(input *DeleteReplicationTaskAssessmentRunInput) (req *request.Request, output *DeleteReplicationTaskAssessmentRunOutput) { + op := &request.Operation{ + Name: opDeleteReplicationTaskAssessmentRun, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteReplicationTaskAssessmentRunInput{} + } + + output = &DeleteReplicationTaskAssessmentRunOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteReplicationTaskAssessmentRun API operation for AWS Database Migration Service. +// +// Deletes the record of a single premigration assessment run. +// +// This operation removes all metadata that AWS DMS maintains about this assessment +// run. 
However, the operation leaves untouched all information about this assessment +// run that is stored in your Amazon S3 bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Database Migration Service's +// API operation DeleteReplicationTaskAssessmentRun for usage and error information. +// +// Returned Error Types: +// * AccessDeniedFault +// AWS DMS was denied access to the endpoint. Check that the role is correctly +// configured. +// +// * ResourceNotFoundFault +// The resource could not be found. +// +// * InvalidResourceStateFault +// The resource is in a state that prevents it from being used for database +// migration. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/DeleteReplicationTaskAssessmentRun +func (c *DatabaseMigrationService) DeleteReplicationTaskAssessmentRun(input *DeleteReplicationTaskAssessmentRunInput) (*DeleteReplicationTaskAssessmentRunOutput, error) { + req, out := c.DeleteReplicationTaskAssessmentRunRequest(input) + return out, req.Send() +} + +// DeleteReplicationTaskAssessmentRunWithContext is the same as DeleteReplicationTaskAssessmentRun with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteReplicationTaskAssessmentRun for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DatabaseMigrationService) DeleteReplicationTaskAssessmentRunWithContext(ctx aws.Context, input *DeleteReplicationTaskAssessmentRunInput, opts ...request.Option) (*DeleteReplicationTaskAssessmentRunOutput, error) { + req, out := c.DeleteReplicationTaskAssessmentRunRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeAccountAttributes = "DescribeAccountAttributes" // DescribeAccountAttributesRequest generates a "aws/request.Request" representing the @@ -1377,6 +1559,171 @@ func (c *DatabaseMigrationService) DescribeAccountAttributesWithContext(ctx aws. return out, req.Send() } +const opDescribeApplicableIndividualAssessments = "DescribeApplicableIndividualAssessments" + +// DescribeApplicableIndividualAssessmentsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeApplicableIndividualAssessments operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeApplicableIndividualAssessments for more information on using the DescribeApplicableIndividualAssessments +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeApplicableIndividualAssessmentsRequest method. 
+// req, resp := client.DescribeApplicableIndividualAssessmentsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/DescribeApplicableIndividualAssessments +func (c *DatabaseMigrationService) DescribeApplicableIndividualAssessmentsRequest(input *DescribeApplicableIndividualAssessmentsInput) (req *request.Request, output *DescribeApplicableIndividualAssessmentsOutput) { + op := &request.Operation{ + Name: opDescribeApplicableIndividualAssessments, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeApplicableIndividualAssessmentsInput{} + } + + output = &DescribeApplicableIndividualAssessmentsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeApplicableIndividualAssessments API operation for AWS Database Migration Service. +// +// Provides a list of individual assessments that you can specify for a new +// premigration assessment run, given one or more parameters. +// +// If you specify an existing migration task, this operation provides the default +// individual assessments you can specify for that task. Otherwise, the specified +// parameters model elements of a possible migration task on which to base a +// premigration assessment run. +// +// To use these migration task modeling parameters, you must specify an existing +// replication instance, a source database engine, a target database engine, +// and a migration type. This combination of parameters potentially limits the +// default individual assessments available for an assessment run created for +// a corresponding migration task. +// +// If you specify no parameters, this operation provides a list of all possible +// individual assessments that you can specify for an assessment run. If you +// specify any one of the task modeling parameters, you must specify all of +// them or the operation cannot provide a list of individual assessments. The +// only parameter that you can specify alone is for an existing migration task. +// The specified task definition then determines the default list of individual +// assessments that you can specify in an assessment run for the task. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Database Migration Service's +// API operation DescribeApplicableIndividualAssessments for usage and error information. +// +// Returned Error Types: +// * AccessDeniedFault +// AWS DMS was denied access to the endpoint. Check that the role is correctly +// configured. +// +// * ResourceNotFoundFault +// The resource could not be found. +// +// * InvalidResourceStateFault +// The resource is in a state that prevents it from being used for database +// migration. 
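// A hedged usage sketch, not part of the generated examples: the task-modeling form
// of this call, assuming an existing *DatabaseMigrationService client; the ARN and
// engine names below are placeholders.
//
//    out, err := client.DescribeApplicableIndividualAssessments(&databasemigrationservice.DescribeApplicableIndividualAssessmentsInput{
//        ReplicationInstanceArn: aws.String("arn:aws:dms:us-east-1:123456789012:rep:EXAMPLE"), // placeholder
//        SourceEngineName:       aws.String("oracle"),
//        TargetEngineName:       aws.String("postgres"),
//        MigrationType:          aws.String("full-load-and-cdc"),
//    })
//    if err == nil {
//        fmt.Println(aws.StringValueSlice(out.IndividualAssessmentNames))
//    }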
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/DescribeApplicableIndividualAssessments +func (c *DatabaseMigrationService) DescribeApplicableIndividualAssessments(input *DescribeApplicableIndividualAssessmentsInput) (*DescribeApplicableIndividualAssessmentsOutput, error) { + req, out := c.DescribeApplicableIndividualAssessmentsRequest(input) + return out, req.Send() +} + +// DescribeApplicableIndividualAssessmentsWithContext is the same as DescribeApplicableIndividualAssessments with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeApplicableIndividualAssessments for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DatabaseMigrationService) DescribeApplicableIndividualAssessmentsWithContext(ctx aws.Context, input *DescribeApplicableIndividualAssessmentsInput, opts ...request.Option) (*DescribeApplicableIndividualAssessmentsOutput, error) { + req, out := c.DescribeApplicableIndividualAssessmentsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeApplicableIndividualAssessmentsPages iterates over the pages of a DescribeApplicableIndividualAssessments operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeApplicableIndividualAssessments method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeApplicableIndividualAssessments operation. +// pageNum := 0 +// err := client.DescribeApplicableIndividualAssessmentsPages(params, +// func(page *databasemigrationservice.DescribeApplicableIndividualAssessmentsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DatabaseMigrationService) DescribeApplicableIndividualAssessmentsPages(input *DescribeApplicableIndividualAssessmentsInput, fn func(*DescribeApplicableIndividualAssessmentsOutput, bool) bool) error { + return c.DescribeApplicableIndividualAssessmentsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeApplicableIndividualAssessmentsPagesWithContext same as DescribeApplicableIndividualAssessmentsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DatabaseMigrationService) DescribeApplicableIndividualAssessmentsPagesWithContext(ctx aws.Context, input *DescribeApplicableIndividualAssessmentsInput, fn func(*DescribeApplicableIndividualAssessmentsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeApplicableIndividualAssessmentsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeApplicableIndividualAssessmentsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeApplicableIndividualAssessmentsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeCertificates = "DescribeCertificates" // DescribeCertificatesRequest generates a "aws/request.Request" representing the @@ -3182,32 +3529,316 @@ func (c *DatabaseMigrationService) DescribeReplicationTaskAssessmentResultsPages return p.Err() } -const opDescribeReplicationTasks = "DescribeReplicationTasks" +const opDescribeReplicationTaskAssessmentRuns = "DescribeReplicationTaskAssessmentRuns" -// DescribeReplicationTasksRequest generates a "aws/request.Request" representing the -// client's request for the DescribeReplicationTasks operation. The "output" return +// DescribeReplicationTaskAssessmentRunsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReplicationTaskAssessmentRuns operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeReplicationTasks for more information on using the DescribeReplicationTasks +// See DescribeReplicationTaskAssessmentRuns for more information on using the DescribeReplicationTaskAssessmentRuns // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeReplicationTasksRequest method. -// req, resp := client.DescribeReplicationTasksRequest(params) +// // Example sending a request using the DescribeReplicationTaskAssessmentRunsRequest method. +// req, resp := client.DescribeReplicationTaskAssessmentRunsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/DescribeReplicationTasks +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/DescribeReplicationTaskAssessmentRuns +func (c *DatabaseMigrationService) DescribeReplicationTaskAssessmentRunsRequest(input *DescribeReplicationTaskAssessmentRunsInput) (req *request.Request, output *DescribeReplicationTaskAssessmentRunsOutput) { + op := &request.Operation{ + Name: opDescribeReplicationTaskAssessmentRuns, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReplicationTaskAssessmentRunsInput{} + } + + output = &DescribeReplicationTaskAssessmentRunsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeReplicationTaskAssessmentRuns API operation for AWS Database Migration Service. +// +// Returns a paginated list of premigration assessment runs based on filter +// settings. +// +// These filter settings can specify a combination of premigration assessment +// runs, migration tasks, replication instances, and assessment run status values. +// +// This operation doesn't return information about individual assessments. For +// this information, see the DescribeReplicationTaskIndividualAssessments operation. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Database Migration Service's +// API operation DescribeReplicationTaskAssessmentRuns for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundFault +// The resource could not be found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/DescribeReplicationTaskAssessmentRuns +func (c *DatabaseMigrationService) DescribeReplicationTaskAssessmentRuns(input *DescribeReplicationTaskAssessmentRunsInput) (*DescribeReplicationTaskAssessmentRunsOutput, error) { + req, out := c.DescribeReplicationTaskAssessmentRunsRequest(input) + return out, req.Send() +} + +// DescribeReplicationTaskAssessmentRunsWithContext is the same as DescribeReplicationTaskAssessmentRuns with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeReplicationTaskAssessmentRuns for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DatabaseMigrationService) DescribeReplicationTaskAssessmentRunsWithContext(ctx aws.Context, input *DescribeReplicationTaskAssessmentRunsInput, opts ...request.Option) (*DescribeReplicationTaskAssessmentRunsOutput, error) { + req, out := c.DescribeReplicationTaskAssessmentRunsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeReplicationTaskAssessmentRunsPages iterates over the pages of a DescribeReplicationTaskAssessmentRuns operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeReplicationTaskAssessmentRuns method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeReplicationTaskAssessmentRuns operation. +// pageNum := 0 +// err := client.DescribeReplicationTaskAssessmentRunsPages(params, +// func(page *databasemigrationservice.DescribeReplicationTaskAssessmentRunsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DatabaseMigrationService) DescribeReplicationTaskAssessmentRunsPages(input *DescribeReplicationTaskAssessmentRunsInput, fn func(*DescribeReplicationTaskAssessmentRunsOutput, bool) bool) error { + return c.DescribeReplicationTaskAssessmentRunsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeReplicationTaskAssessmentRunsPagesWithContext same as DescribeReplicationTaskAssessmentRunsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
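// A hedged usage sketch, not part of the generated examples: combining this paginator
// with a filter, assuming an existing client and context. The Filters field, the
// "replication-task-arn" filter name, and the output field names are assumptions based
// on the service API; the ARN is a placeholder.
//
//    input := &databasemigrationservice.DescribeReplicationTaskAssessmentRunsInput{
//        Filters: []*databasemigrationservice.Filter{{
//            Name:   aws.String("replication-task-arn"),
//            Values: []*string{aws.String("arn:aws:dms:us-east-1:123456789012:task:EXAMPLE")}, // placeholder
//        }},
//    }
//    err := client.DescribeReplicationTaskAssessmentRunsPagesWithContext(ctx, input,
//        func(page *databasemigrationservice.DescribeReplicationTaskAssessmentRunsOutput, lastPage bool) bool {
//            for _, run := range page.ReplicationTaskAssessmentRuns {
//                fmt.Println(aws.StringValue(run.Status))
//            }
//            return true
//        })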
+func (c *DatabaseMigrationService) DescribeReplicationTaskAssessmentRunsPagesWithContext(ctx aws.Context, input *DescribeReplicationTaskAssessmentRunsInput, fn func(*DescribeReplicationTaskAssessmentRunsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeReplicationTaskAssessmentRunsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeReplicationTaskAssessmentRunsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeReplicationTaskAssessmentRunsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeReplicationTaskIndividualAssessments = "DescribeReplicationTaskIndividualAssessments" + +// DescribeReplicationTaskIndividualAssessmentsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReplicationTaskIndividualAssessments operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeReplicationTaskIndividualAssessments for more information on using the DescribeReplicationTaskIndividualAssessments +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeReplicationTaskIndividualAssessmentsRequest method. +// req, resp := client.DescribeReplicationTaskIndividualAssessmentsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/DescribeReplicationTaskIndividualAssessments +func (c *DatabaseMigrationService) DescribeReplicationTaskIndividualAssessmentsRequest(input *DescribeReplicationTaskIndividualAssessmentsInput) (req *request.Request, output *DescribeReplicationTaskIndividualAssessmentsOutput) { + op := &request.Operation{ + Name: opDescribeReplicationTaskIndividualAssessments, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReplicationTaskIndividualAssessmentsInput{} + } + + output = &DescribeReplicationTaskIndividualAssessmentsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeReplicationTaskIndividualAssessments API operation for AWS Database Migration Service. +// +// Returns a paginated list of individual assessments based on filter settings. +// +// These filter settings can specify a combination of premigration assessment +// runs, migration tasks, and assessment status values. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Database Migration Service's +// API operation DescribeReplicationTaskIndividualAssessments for usage and error information. 
+// +// Returned Error Types: +// * ResourceNotFoundFault +// The resource could not be found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/DescribeReplicationTaskIndividualAssessments +func (c *DatabaseMigrationService) DescribeReplicationTaskIndividualAssessments(input *DescribeReplicationTaskIndividualAssessmentsInput) (*DescribeReplicationTaskIndividualAssessmentsOutput, error) { + req, out := c.DescribeReplicationTaskIndividualAssessmentsRequest(input) + return out, req.Send() +} + +// DescribeReplicationTaskIndividualAssessmentsWithContext is the same as DescribeReplicationTaskIndividualAssessments with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeReplicationTaskIndividualAssessments for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DatabaseMigrationService) DescribeReplicationTaskIndividualAssessmentsWithContext(ctx aws.Context, input *DescribeReplicationTaskIndividualAssessmentsInput, opts ...request.Option) (*DescribeReplicationTaskIndividualAssessmentsOutput, error) { + req, out := c.DescribeReplicationTaskIndividualAssessmentsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeReplicationTaskIndividualAssessmentsPages iterates over the pages of a DescribeReplicationTaskIndividualAssessments operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeReplicationTaskIndividualAssessments method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeReplicationTaskIndividualAssessments operation. +// pageNum := 0 +// err := client.DescribeReplicationTaskIndividualAssessmentsPages(params, +// func(page *databasemigrationservice.DescribeReplicationTaskIndividualAssessmentsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DatabaseMigrationService) DescribeReplicationTaskIndividualAssessmentsPages(input *DescribeReplicationTaskIndividualAssessmentsInput, fn func(*DescribeReplicationTaskIndividualAssessmentsOutput, bool) bool) error { + return c.DescribeReplicationTaskIndividualAssessmentsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeReplicationTaskIndividualAssessmentsPagesWithContext same as DescribeReplicationTaskIndividualAssessmentsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
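// A hedged usage sketch, not part of the generated examples: bounding the pagination
// with a timeout through the context form, assuming an existing client and that the
// standard context and time packages are imported.
//
//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//    defer cancel()
//    err := client.DescribeReplicationTaskIndividualAssessmentsPagesWithContext(ctx,
//        &databasemigrationservice.DescribeReplicationTaskIndividualAssessmentsInput{},
//        func(page *databasemigrationservice.DescribeReplicationTaskIndividualAssessmentsOutput, lastPage bool) bool {
//            fmt.Println(page)
//            return true
//        })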
+func (c *DatabaseMigrationService) DescribeReplicationTaskIndividualAssessmentsPagesWithContext(ctx aws.Context, input *DescribeReplicationTaskIndividualAssessmentsInput, fn func(*DescribeReplicationTaskIndividualAssessmentsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeReplicationTaskIndividualAssessmentsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeReplicationTaskIndividualAssessmentsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeReplicationTaskIndividualAssessmentsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeReplicationTasks = "DescribeReplicationTasks" + +// DescribeReplicationTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReplicationTasks operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeReplicationTasks for more information on using the DescribeReplicationTasks +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeReplicationTasksRequest method. +// req, resp := client.DescribeReplicationTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/DescribeReplicationTasks func (c *DatabaseMigrationService) DescribeReplicationTasksRequest(input *DescribeReplicationTasksInput) (req *request.Request, output *DescribeReplicationTasksOutput) { op := &request.Operation{ Name: opDescribeReplicationTasks, @@ -4776,60 +5407,72 @@ func (c *DatabaseMigrationService) StartReplicationTaskAssessmentWithContext(ctx return out, req.Send() } -const opStopReplicationTask = "StopReplicationTask" +const opStartReplicationTaskAssessmentRun = "StartReplicationTaskAssessmentRun" -// StopReplicationTaskRequest generates a "aws/request.Request" representing the -// client's request for the StopReplicationTask operation. The "output" return +// StartReplicationTaskAssessmentRunRequest generates a "aws/request.Request" representing the +// client's request for the StartReplicationTaskAssessmentRun operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See StopReplicationTask for more information on using the StopReplicationTask +// See StartReplicationTaskAssessmentRun for more information on using the StartReplicationTaskAssessmentRun // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the StopReplicationTaskRequest method. 
-// req, resp := client.StopReplicationTaskRequest(params) +// // Example sending a request using the StartReplicationTaskAssessmentRunRequest method. +// req, resp := client.StartReplicationTaskAssessmentRunRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/StopReplicationTask -func (c *DatabaseMigrationService) StopReplicationTaskRequest(input *StopReplicationTaskInput) (req *request.Request, output *StopReplicationTaskOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/StartReplicationTaskAssessmentRun +func (c *DatabaseMigrationService) StartReplicationTaskAssessmentRunRequest(input *StartReplicationTaskAssessmentRunInput) (req *request.Request, output *StartReplicationTaskAssessmentRunOutput) { op := &request.Operation{ - Name: opStopReplicationTask, + Name: opStartReplicationTaskAssessmentRun, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &StopReplicationTaskInput{} + input = &StartReplicationTaskAssessmentRunInput{} } - output = &StopReplicationTaskOutput{} + output = &StartReplicationTaskAssessmentRunOutput{} req = c.newRequest(op, input, output) return } -// StopReplicationTask API operation for AWS Database Migration Service. +// StartReplicationTaskAssessmentRun API operation for AWS Database Migration Service. // -// Stops the replication task. +// Starts a new premigration assessment run for one or more individual assessments +// of a migration task. +// +// The assessments that you can specify depend on the source and target database +// engine and the migration type defined for the given task. To run this operation, +// your migration task must already be created. After you run this operation, +// you can review the status of each individual assessment. You can also run +// the migration task manually after the assessment run and its individual assessments +// complete. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Database Migration Service's -// API operation StopReplicationTask for usage and error information. +// API operation StartReplicationTaskAssessmentRun for usage and error information. // // Returned Error Types: +// * AccessDeniedFault +// AWS DMS was denied access to the endpoint. Check that the role is correctly +// configured. +// // * ResourceNotFoundFault // The resource could not be found. // @@ -4837,22 +5480,134 @@ func (c *DatabaseMigrationService) StopReplicationTaskRequest(input *StopReplica // The resource is in a state that prevents it from being used for database // migration. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/StopReplicationTask -func (c *DatabaseMigrationService) StopReplicationTask(input *StopReplicationTaskInput) (*StopReplicationTaskOutput, error) { - req, out := c.StopReplicationTaskRequest(input) - return out, req.Send() -} - -// StopReplicationTaskWithContext is the same as StopReplicationTask with the addition of -// the ability to pass a context and additional request options. +// * KMSAccessDeniedFault +// The ciphertext references a key that doesn't exist or that the DMS account +// doesn't have access to. // -// See StopReplicationTask for details on how to use this API operation. 
+// * KMSDisabledFault +// The specified master key (CMK) isn't enabled. // -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DatabaseMigrationService) StopReplicationTaskWithContext(ctx aws.Context, input *StopReplicationTaskInput, opts ...request.Option) (*StopReplicationTaskOutput, error) { +// * KMSFault +// An AWS Key Management Service (AWS KMS) error is preventing access to AWS +// KMS. +// +// * KMSInvalidStateFault +// The state of the specified AWS KMS resource isn't valid for this request. +// +// * KMSNotFoundFault +// The specified AWS KMS entity or resource can't be found. +// +// * KMSKeyNotAccessibleFault +// AWS DMS cannot access the AWS KMS key. +// +// * S3AccessDeniedFault +// Insufficient privileges are preventing access to an Amazon S3 object. +// +// * S3ResourceNotFoundFault +// A specified Amazon S3 bucket, bucket folder, or other object can't be found. +// +// * ResourceAlreadyExistsFault +// The resource you are attempting to create already exists. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/StartReplicationTaskAssessmentRun +func (c *DatabaseMigrationService) StartReplicationTaskAssessmentRun(input *StartReplicationTaskAssessmentRunInput) (*StartReplicationTaskAssessmentRunOutput, error) { + req, out := c.StartReplicationTaskAssessmentRunRequest(input) + return out, req.Send() +} + +// StartReplicationTaskAssessmentRunWithContext is the same as StartReplicationTaskAssessmentRun with the addition of +// the ability to pass a context and additional request options. +// +// See StartReplicationTaskAssessmentRun for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DatabaseMigrationService) StartReplicationTaskAssessmentRunWithContext(ctx aws.Context, input *StartReplicationTaskAssessmentRunInput, opts ...request.Option) (*StartReplicationTaskAssessmentRunOutput, error) { + req, out := c.StartReplicationTaskAssessmentRunRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStopReplicationTask = "StopReplicationTask" + +// StopReplicationTaskRequest generates a "aws/request.Request" representing the +// client's request for the StopReplicationTask operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopReplicationTask for more information on using the StopReplicationTask +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StopReplicationTaskRequest method. 
+// req, resp := client.StopReplicationTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/StopReplicationTask +func (c *DatabaseMigrationService) StopReplicationTaskRequest(input *StopReplicationTaskInput) (req *request.Request, output *StopReplicationTaskOutput) { + op := &request.Operation{ + Name: opStopReplicationTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopReplicationTaskInput{} + } + + output = &StopReplicationTaskOutput{} + req = c.newRequest(op, input, output) + return +} + +// StopReplicationTask API operation for AWS Database Migration Service. +// +// Stops the replication task. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Database Migration Service's +// API operation StopReplicationTask for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundFault +// The resource could not be found. +// +// * InvalidResourceStateFault +// The resource is in a state that prevents it from being used for database +// migration. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/StopReplicationTask +func (c *DatabaseMigrationService) StopReplicationTask(input *StopReplicationTaskInput) (*StopReplicationTaskOutput, error) { + req, out := c.StopReplicationTaskRequest(input) + return out, req.Send() +} + +// StopReplicationTaskWithContext is the same as StopReplicationTask with the addition of +// the ability to pass a context and additional request options. +// +// See StopReplicationTask for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DatabaseMigrationService) StopReplicationTaskWithContext(ctx aws.Context, input *StopReplicationTaskInput, opts ...request.Option) (*StopReplicationTaskOutput, error) { req, out := c.StopReplicationTaskRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) @@ -5244,6 +5999,67 @@ func (s *AvailabilityZone) SetName(v string) *AvailabilityZone { return s } +type CancelReplicationTaskAssessmentRunInput struct { + _ struct{} `type:"structure"` + + // Amazon Resource Name (ARN) of the premigration assessment run to be canceled. + // + // ReplicationTaskAssessmentRunArn is a required field + ReplicationTaskAssessmentRunArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelReplicationTaskAssessmentRunInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelReplicationTaskAssessmentRunInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CancelReplicationTaskAssessmentRunInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelReplicationTaskAssessmentRunInput"} + if s.ReplicationTaskAssessmentRunArn == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationTaskAssessmentRunArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetReplicationTaskAssessmentRunArn sets the ReplicationTaskAssessmentRunArn field's value. +func (s *CancelReplicationTaskAssessmentRunInput) SetReplicationTaskAssessmentRunArn(v string) *CancelReplicationTaskAssessmentRunInput { + s.ReplicationTaskAssessmentRunArn = &v + return s +} + +type CancelReplicationTaskAssessmentRunOutput struct { + _ struct{} `type:"structure"` + + // The ReplicationTaskAssessmentRun object for the canceled assessment run. + ReplicationTaskAssessmentRun *ReplicationTaskAssessmentRun `type:"structure"` +} + +// String returns the string representation +func (s CancelReplicationTaskAssessmentRunOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelReplicationTaskAssessmentRunOutput) GoString() string { + return s.String() +} + +// SetReplicationTaskAssessmentRun sets the ReplicationTaskAssessmentRun field's value. +func (s *CancelReplicationTaskAssessmentRunOutput) SetReplicationTaskAssessmentRun(v *ReplicationTaskAssessmentRun) *CancelReplicationTaskAssessmentRunOutput { + s.ReplicationTaskAssessmentRun = v + return s +} + // The SSL certificate that can be used to encrypt connections between the endpoints // and the replication instance. type Certificate struct { @@ -5506,6 +6322,12 @@ type CreateEndpointInput struct { // in the AWS Database Migration Service User Guide. ExtraConnectionAttributes *string `type:"string"` + // Settings in JSON format for the source IBM Db2 LUW endpoint. For information + // about other available settings, see Extra connection attributes when using + // Db2 LUW as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.DB2.ConnectionAttrib) + // in the AWS Database Migration Service User Guide. + IBMDb2Settings *IBMDb2Settings `type:"structure"` + // Settings in JSON format for the target Apache Kafka endpoint. For more information // about the available settings, see Using Apache Kafka as a Target for AWS // Database Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kafka.html) @@ -5528,24 +6350,56 @@ type CreateEndpointInput struct { // account has a different default encryption key for each AWS Region. KmsKeyId *string `type:"string"` + // Settings in JSON format for the source and target Microsoft SQL Server endpoint. + // For information about other available settings, see Extra connection attributes + // when using SQL Server as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.SQLServer.ConnectionAttrib) + // and Extra connection attributes when using SQL Server as a target for AWS + // DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.SQLServer.ConnectionAttrib) + // in the AWS Database Migration Service User Guide. + MicrosoftSQLServerSettings *MicrosoftSQLServerSettings `type:"structure"` + // Settings in JSON format for the source MongoDB endpoint. 
For more information // about the available settings, see Using MongoDB as a Target for AWS Database // Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.MongoDB.html#CHAP_Source.MongoDB.Configuration) // in the AWS Database Migration Service User Guide. MongoDbSettings *MongoDbSettings `type:"structure"` + // Settings in JSON format for the source and target MySQL endpoint. For information + // about other available settings, see Extra connection attributes when using + // MySQL as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.MySQL.ConnectionAttrib) + // and Extra connection attributes when using a MySQL-compatible database as + // a target for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.MySQL.ConnectionAttrib) + // in the AWS Database Migration Service User Guide. + MySQLSettings *MySQLSettings `type:"structure"` + // Settings in JSON format for the target Amazon Neptune endpoint. For more // information about the available settings, see Specifying Endpoint Settings // for Amazon Neptune as a Target (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Neptune.html#CHAP_Target.Neptune.EndpointSettings) // in the AWS Database Migration Service User Guide. NeptuneSettings *NeptuneSettings `type:"structure"` + // Settings in JSON format for the source and target Oracle endpoint. For information + // about other available settings, see Extra connection attributes when using + // Oracle as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Oracle.ConnectionAttrib) + // and Extra connection attributes when using Oracle as a target for AWS DMS + // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Oracle.ConnectionAttrib) + // in the AWS Database Migration Service User Guide. + OracleSettings *OracleSettings `type:"structure"` + // The password to be used to log in to the endpoint database. Password *string `type:"string" sensitive:"true"` // The port used by the endpoint database. Port *int64 `type:"integer"` + // Settings in JSON format for the source and target PostgreSQL endpoint. For + // information about other available settings, see Extra connection attributes + // when using PostgreSQL as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.PostgreSQL.ConnectionAttrib) + // and Extra connection attributes when using PostgreSQL as a target for AWS + // DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.PostgreSQL.ConnectionAttrib) + // in the AWS Database Migration Service User Guide. + PostgreSQLSettings *PostgreSQLSettings `type:"structure"` + // Provides information that defines an Amazon Redshift endpoint. RedshiftSettings *RedshiftSettings `type:"structure"` @@ -5566,6 +6420,14 @@ type CreateEndpointInput struct { // is none SslMode *string `type:"string" enum:"DmsSslModeValue"` + // Settings in JSON format for the source and target SAP ASE endpoint. For information + // about other available settings, see Extra connection attributes when using + // SAP ASE as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.SAP.ConnectionAttrib) + // and Extra connection attributes when using SAP ASE as a target for AWS DMS + // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.SAP.ConnectionAttrib) + // in the AWS Database Migration Service User Guide. + SybaseSettings *SybaseSettings `type:"structure"` + // One or more tags to be assigned to the endpoint. 
Tags []*Tag `type:"list"` @@ -5677,6 +6539,12 @@ func (s *CreateEndpointInput) SetExtraConnectionAttributes(v string) *CreateEndp return s } +// SetIBMDb2Settings sets the IBMDb2Settings field's value. +func (s *CreateEndpointInput) SetIBMDb2Settings(v *IBMDb2Settings) *CreateEndpointInput { + s.IBMDb2Settings = v + return s +} + // SetKafkaSettings sets the KafkaSettings field's value. func (s *CreateEndpointInput) SetKafkaSettings(v *KafkaSettings) *CreateEndpointInput { s.KafkaSettings = v @@ -5695,18 +6563,36 @@ func (s *CreateEndpointInput) SetKmsKeyId(v string) *CreateEndpointInput { return s } +// SetMicrosoftSQLServerSettings sets the MicrosoftSQLServerSettings field's value. +func (s *CreateEndpointInput) SetMicrosoftSQLServerSettings(v *MicrosoftSQLServerSettings) *CreateEndpointInput { + s.MicrosoftSQLServerSettings = v + return s +} + // SetMongoDbSettings sets the MongoDbSettings field's value. func (s *CreateEndpointInput) SetMongoDbSettings(v *MongoDbSettings) *CreateEndpointInput { s.MongoDbSettings = v return s } +// SetMySQLSettings sets the MySQLSettings field's value. +func (s *CreateEndpointInput) SetMySQLSettings(v *MySQLSettings) *CreateEndpointInput { + s.MySQLSettings = v + return s +} + // SetNeptuneSettings sets the NeptuneSettings field's value. func (s *CreateEndpointInput) SetNeptuneSettings(v *NeptuneSettings) *CreateEndpointInput { s.NeptuneSettings = v return s } +// SetOracleSettings sets the OracleSettings field's value. +func (s *CreateEndpointInput) SetOracleSettings(v *OracleSettings) *CreateEndpointInput { + s.OracleSettings = v + return s +} + // SetPassword sets the Password field's value. func (s *CreateEndpointInput) SetPassword(v string) *CreateEndpointInput { s.Password = &v @@ -5719,6 +6605,12 @@ func (s *CreateEndpointInput) SetPort(v int64) *CreateEndpointInput { return s } +// SetPostgreSQLSettings sets the PostgreSQLSettings field's value. +func (s *CreateEndpointInput) SetPostgreSQLSettings(v *PostgreSQLSettings) *CreateEndpointInput { + s.PostgreSQLSettings = v + return s +} + // SetRedshiftSettings sets the RedshiftSettings field's value. func (s *CreateEndpointInput) SetRedshiftSettings(v *RedshiftSettings) *CreateEndpointInput { s.RedshiftSettings = v @@ -5749,6 +6641,12 @@ func (s *CreateEndpointInput) SetSslMode(v string) *CreateEndpointInput { return s } +// SetSybaseSettings sets the SybaseSettings field's value. +func (s *CreateEndpointInput) SetSybaseSettings(v *SybaseSettings) *CreateEndpointInput { + s.SybaseSettings = v + return s +} + // SetTags sets the Tags field's value. func (s *CreateEndpointInput) SetTags(v []*Tag) *CreateEndpointInput { s.Tags = v @@ -6856,6 +7754,67 @@ func (s DeleteReplicationSubnetGroupOutput) GoString() string { return s.String() } +type DeleteReplicationTaskAssessmentRunInput struct { + _ struct{} `type:"structure"` + + // Amazon Resource Name (ARN) of the premigration assessment run to be deleted. + // + // ReplicationTaskAssessmentRunArn is a required field + ReplicationTaskAssessmentRunArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteReplicationTaskAssessmentRunInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReplicationTaskAssessmentRunInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
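// A hedged aside, not part of the generated examples: the engine-specific settings
// structures added to CreateEndpointInput above can be supplied directly when creating
// an endpoint. The MySQLSettings field names and all connection values below are
// assumptions or placeholders, and an existing client is assumed.
//
//    _, err := client.CreateEndpoint(&databasemigrationservice.CreateEndpointInput{
//        EndpointIdentifier: aws.String("my-mysql-source"), // placeholder
//        EndpointType:       aws.String("source"),
//        EngineName:         aws.String("mysql"),
//        MySQLSettings: &databasemigrationservice.MySQLSettings{
//            ServerName:   aws.String("db.example.com"), // placeholder
//            Port:         aws.Int64(3306),
//            DatabaseName: aws.String("mydb"),
//            Username:     aws.String("dms_user"),
//            Password:     aws.String("example-password"),
//        },
//    })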
+func (s *DeleteReplicationTaskAssessmentRunInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteReplicationTaskAssessmentRunInput"} + if s.ReplicationTaskAssessmentRunArn == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationTaskAssessmentRunArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetReplicationTaskAssessmentRunArn sets the ReplicationTaskAssessmentRunArn field's value. +func (s *DeleteReplicationTaskAssessmentRunInput) SetReplicationTaskAssessmentRunArn(v string) *DeleteReplicationTaskAssessmentRunInput { + s.ReplicationTaskAssessmentRunArn = &v + return s +} + +type DeleteReplicationTaskAssessmentRunOutput struct { + _ struct{} `type:"structure"` + + // The ReplicationTaskAssessmentRun object for the deleted assessment run. + ReplicationTaskAssessmentRun *ReplicationTaskAssessmentRun `type:"structure"` +} + +// String returns the string representation +func (s DeleteReplicationTaskAssessmentRunOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReplicationTaskAssessmentRunOutput) GoString() string { + return s.String() +} + +// SetReplicationTaskAssessmentRun sets the ReplicationTaskAssessmentRun field's value. +func (s *DeleteReplicationTaskAssessmentRunOutput) SetReplicationTaskAssessmentRun(v *ReplicationTaskAssessmentRun) *DeleteReplicationTaskAssessmentRunOutput { + s.ReplicationTaskAssessmentRun = v + return s +} + type DeleteReplicationTaskInput struct { _ struct{} `type:"structure"` @@ -6972,52 +7931,178 @@ func (s *DescribeAccountAttributesOutput) SetUniqueAccountIdentifier(v string) * return s } -type DescribeCertificatesInput struct { +type DescribeApplicableIndividualAssessmentsInput struct { _ struct{} `type:"structure"` - // Filters applied to the certificate described in the form of key-value pairs. - Filters []*Filter `type:"list"` - - // An optional pagination token provided by a previous request. If this parameter + // Optional pagination token provided by a previous request. If this parameter // is specified, the response includes only records beyond the marker, up to // the value specified by MaxRecords. Marker *string `type:"string"` - // The maximum number of records to include in the response. If more records - // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. - // - // Default: 10 + // Maximum number of records to include in the response. If more records exist + // than the specified MaxRecords value, a pagination token called a marker is + // included in the response so that the remaining results can be retrieved. MaxRecords *int64 `type:"integer"` + + // Name of the migration type that each provided individual assessment must + // support. + MigrationType *string `type:"string" enum:"MigrationTypeValue"` + + // ARN of a replication instance on which you want to base the default list + // of individual assessments. + ReplicationInstanceArn *string `type:"string"` + + // Amazon Resource Name (ARN) of a migration task on which you want to base + // the default list of individual assessments. + ReplicationTaskArn *string `type:"string"` + + // Name of a database engine that the specified replication instance supports + // as a source. + SourceEngineName *string `type:"string"` + + // Name of a database engine that the specified replication instance supports + // as a target. 
+ TargetEngineName *string `type:"string"` } // String returns the string representation -func (s DescribeCertificatesInput) String() string { +func (s DescribeApplicableIndividualAssessmentsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeCertificatesInput) GoString() string { +func (s DescribeApplicableIndividualAssessmentsInput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeCertificatesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeCertificatesInput"} - if s.Filters != nil { - for i, v := range s.Filters { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) - } - } - } +// SetMarker sets the Marker field's value. +func (s *DescribeApplicableIndividualAssessmentsInput) SetMarker(v string) *DescribeApplicableIndividualAssessmentsInput { + s.Marker = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } +// SetMaxRecords sets the MaxRecords field's value. +func (s *DescribeApplicableIndividualAssessmentsInput) SetMaxRecords(v int64) *DescribeApplicableIndividualAssessmentsInput { + s.MaxRecords = &v + return s +} + +// SetMigrationType sets the MigrationType field's value. +func (s *DescribeApplicableIndividualAssessmentsInput) SetMigrationType(v string) *DescribeApplicableIndividualAssessmentsInput { + s.MigrationType = &v + return s +} + +// SetReplicationInstanceArn sets the ReplicationInstanceArn field's value. +func (s *DescribeApplicableIndividualAssessmentsInput) SetReplicationInstanceArn(v string) *DescribeApplicableIndividualAssessmentsInput { + s.ReplicationInstanceArn = &v + return s +} + +// SetReplicationTaskArn sets the ReplicationTaskArn field's value. +func (s *DescribeApplicableIndividualAssessmentsInput) SetReplicationTaskArn(v string) *DescribeApplicableIndividualAssessmentsInput { + s.ReplicationTaskArn = &v + return s +} + +// SetSourceEngineName sets the SourceEngineName field's value. +func (s *DescribeApplicableIndividualAssessmentsInput) SetSourceEngineName(v string) *DescribeApplicableIndividualAssessmentsInput { + s.SourceEngineName = &v + return s +} + +// SetTargetEngineName sets the TargetEngineName field's value. +func (s *DescribeApplicableIndividualAssessmentsInput) SetTargetEngineName(v string) *DescribeApplicableIndividualAssessmentsInput { + s.TargetEngineName = &v + return s +} + +type DescribeApplicableIndividualAssessmentsOutput struct { + _ struct{} `type:"structure"` + + // List of names for the individual assessments supported by the premigration + // assessment run that you start based on the specified request parameters. + // For more information on the available individual assessments, including compatibility + // with different migration task configurations, see Working with premigration + // assessment runs (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.AssessmentReport.html) + // in the AWS Database Migration Service User Guide. + IndividualAssessmentNames []*string `type:"list"` + + // Pagination token returned for you to pass to a subsequent request. If you + // pass this token as the Marker value in a subsequent request, the response + // includes only records beyond the marker, up to the value specified in the + // request by MaxRecords. 
+ Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeApplicableIndividualAssessmentsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeApplicableIndividualAssessmentsOutput) GoString() string { + return s.String() +} + +// SetIndividualAssessmentNames sets the IndividualAssessmentNames field's value. +func (s *DescribeApplicableIndividualAssessmentsOutput) SetIndividualAssessmentNames(v []*string) *DescribeApplicableIndividualAssessmentsOutput { + s.IndividualAssessmentNames = v + return s +} + +// SetMarker sets the Marker field's value. +func (s *DescribeApplicableIndividualAssessmentsOutput) SetMarker(v string) *DescribeApplicableIndividualAssessmentsOutput { + s.Marker = &v + return s +} + +type DescribeCertificatesInput struct { + _ struct{} `type:"structure"` + + // Filters applied to the certificates described in the form of key-value pairs. + Filters []*Filter `type:"list"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 10 + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeCertificatesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCertificatesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeCertificatesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeCertificatesInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } return nil } @@ -7180,7 +8265,7 @@ func (s *DescribeConnectionsOutput) SetMarker(v string) *DescribeConnectionsOutp type DescribeEndpointTypesInput struct { _ struct{} `type:"structure"` - // Filters applied to the describe action. + // Filters applied to the endpoint types. // // Valid filter names: engine-name | endpoint-type Filters []*Filter `type:"list"` @@ -7285,7 +8370,7 @@ func (s *DescribeEndpointTypesOutput) SetSupportedEndpointTypes(v []*SupportedEn type DescribeEndpointsInput struct { _ struct{} `type:"structure"` - // Filters applied to the describe action. + // Filters applied to the endpoints. // // Valid filter names: endpoint-arn | endpoint-type | endpoint-id | engine-name Filters []*Filter `type:"list"` @@ -7390,7 +8475,7 @@ func (s *DescribeEndpointsOutput) SetMarker(v string) *DescribeEndpointsOutput { type DescribeEventCategoriesInput struct { _ struct{} `type:"structure"` - // Filters applied to the action. + // Filters applied to the event categories. Filters []*Filter `type:"list"` // The type of AWS DMS resource that generates events. 
@@ -7467,7 +8552,7 @@ func (s *DescribeEventCategoriesOutput) SetEventCategoryGroupList(v []*EventCate type DescribeEventSubscriptionsInput struct { _ struct{} `type:"structure"` - // Filters applied to the action. + // Filters applied to event subscriptions. Filters []*Filter `type:"list"` // An optional pagination token provided by a previous request. If this parameter @@ -7588,7 +8673,7 @@ type DescribeEventsInput struct { // A list of event categories for the source type that you've chosen. EventCategories []*string `type:"list"` - // Filters applied to the action. + // Filters applied to events. Filters []*Filter `type:"list"` // An optional pagination token provided by a previous request. If this parameter @@ -8092,7 +9177,7 @@ func (s *DescribeReplicationInstanceTaskLogsOutput) SetReplicationInstanceTaskLo type DescribeReplicationInstancesInput struct { _ struct{} `type:"structure"` - // Filters applied to the describe action. + // Filters applied to replication instances. // // Valid filter names: replication-instance-arn | replication-instance-id | // replication-instance-class | engine-version @@ -8198,7 +9283,7 @@ func (s *DescribeReplicationInstancesOutput) SetReplicationInstances(v []*Replic type DescribeReplicationSubnetGroupsInput struct { _ struct{} `type:"structure"` - // Filters applied to the describe action. + // Filters applied to replication subnet groups. // // Valid filter names: replication-subnet-group-id Filters []*Filter `type:"list"` @@ -8394,10 +9479,218 @@ func (s *DescribeReplicationTaskAssessmentResultsOutput) SetReplicationTaskAsses return s } +type DescribeReplicationTaskAssessmentRunsInput struct { + _ struct{} `type:"structure"` + + // Filters applied to the premigration assessment runs described in the form + // of key-value pairs. + // + // Valid filter names: replication-task-assessment-run-arn, replication-task-arn, + // replication-instance-arn, status + Filters []*Filter `type:"list"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeReplicationTaskAssessmentRunsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReplicationTaskAssessmentRunsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeReplicationTaskAssessmentRunsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeReplicationTaskAssessmentRunsInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. 
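The `DescribeApplicableIndividualAssessments` operation introduced earlier in this diff returns the names of the individual assessments that a given task or instance configuration supports. A hypothetical sketch of calling it, assuming the same imports and `svc` client as the first sketch above; the task ARN is a placeholder argument.

```go
// Hypothetical usage sketch: list the individual assessments applicable to a task.
func listApplicableAssessments(svc *dms.DatabaseMigrationService, taskArn string) error {
	out, err := svc.DescribeApplicableIndividualAssessments(&dms.DescribeApplicableIndividualAssessmentsInput{
		ReplicationTaskArn: aws.String(taskArn),
	})
	if err != nil {
		return err
	}
	for _, name := range out.IndividualAssessmentNames {
		fmt.Println(aws.StringValue(name))
	}
	return nil
}
```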
+func (s *DescribeReplicationTaskAssessmentRunsInput) SetFilters(v []*Filter) *DescribeReplicationTaskAssessmentRunsInput { + s.Filters = v + return s +} + +// SetMarker sets the Marker field's value. +func (s *DescribeReplicationTaskAssessmentRunsInput) SetMarker(v string) *DescribeReplicationTaskAssessmentRunsInput { + s.Marker = &v + return s +} + +// SetMaxRecords sets the MaxRecords field's value. +func (s *DescribeReplicationTaskAssessmentRunsInput) SetMaxRecords(v int64) *DescribeReplicationTaskAssessmentRunsInput { + s.MaxRecords = &v + return s +} + +type DescribeReplicationTaskAssessmentRunsOutput struct { + _ struct{} `type:"structure"` + + // A pagination token returned for you to pass to a subsequent request. If you + // pass this token as the Marker value in a subsequent request, the response + // includes only records beyond the marker, up to the value specified in the + // request by MaxRecords. + Marker *string `type:"string"` + + // One or more premigration assessment runs as specified by Filters. + ReplicationTaskAssessmentRuns []*ReplicationTaskAssessmentRun `type:"list"` +} + +// String returns the string representation +func (s DescribeReplicationTaskAssessmentRunsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReplicationTaskAssessmentRunsOutput) GoString() string { + return s.String() +} + +// SetMarker sets the Marker field's value. +func (s *DescribeReplicationTaskAssessmentRunsOutput) SetMarker(v string) *DescribeReplicationTaskAssessmentRunsOutput { + s.Marker = &v + return s +} + +// SetReplicationTaskAssessmentRuns sets the ReplicationTaskAssessmentRuns field's value. +func (s *DescribeReplicationTaskAssessmentRunsOutput) SetReplicationTaskAssessmentRuns(v []*ReplicationTaskAssessmentRun) *DescribeReplicationTaskAssessmentRunsOutput { + s.ReplicationTaskAssessmentRuns = v + return s +} + +type DescribeReplicationTaskIndividualAssessmentsInput struct { + _ struct{} `type:"structure"` + + // Filters applied to the individual assessments described in the form of key-value + // pairs. + // + // Valid filter names: replication-task-assessment-run-arn, replication-task-arn, + // status + Filters []*Filter `type:"list"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeReplicationTaskIndividualAssessmentsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReplicationTaskIndividualAssessmentsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
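`DescribeReplicationTaskAssessmentRuns` takes the same generic `Filter` type used by the other describe operations; the valid filter names are listed in the field comment above. A hypothetical sketch, again assuming the `svc` client and imports from the first example, that lists the assessment runs for one task:

```go
// Hypothetical usage sketch: list premigration assessment runs for a single task.
func listAssessmentRuns(svc *dms.DatabaseMigrationService, taskArn string) error {
	out, err := svc.DescribeReplicationTaskAssessmentRuns(&dms.DescribeReplicationTaskAssessmentRunsInput{
		Filters: []*dms.Filter{{
			Name:   aws.String("replication-task-arn"),
			Values: []*string{aws.String(taskArn)},
		}},
		MaxRecords: aws.Int64(20),
	})
	if err != nil {
		return err
	}
	for _, run := range out.ReplicationTaskAssessmentRuns {
		fmt.Printf("%s: %s\n", aws.StringValue(run.AssessmentRunName), aws.StringValue(run.Status))
	}
	return nil
}
```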
+func (s *DescribeReplicationTaskIndividualAssessmentsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeReplicationTaskIndividualAssessmentsInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *DescribeReplicationTaskIndividualAssessmentsInput) SetFilters(v []*Filter) *DescribeReplicationTaskIndividualAssessmentsInput { + s.Filters = v + return s +} + +// SetMarker sets the Marker field's value. +func (s *DescribeReplicationTaskIndividualAssessmentsInput) SetMarker(v string) *DescribeReplicationTaskIndividualAssessmentsInput { + s.Marker = &v + return s +} + +// SetMaxRecords sets the MaxRecords field's value. +func (s *DescribeReplicationTaskIndividualAssessmentsInput) SetMaxRecords(v int64) *DescribeReplicationTaskIndividualAssessmentsInput { + s.MaxRecords = &v + return s +} + +type DescribeReplicationTaskIndividualAssessmentsOutput struct { + _ struct{} `type:"structure"` + + // A pagination token returned for you to pass to a subsequent request. If you + // pass this token as the Marker value in a subsequent request, the response + // includes only records beyond the marker, up to the value specified in the + // request by MaxRecords. + Marker *string `type:"string"` + + // One or more individual assessments as specified by Filters. + ReplicationTaskIndividualAssessments []*ReplicationTaskIndividualAssessment `type:"list"` +} + +// String returns the string representation +func (s DescribeReplicationTaskIndividualAssessmentsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReplicationTaskIndividualAssessmentsOutput) GoString() string { + return s.String() +} + +// SetMarker sets the Marker field's value. +func (s *DescribeReplicationTaskIndividualAssessmentsOutput) SetMarker(v string) *DescribeReplicationTaskIndividualAssessmentsOutput { + s.Marker = &v + return s +} + +// SetReplicationTaskIndividualAssessments sets the ReplicationTaskIndividualAssessments field's value. +func (s *DescribeReplicationTaskIndividualAssessmentsOutput) SetReplicationTaskIndividualAssessments(v []*ReplicationTaskIndividualAssessment) *DescribeReplicationTaskIndividualAssessmentsOutput { + s.ReplicationTaskIndividualAssessments = v + return s +} + type DescribeReplicationTasksInput struct { _ struct{} `type:"structure"` - // Filters applied to the describe action. + // Filters applied to replication tasks. // // Valid filter names: replication-task-arn | replication-task-id | migration-type // | endpoint-arn | replication-instance-arn @@ -8612,7 +9905,7 @@ func (s *DescribeSchemasOutput) SetSchemas(v []*string) *DescribeSchemasOutput { type DescribeTableStatisticsInput struct { _ struct{} `type:"structure"` - // Filters applied to the describe table statistics action. + // Filters applied to table statistics. // // Valid filter names: schema-name | table-name | table-state // @@ -8924,7 +10217,7 @@ type Endpoint struct { // "BucketName": "string", "CompressionType": "none"|"gzip" } DmsTransferSettings *DmsTransferSettings `type:"structure"` - // The settings for the target DynamoDB database. For more information, see + // The settings for the DynamoDB target endpoint. 
For more information, see // the DynamoDBSettings structure. DynamoDbSettings *DynamoDbSettings `type:"structure"` @@ -8964,6 +10257,10 @@ type Endpoint struct { // Additional connection attributes used to connect to the endpoint. ExtraConnectionAttributes *string `type:"string"` + // The settings for the IBM Db2 LUW source endpoint. For more information, see + // the IBMDb2Settings structure. + IBMDb2Settings *IBMDb2Settings `type:"structure"` + // The settings for the Apache Kafka target endpoint. For more information, // see the KafkaSettings structure. KafkaSettings *KafkaSettings `type:"structure"` @@ -8982,17 +10279,33 @@ type Endpoint struct { // account has a different default encryption key for each AWS Region. KmsKeyId *string `type:"string"` + // The settings for the Microsoft SQL Server source and target endpoint. For + // more information, see the MicrosoftSQLServerSettings structure. + MicrosoftSQLServerSettings *MicrosoftSQLServerSettings `type:"structure"` + // The settings for the MongoDB source endpoint. For more information, see the // MongoDbSettings structure. MongoDbSettings *MongoDbSettings `type:"structure"` + // The settings for the MySQL source and target endpoint. For more information, + // see the MySQLSettings structure. + MySQLSettings *MySQLSettings `type:"structure"` + // The settings for the Amazon Neptune target endpoint. For more information, // see the NeptuneSettings structure. NeptuneSettings *NeptuneSettings `type:"structure"` + // The settings for the Oracle source and target endpoint. For more information, + // see the OracleSettings structure. + OracleSettings *OracleSettings `type:"structure"` + // The port value used to access the endpoint. Port *int64 `type:"integer"` + // The settings for the PostgreSQL source and target endpoint. For more information, + // see the PostgreSQLSettings structure. + PostgreSQLSettings *PostgreSQLSettings `type:"structure"` + // Settings for the Amazon Redshift endpoint. RedshiftSettings *RedshiftSettings `type:"structure"` @@ -9012,6 +10325,10 @@ type Endpoint struct { // The status of the endpoint. Status *string `type:"string"` + // The settings for the SAP ASE source and target endpoint. For more information, + // see the SybaseSettings structure. + SybaseSettings *SybaseSettings `type:"structure"` + // The user name used to connect to the endpoint. Username *string `type:"string"` } @@ -9104,6 +10421,12 @@ func (s *Endpoint) SetExtraConnectionAttributes(v string) *Endpoint { return s } +// SetIBMDb2Settings sets the IBMDb2Settings field's value. +func (s *Endpoint) SetIBMDb2Settings(v *IBMDb2Settings) *Endpoint { + s.IBMDb2Settings = v + return s +} + // SetKafkaSettings sets the KafkaSettings field's value. func (s *Endpoint) SetKafkaSettings(v *KafkaSettings) *Endpoint { s.KafkaSettings = v @@ -9122,24 +10445,48 @@ func (s *Endpoint) SetKmsKeyId(v string) *Endpoint { return s } +// SetMicrosoftSQLServerSettings sets the MicrosoftSQLServerSettings field's value. +func (s *Endpoint) SetMicrosoftSQLServerSettings(v *MicrosoftSQLServerSettings) *Endpoint { + s.MicrosoftSQLServerSettings = v + return s +} + // SetMongoDbSettings sets the MongoDbSettings field's value. func (s *Endpoint) SetMongoDbSettings(v *MongoDbSettings) *Endpoint { s.MongoDbSettings = v return s } +// SetMySQLSettings sets the MySQLSettings field's value. +func (s *Endpoint) SetMySQLSettings(v *MySQLSettings) *Endpoint { + s.MySQLSettings = v + return s +} + // SetNeptuneSettings sets the NeptuneSettings field's value. 
func (s *Endpoint) SetNeptuneSettings(v *NeptuneSettings) *Endpoint { s.NeptuneSettings = v return s } +// SetOracleSettings sets the OracleSettings field's value. +func (s *Endpoint) SetOracleSettings(v *OracleSettings) *Endpoint { + s.OracleSettings = v + return s +} + // SetPort sets the Port field's value. func (s *Endpoint) SetPort(v int64) *Endpoint { s.Port = &v return s } +// SetPostgreSQLSettings sets the PostgreSQLSettings field's value. +func (s *Endpoint) SetPostgreSQLSettings(v *PostgreSQLSettings) *Endpoint { + s.PostgreSQLSettings = v + return s +} + // SetRedshiftSettings sets the RedshiftSettings field's value. func (s *Endpoint) SetRedshiftSettings(v *RedshiftSettings) *Endpoint { s.RedshiftSettings = v @@ -9176,6 +10523,12 @@ func (s *Endpoint) SetStatus(v string) *Endpoint { return s } +// SetSybaseSettings sets the SybaseSettings field's value. +func (s *Endpoint) SetSybaseSettings(v *SybaseSettings) *Endpoint { + s.SybaseSettings = v + return s +} + // SetUsername sets the Username field's value. func (s *Endpoint) SetUsername(v string) *Endpoint { s.Username = &v @@ -9392,17 +10745,19 @@ func (s *EventSubscription) SetSubscriptionCreationTime(v string) *EventSubscrip return s } -// Identifies the name and value of a source filter object used to limit the -// number and type of records transferred from your source to your target. +// Identifies the name and value of a filter object. This filter is used to +// limit the number and type of AWS DMS objects that are returned for a particular +// Describe* or similar operation. type Filter struct { _ struct{} `type:"structure"` - // The name of the filter. + // The name of the filter as specified for a Describe* or similar operation. // // Name is a required field Name *string `type:"string" required:"true"` - // The filter value. + // The filter value, which can specify one or more values used to narrow the + // returned results. // // Values is a required field Values []*string `type:"list" required:"true"` @@ -9446,6 +10801,66 @@ func (s *Filter) SetValues(v []*string) *Filter { return s } +// Provides information that defines an IBM Db2 LUW endpoint. +type IBMDb2Settings struct { + _ struct{} `type:"structure"` + + // Database name for the endpoint. + DatabaseName *string `type:"string"` + + // Endpoint connection password. + Password *string `type:"string" sensitive:"true"` + + // Endpoint TCP port. + Port *int64 `type:"integer"` + + // Fully qualified domain name of the endpoint. + ServerName *string `type:"string"` + + // Endpoint connection user name. + Username *string `type:"string"` +} + +// String returns the string representation +func (s IBMDb2Settings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IBMDb2Settings) GoString() string { + return s.String() +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *IBMDb2Settings) SetDatabaseName(v string) *IBMDb2Settings { + s.DatabaseName = &v + return s +} + +// SetPassword sets the Password field's value. +func (s *IBMDb2Settings) SetPassword(v string) *IBMDb2Settings { + s.Password = &v + return s +} + +// SetPort sets the Port field's value. +func (s *IBMDb2Settings) SetPort(v int64) *IBMDb2Settings { + s.Port = &v + return s +} + +// SetServerName sets the ServerName field's value. +func (s *IBMDb2Settings) SetServerName(v string) *IBMDb2Settings { + s.ServerName = &v + return s +} + +// SetUsername sets the Username field's value. 
+func (s *IBMDb2Settings) SetUsername(v string) *IBMDb2Settings { + s.Username = &v + return s +} + type ImportCertificateInput struct { _ struct{} `type:"structure"` @@ -9876,6 +11291,63 @@ func (s *KMSDisabledFault) RequestID() string { return s.RespMetadata.RequestID } +// An AWS Key Management Service (AWS KMS) error is preventing access to AWS +// KMS. +type KMSFault struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s KMSFault) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KMSFault) GoString() string { + return s.String() +} + +func newErrorKMSFault(v protocol.ResponseMetadata) error { + return &KMSFault{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *KMSFault) Code() string { + return "KMSFault" +} + +// Message returns the exception's message. +func (s *KMSFault) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *KMSFault) OrigErr() error { + return nil +} + +func (s *KMSFault) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *KMSFault) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *KMSFault) RequestID() string { + return s.RespMetadata.RequestID +} + // The state of the specified AWS KMS resource isn't valid for this request. type KMSInvalidStateFault struct { _ struct{} `type:"structure"` @@ -10111,6 +11583,38 @@ type KafkaSettings struct { // "ec2-12-345-678-901.compute-1.amazonaws.com:2345". Broker *string `type:"string"` + // Shows detailed control information for table definition, column definition, + // and table and column changes in the Kafka message output. The default is + // False. + IncludeControlDetails *bool `type:"boolean"` + + // Shows the partition value within the Kafka message output, unless the partition + // type is schema-table-type. The default is False. + IncludePartitionValue *bool `type:"boolean"` + + // Includes any data definition language (DDL) operations that change the table + // in the control data, such as rename-table, drop-table, add-column, drop-column, + // and rename-column. The default is False. + IncludeTableAlterOperations *bool `type:"boolean"` + + // Provides detailed transaction information from the source database. This + // information includes a commit timestamp, a log position, and values for transaction_id, + // previous transaction_id, and transaction_record_id (the record offset within + // a transaction). The default is False. + IncludeTransactionDetails *bool `type:"boolean"` + + // The output format for the records created on the endpoint. The message format + // is JSON (default) or JSON_UNFORMATTED (a single line with no tab). + MessageFormat *string `type:"string" enum:"MessageFormatValue"` + + // Prefixes schema and table names to partition values, when the partition type + // is primary-key-type. Doing this increases data distribution among Kafka partitions. + // For example, suppose that a SysBench schema has thousands of tables and each + // table has only limited range for a primary key. 
In this case, the same primary + // key is sent from thousands of tables to the same partition, which causes + // throttling. The default is False. + PartitionIncludeSchemaTable *bool `type:"boolean"` + // The topic to which you migrate the data. If you don't specify a topic, AWS // DMS specifies "kafka-default-topic" as the migration topic. Topic *string `type:"string"` @@ -10132,6 +11636,42 @@ func (s *KafkaSettings) SetBroker(v string) *KafkaSettings { return s } +// SetIncludeControlDetails sets the IncludeControlDetails field's value. +func (s *KafkaSettings) SetIncludeControlDetails(v bool) *KafkaSettings { + s.IncludeControlDetails = &v + return s +} + +// SetIncludePartitionValue sets the IncludePartitionValue field's value. +func (s *KafkaSettings) SetIncludePartitionValue(v bool) *KafkaSettings { + s.IncludePartitionValue = &v + return s +} + +// SetIncludeTableAlterOperations sets the IncludeTableAlterOperations field's value. +func (s *KafkaSettings) SetIncludeTableAlterOperations(v bool) *KafkaSettings { + s.IncludeTableAlterOperations = &v + return s +} + +// SetIncludeTransactionDetails sets the IncludeTransactionDetails field's value. +func (s *KafkaSettings) SetIncludeTransactionDetails(v bool) *KafkaSettings { + s.IncludeTransactionDetails = &v + return s +} + +// SetMessageFormat sets the MessageFormat field's value. +func (s *KafkaSettings) SetMessageFormat(v string) *KafkaSettings { + s.MessageFormat = &v + return s +} + +// SetPartitionIncludeSchemaTable sets the PartitionIncludeSchemaTable field's value. +func (s *KafkaSettings) SetPartitionIncludeSchemaTable(v bool) *KafkaSettings { + s.PartitionIncludeSchemaTable = &v + return s +} + // SetTopic sets the Topic field's value. func (s *KafkaSettings) SetTopic(v string) *KafkaSettings { s.Topic = &v @@ -10304,6 +11844,66 @@ func (s *ListTagsForResourceOutput) SetTagList(v []*Tag) *ListTagsForResourceOut return s } +// Provides information that defines a Microsoft SQL Server endpoint. +type MicrosoftSQLServerSettings struct { + _ struct{} `type:"structure"` + + // Database name for the endpoint. + DatabaseName *string `type:"string"` + + // Endpoint connection password. + Password *string `type:"string" sensitive:"true"` + + // Endpoint TCP port. + Port *int64 `type:"integer"` + + // Fully qualified domain name of the endpoint. + ServerName *string `type:"string"` + + // Endpoint connection user name. + Username *string `type:"string"` +} + +// String returns the string representation +func (s MicrosoftSQLServerSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MicrosoftSQLServerSettings) GoString() string { + return s.String() +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *MicrosoftSQLServerSettings) SetDatabaseName(v string) *MicrosoftSQLServerSettings { + s.DatabaseName = &v + return s +} + +// SetPassword sets the Password field's value. +func (s *MicrosoftSQLServerSettings) SetPassword(v string) *MicrosoftSQLServerSettings { + s.Password = &v + return s +} + +// SetPort sets the Port field's value. +func (s *MicrosoftSQLServerSettings) SetPort(v int64) *MicrosoftSQLServerSettings { + s.Port = &v + return s +} + +// SetServerName sets the ServerName field's value. +func (s *MicrosoftSQLServerSettings) SetServerName(v string) *MicrosoftSQLServerSettings { + s.ServerName = &v + return s +} + +// SetUsername sets the Username field's value. 
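The new `KafkaSettings` fields mirror the message-shaping options that already exist for Kinesis targets. A hypothetical sketch, assuming the `svc` client and imports from the first example, of turning the extra detail flags on for a Kafka target endpoint; the endpoint ARN and broker address are placeholders.

```go
// Hypothetical usage sketch: enable the new Kafka message-shaping options on a target endpoint.
func enableKafkaDetails(svc *dms.DatabaseMigrationService, endpointArn string) error {
	_, err := svc.ModifyEndpoint(&dms.ModifyEndpointInput{
		EndpointArn: aws.String(endpointArn),
		KafkaSettings: &dms.KafkaSettings{
			Broker:                      aws.String("broker.example.com:9092"), // placeholder
			Topic:                       aws.String("dms-changes"),
			IncludeTransactionDetails:   aws.Bool(true),
			IncludePartitionValue:       aws.Bool(true),
			IncludeControlDetails:       aws.Bool(true),
			PartitionIncludeSchemaTable: aws.Bool(true),
		},
	})
	return err
}
```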
+func (s *MicrosoftSQLServerSettings) SetUsername(v string) *MicrosoftSQLServerSettings { + s.Username = &v + return s +} + type ModifyEndpointInput struct { _ struct{} `type:"structure"` @@ -10371,6 +11971,12 @@ type ModifyEndpointInput struct { // pass the empty string ("") as an argument. ExtraConnectionAttributes *string `type:"string"` + // Settings in JSON format for the source IBM Db2 LUW endpoint. For information + // about other available settings, see Extra connection attributes when using + // Db2 LUW as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.DB2.ConnectionAttrib) + // in the AWS Database Migration Service User Guide. + IBMDb2Settings *IBMDb2Settings `type:"structure"` + // Settings in JSON format for the target Apache Kafka endpoint. For more information // about the available settings, see Using Apache Kafka as a Target for AWS // Database Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kafka.html) @@ -10383,24 +11989,56 @@ type ModifyEndpointInput struct { // in the AWS Database Migration Service User Guide. KinesisSettings *KinesisSettings `type:"structure"` + // Settings in JSON format for the source and target Microsoft SQL Server endpoint. + // For information about other available settings, see Extra connection attributes + // when using SQL Server as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.SQLServer.ConnectionAttrib) + // and Extra connection attributes when using SQL Server as a target for AWS + // DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.SQLServer.ConnectionAttrib) + // in the AWS Database Migration Service User Guide. + MicrosoftSQLServerSettings *MicrosoftSQLServerSettings `type:"structure"` + // Settings in JSON format for the source MongoDB endpoint. For more information // about the available settings, see the configuration properties section in // Using MongoDB as a Target for AWS Database Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.MongoDB.html) // in the AWS Database Migration Service User Guide. MongoDbSettings *MongoDbSettings `type:"structure"` + // Settings in JSON format for the source and target MySQL endpoint. For information + // about other available settings, see Extra connection attributes when using + // MySQL as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.MySQL.ConnectionAttrib) + // and Extra connection attributes when using a MySQL-compatible database as + // a target for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.MySQL.ConnectionAttrib) + // in the AWS Database Migration Service User Guide. + MySQLSettings *MySQLSettings `type:"structure"` + // Settings in JSON format for the target Amazon Neptune endpoint. For more // information about the available settings, see Specifying Endpoint Settings // for Amazon Neptune as a Target (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Neptune.html#CHAP_Target.Neptune.EndpointSettings) // in the AWS Database Migration Service User Guide. NeptuneSettings *NeptuneSettings `type:"structure"` + // Settings in JSON format for the source and target Oracle endpoint. 
For information + // about other available settings, see Extra connection attributes when using + // Oracle as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Oracle.ConnectionAttrib) + // and Extra connection attributes when using Oracle as a target for AWS DMS + // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Oracle.ConnectionAttrib) + // in the AWS Database Migration Service User Guide. + OracleSettings *OracleSettings `type:"structure"` + // The password to be used to login to the endpoint database. Password *string `type:"string" sensitive:"true"` // The port used by the endpoint database. Port *int64 `type:"integer"` + // Settings in JSON format for the source and target PostgreSQL endpoint. For + // information about other available settings, see Extra connection attributes + // when using PostgreSQL as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.PostgreSQL.ConnectionAttrib) + // and Extra connection attributes when using PostgreSQL as a target for AWS + // DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.PostgreSQL.ConnectionAttrib) + // in the AWS Database Migration Service User Guide. + PostgreSQLSettings *PostgreSQLSettings `type:"structure"` + // Provides information that defines an Amazon Redshift endpoint. RedshiftSettings *RedshiftSettings `type:"structure"` @@ -10420,6 +12058,14 @@ type ModifyEndpointInput struct { // The SSL mode used to connect to the endpoint. The default value is none. SslMode *string `type:"string" enum:"DmsSslModeValue"` + // Settings in JSON format for the source and target SAP ASE endpoint. For information + // about other available settings, see Extra connection attributes when using + // SAP ASE as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.SAP.ConnectionAttrib) + // and Extra connection attributes when using SAP ASE as a target for AWS DMS + // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.SAP.ConnectionAttrib) + // in the AWS Database Migration Service User Guide. + SybaseSettings *SybaseSettings `type:"structure"` + // The user name to be used to login to the endpoint database. Username *string `type:"string"` } @@ -10528,6 +12174,12 @@ func (s *ModifyEndpointInput) SetExtraConnectionAttributes(v string) *ModifyEndp return s } +// SetIBMDb2Settings sets the IBMDb2Settings field's value. +func (s *ModifyEndpointInput) SetIBMDb2Settings(v *IBMDb2Settings) *ModifyEndpointInput { + s.IBMDb2Settings = v + return s +} + // SetKafkaSettings sets the KafkaSettings field's value. func (s *ModifyEndpointInput) SetKafkaSettings(v *KafkaSettings) *ModifyEndpointInput { s.KafkaSettings = v @@ -10540,18 +12192,36 @@ func (s *ModifyEndpointInput) SetKinesisSettings(v *KinesisSettings) *ModifyEndp return s } +// SetMicrosoftSQLServerSettings sets the MicrosoftSQLServerSettings field's value. +func (s *ModifyEndpointInput) SetMicrosoftSQLServerSettings(v *MicrosoftSQLServerSettings) *ModifyEndpointInput { + s.MicrosoftSQLServerSettings = v + return s +} + // SetMongoDbSettings sets the MongoDbSettings field's value. func (s *ModifyEndpointInput) SetMongoDbSettings(v *MongoDbSettings) *ModifyEndpointInput { s.MongoDbSettings = v return s } +// SetMySQLSettings sets the MySQLSettings field's value. +func (s *ModifyEndpointInput) SetMySQLSettings(v *MySQLSettings) *ModifyEndpointInput { + s.MySQLSettings = v + return s +} + // SetNeptuneSettings sets the NeptuneSettings field's value. 
func (s *ModifyEndpointInput) SetNeptuneSettings(v *NeptuneSettings) *ModifyEndpointInput { s.NeptuneSettings = v return s } +// SetOracleSettings sets the OracleSettings field's value. +func (s *ModifyEndpointInput) SetOracleSettings(v *OracleSettings) *ModifyEndpointInput { + s.OracleSettings = v + return s +} + // SetPassword sets the Password field's value. func (s *ModifyEndpointInput) SetPassword(v string) *ModifyEndpointInput { s.Password = &v @@ -10564,6 +12234,12 @@ func (s *ModifyEndpointInput) SetPort(v int64) *ModifyEndpointInput { return s } +// SetPostgreSQLSettings sets the PostgreSQLSettings field's value. +func (s *ModifyEndpointInput) SetPostgreSQLSettings(v *PostgreSQLSettings) *ModifyEndpointInput { + s.PostgreSQLSettings = v + return s +} + // SetRedshiftSettings sets the RedshiftSettings field's value. func (s *ModifyEndpointInput) SetRedshiftSettings(v *RedshiftSettings) *ModifyEndpointInput { s.RedshiftSettings = v @@ -10594,6 +12270,12 @@ func (s *ModifyEndpointInput) SetSslMode(v string) *ModifyEndpointInput { return s } +// SetSybaseSettings sets the SybaseSettings field's value. +func (s *ModifyEndpointInput) SetSybaseSettings(v *SybaseSettings) *ModifyEndpointInput { + s.SybaseSettings = v + return s +} + // SetUsername sets the Username field's value. func (s *ModifyEndpointInput) SetUsername(v string) *ModifyEndpointInput { s.Username = &v @@ -11322,24 +13004,84 @@ func (s *MongoDbSettings) SetUsername(v string) *MongoDbSettings { return s } -// Provides information that defines an Amazon Neptune endpoint. -type NeptuneSettings struct { +// Provides information that defines a MySQL endpoint. +type MySQLSettings struct { _ struct{} `type:"structure"` - // The number of milliseconds for AWS DMS to wait to retry a bulk-load of migrated - // graph data to the Neptune target database before raising an error. The default - // is 250. - ErrorRetryDuration *int64 `type:"integer"` + // Database name for the endpoint. + DatabaseName *string `type:"string"` - // If you want AWS Identity and Access Management (IAM) authorization enabled - // for this endpoint, set this parameter to true. Then attach the appropriate - // IAM policy document to your service role specified by ServiceAccessRoleArn. - // The default is false. - IamAuthEnabled *bool `type:"boolean"` + // Endpoint connection password. + Password *string `type:"string" sensitive:"true"` - // The maximum size in kilobytes of migrated graph data stored in a .csv file - // before AWS DMS bulk-loads the data to the Neptune target database. The default - // is 1,048,576 KB. If the bulk load is successful, AWS DMS clears the bucket, + // Endpoint TCP port. + Port *int64 `type:"integer"` + + // Fully qualified domain name of the endpoint. + ServerName *string `type:"string"` + + // Endpoint connection user name. + Username *string `type:"string"` +} + +// String returns the string representation +func (s MySQLSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MySQLSettings) GoString() string { + return s.String() +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *MySQLSettings) SetDatabaseName(v string) *MySQLSettings { + s.DatabaseName = &v + return s +} + +// SetPassword sets the Password field's value. +func (s *MySQLSettings) SetPassword(v string) *MySQLSettings { + s.Password = &v + return s +} + +// SetPort sets the Port field's value. 
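`ModifyEndpoint` accepts the same typed settings as `CreateEndpoint`, so an existing endpoint can be switched to the structured form instead of extra connection attribute strings. A hypothetical sketch (same client and imports as the first example; the ARN and connection details are placeholders) that updates a PostgreSQL endpoint:

```go
// Hypothetical usage sketch: update a PostgreSQL endpoint with typed settings.
func updatePostgresEndpoint(svc *dms.DatabaseMigrationService, endpointArn string) error {
	_, err := svc.ModifyEndpoint(&dms.ModifyEndpointInput{
		EndpointArn: aws.String(endpointArn),
		PostgreSQLSettings: &dms.PostgreSQLSettings{
			ServerName:   aws.String("postgres.example.com"), // placeholder host
			Port:         aws.Int64(5432),
			DatabaseName: aws.String("appdb"),
			Username:     aws.String("dms_user"),
			Password:     aws.String("example-password"),
		},
	})
	return err
}
```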
+func (s *MySQLSettings) SetPort(v int64) *MySQLSettings { + s.Port = &v + return s +} + +// SetServerName sets the ServerName field's value. +func (s *MySQLSettings) SetServerName(v string) *MySQLSettings { + s.ServerName = &v + return s +} + +// SetUsername sets the Username field's value. +func (s *MySQLSettings) SetUsername(v string) *MySQLSettings { + s.Username = &v + return s +} + +// Provides information that defines an Amazon Neptune endpoint. +type NeptuneSettings struct { + _ struct{} `type:"structure"` + + // The number of milliseconds for AWS DMS to wait to retry a bulk-load of migrated + // graph data to the Neptune target database before raising an error. The default + // is 250. + ErrorRetryDuration *int64 `type:"integer"` + + // If you want AWS Identity and Access Management (IAM) authorization enabled + // for this endpoint, set this parameter to true. Then attach the appropriate + // IAM policy document to your service role specified by ServiceAccessRoleArn. + // The default is false. + IamAuthEnabled *bool `type:"boolean"` + + // The maximum size in kilobytes of migrated graph data stored in a .csv file + // before AWS DMS bulk-loads the data to the Neptune target database. The default + // is 1,048,576 KB. If the bulk load is successful, AWS DMS clears the bucket, // ready to store the next batch of migrated graph data. MaxFileSize *int64 `type:"integer"` @@ -11436,6 +13178,138 @@ func (s *NeptuneSettings) SetServiceAccessRoleArn(v string) *NeptuneSettings { return s } +// Provides information that defines an Oracle endpoint. +type OracleSettings struct { + _ struct{} `type:"structure"` + + // For an Oracle source endpoint, your Oracle Automatic Storage Management (ASM) + // password. You can set this value from the asm_user_password value. You set + // this value as part of the comma-separated value that you set to the Password + // request parameter when you create the endpoint to access transaction logs + // using Binary Reader. For more information, see Configuration for change data + // capture (CDC) on an Oracle source database (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Oracle.html#dms/latest/userguide/CHAP_Source.Oracle.html#CHAP_Source.Oracle.CDC.Configuration). + AsmPassword *string `type:"string" sensitive:"true"` + + // For an Oracle source endpoint, your ASM server address. You can set this + // value from the asm_server value. You set asm_server as part of the extra + // connection attribute string to access an Oracle server with Binary Reader + // that uses ASM. For more information, see Configuration for change data capture + // (CDC) on an Oracle source database (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Oracle.html#dms/latest/userguide/CHAP_Source.Oracle.html#CHAP_Source.Oracle.CDC.Configuration). + AsmServer *string `type:"string"` + + // For an Oracle source endpoint, your ASM user name. You can set this value + // from the asm_user value. You set asm_user as part of the extra connection + // attribute string to access an Oracle server with Binary Reader that uses + // ASM. For more information, see Configuration for change data capture (CDC) + // on an Oracle source database (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Oracle.html#dms/latest/userguide/CHAP_Source.Oracle.html#CHAP_Source.Oracle.CDC.Configuration). + AsmUser *string `type:"string"` + + // Database name for the endpoint. + DatabaseName *string `type:"string"` + + // Endpoint connection password. 
+	Password *string `type:"string" sensitive:"true"`
+
+	// Endpoint TCP port.
+	Port *int64 `type:"integer"`
+
+	// For an Oracle source endpoint, the transparent data encryption (TDE) password
+	// required by AWS DMS to access Oracle redo logs encrypted by TDE using Binary
+	// Reader. It is also the TDE_Password part of the comma-separated value you
+	// set to the Password request parameter when you create the endpoint. The SecurityDbEncryption
+	// setting is related to this SecurityDbEncryptionName setting. For more information,
+	// see Supported encryption methods for using Oracle as a source for AWS DMS
+	// (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Oracle.html#CHAP_Source.Oracle.Encryption)
+	// in the AWS Database Migration Service User Guide.
+	SecurityDbEncryption *string `type:"string" sensitive:"true"`
+
+	// For an Oracle source endpoint, the name of a key used for the transparent
+	// data encryption (TDE) of the columns and tablespaces in an Oracle source
+	// database that is encrypted using TDE. The key value is the value of the SecurityDbEncryption
+	// setting. For more information on setting the key name value of SecurityDbEncryptionName,
+	// see the information and example for setting the securityDbEncryptionName
+	// extra connection attribute in Supported encryption methods for using Oracle
+	// as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Oracle.html#CHAP_Source.Oracle.Encryption)
+	// in the AWS Database Migration Service User Guide.
+	SecurityDbEncryptionName *string `type:"string"`
+
+	// Fully qualified domain name of the endpoint.
+	ServerName *string `type:"string"`
+
+	// Endpoint connection user name.
+	Username *string `type:"string"`
+}
+
+// String returns the string representation
+func (s OracleSettings) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s OracleSettings) GoString() string {
+	return s.String()
+}
+
+// SetAsmPassword sets the AsmPassword field's value.
+func (s *OracleSettings) SetAsmPassword(v string) *OracleSettings {
+	s.AsmPassword = &v
+	return s
+}
+
+// SetAsmServer sets the AsmServer field's value.
+func (s *OracleSettings) SetAsmServer(v string) *OracleSettings {
+	s.AsmServer = &v
+	return s
+}
+
+// SetAsmUser sets the AsmUser field's value.
+func (s *OracleSettings) SetAsmUser(v string) *OracleSettings {
+	s.AsmUser = &v
+	return s
+}
+
+// SetDatabaseName sets the DatabaseName field's value.
+func (s *OracleSettings) SetDatabaseName(v string) *OracleSettings {
+	s.DatabaseName = &v
+	return s
+}
+
+// SetPassword sets the Password field's value.
+func (s *OracleSettings) SetPassword(v string) *OracleSettings {
+	s.Password = &v
+	return s
+}
+
+// SetPort sets the Port field's value.
+func (s *OracleSettings) SetPort(v int64) *OracleSettings {
+	s.Port = &v
+	return s
+}
+
+// SetSecurityDbEncryption sets the SecurityDbEncryption field's value.
+func (s *OracleSettings) SetSecurityDbEncryption(v string) *OracleSettings {
+	s.SecurityDbEncryption = &v
+	return s
+}
+
+// SetSecurityDbEncryptionName sets the SecurityDbEncryptionName field's value.
+func (s *OracleSettings) SetSecurityDbEncryptionName(v string) *OracleSettings {
+	s.SecurityDbEncryptionName = &v
+	return s
+}
+
+// SetServerName sets the ServerName field's value.
+func (s *OracleSettings) SetServerName(v string) *OracleSettings {
+	s.ServerName = &v
+	return s
+}
+
+// SetUsername sets the Username field's value.
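`OracleSettings` is the largest of the new settings structs because it also carries the Binary Reader, ASM, and TDE fields documented above. A hypothetical sketch (same client and imports as the first example; every host, name, and secret below is a placeholder) of creating an Oracle CDC source that reads encrypted redo logs:

```go
// Hypothetical usage sketch: Oracle source endpoint with ASM and TDE settings.
func createOracleSource(svc *dms.DatabaseMigrationService) (*dms.CreateEndpointOutput, error) {
	return svc.CreateEndpoint(&dms.CreateEndpointInput{
		EndpointIdentifier: aws.String("example-oracle-source"),
		EndpointType:       aws.String("source"),
		EngineName:         aws.String("oracle"),
		OracleSettings: &dms.OracleSettings{
			ServerName:               aws.String("oracle.example.com"), // placeholder host
			Port:                     aws.Int64(1521),
			DatabaseName:             aws.String("ORCL"),
			Username:                 aws.String("dms_user"),
			Password:                 aws.String("example-password"),
			AsmServer:                aws.String("asm.example.com:1521/+ASM"), // placeholder ASM address
			AsmUser:                  aws.String("asm_user"),
			AsmPassword:              aws.String("example-asm-password"),
			SecurityDbEncryptionName: aws.String("EXAMPLE_TDE_KEY"),
			SecurityDbEncryption:     aws.String("example-tde-password"),
		},
	})
}
```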
+func (s *OracleSettings) SetUsername(v string) *OracleSettings { + s.Username = &v + return s +} + // In response to the DescribeOrderableReplicationInstances operation, this // object describes an available replication instance. This description includes // the replication instance's type, engine version, and allocated storage. @@ -11629,6 +13503,66 @@ func (s *PendingMaintenanceAction) SetOptInStatus(v string) *PendingMaintenanceA return s } +// Provides information that defines a PostgreSQL endpoint. +type PostgreSQLSettings struct { + _ struct{} `type:"structure"` + + // Database name for the endpoint. + DatabaseName *string `type:"string"` + + // Endpoint connection password. + Password *string `type:"string" sensitive:"true"` + + // Endpoint TCP port. + Port *int64 `type:"integer"` + + // Fully qualified domain name of the endpoint. + ServerName *string `type:"string"` + + // Endpoint connection user name. + Username *string `type:"string"` +} + +// String returns the string representation +func (s PostgreSQLSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PostgreSQLSettings) GoString() string { + return s.String() +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *PostgreSQLSettings) SetDatabaseName(v string) *PostgreSQLSettings { + s.DatabaseName = &v + return s +} + +// SetPassword sets the Password field's value. +func (s *PostgreSQLSettings) SetPassword(v string) *PostgreSQLSettings { + s.Password = &v + return s +} + +// SetPort sets the Port field's value. +func (s *PostgreSQLSettings) SetPort(v int64) *PostgreSQLSettings { + s.Port = &v + return s +} + +// SetServerName sets the ServerName field's value. +func (s *PostgreSQLSettings) SetServerName(v string) *PostgreSQLSettings { + s.ServerName = &v + return s +} + +// SetUsername sets the Username field's value. +func (s *PostgreSQLSettings) SetUsername(v string) *PostgreSQLSettings { + s.Username = &v + return s +} + type RebootReplicationInstanceInput struct { _ struct{} `type:"structure"` @@ -12166,6 +14100,16 @@ func (s *ReloadTablesInput) Validate() error { if s.TablesToReload == nil { invalidParams.Add(request.NewErrParamRequired("TablesToReload")) } + if s.TablesToReload != nil { + for i, v := range s.TablesToReload { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TablesToReload", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -13086,118 +15030,390 @@ func (s *ReplicationTaskAssessmentResult) SetS3ObjectUrl(v string) *ReplicationT return s } -// In response to a request by the DescribeReplicationTasks operation, this -// object provides a collection of statistics about a replication task. -type ReplicationTaskStats struct { +// Provides information that describes a premigration assessment run that you +// have started using the StartReplicationTaskAssessmentRun operation. +// +// Some of the information appears based on other operations that can return +// the ReplicationTaskAssessmentRun object. +type ReplicationTaskAssessmentRun struct { _ struct{} `type:"structure"` - // The elapsed time of the task, in milliseconds. - ElapsedTimeMillis *int64 `type:"long"` + // Indication of the completion progress for the individual assessments specified + // to run. 
+ AssessmentProgress *ReplicationTaskAssessmentRunProgress `type:"structure"` - // The date the replication task was started either with a fresh start or a - // target reload. - FreshStartDate *time.Time `type:"timestamp"` + // Unique name of the assessment run. + AssessmentRunName *string `type:"string"` - // The date the replication task full load was completed. - FullLoadFinishDate *time.Time `type:"timestamp"` + // Last message generated by an individual assessment failure. + LastFailureMessage *string `type:"string"` - // The percent complete for the full load migration task. - FullLoadProgressPercent *int64 `type:"integer"` + // ARN of the migration task associated with this premigration assessment run. + ReplicationTaskArn *string `type:"string"` - // The date the replication task full load was started. - FullLoadStartDate *time.Time `type:"timestamp"` + // Amazon Resource Name (ARN) of this assessment run. + ReplicationTaskAssessmentRunArn *string `type:"string"` - // The date the replication task was started either with a fresh start or a - // resume. For more information, see StartReplicationTaskType (https://docs.aws.amazon.com/dms/latest/APIReference/API_StartReplicationTask.html#DMS-StartReplicationTask-request-StartReplicationTaskType). - StartDate *time.Time `type:"timestamp"` + // Date on which the assessment run was created using the StartReplicationTaskAssessmentRun + // operation. + ReplicationTaskAssessmentRunCreationDate *time.Time `type:"timestamp"` - // The date the replication task was stopped. - StopDate *time.Time `type:"timestamp"` + // Encryption mode used to encrypt the assessment run results. + ResultEncryptionMode *string `type:"string"` - // The number of errors that have occurred during this task. - TablesErrored *int64 `type:"integer"` + // ARN of the AWS KMS encryption key used to encrypt the assessment run results. + ResultKmsKeyArn *string `type:"string"` - // The number of tables loaded for this task. - TablesLoaded *int64 `type:"integer"` + // Amazon S3 bucket where AWS DMS stores the results of this assessment run. + ResultLocationBucket *string `type:"string"` - // The number of tables currently loading for this task. - TablesLoading *int64 `type:"integer"` + // Folder in an Amazon S3 bucket where AWS DMS stores the results of this assessment + // run. + ResultLocationFolder *string `type:"string"` - // The number of tables queued for this task. - TablesQueued *int64 `type:"integer"` + // ARN of the service role used to start the assessment run using the StartReplicationTaskAssessmentRun + // operation. + ServiceAccessRoleArn *string `type:"string"` + + // Assessment run status. + // + // This status can have one of the following values: + // + // * "cancelling" – The assessment run was canceled by the CancelReplicationTaskAssessmentRun + // operation. + // + // * "deleting" – The assessment run was deleted by the DeleteReplicationTaskAssessmentRun + // operation. + // + // * "failed" – At least one individual assessment completed with a failed + // status. + // + // * "error-provisioning" – An internal error occurred while resources + // were provisioned (during provisioning status). + // + // * "error-executing" – An internal error occurred while individual assessments + // ran (during running status). + // + // * "invalid state" – The assessment run is in an unknown state. + // + // * "passed" – All individual assessments have completed, and none has + // a failed status. 
+ // + // * "provisioning" – Resources required to run individual assessments + // are being provisioned. + // + // * "running" – Individual assessments are being run. + // + // * "starting" – The assessment run is starting, but resources are not + // yet being provisioned for individual assessments. + Status *string `type:"string"` } // String returns the string representation -func (s ReplicationTaskStats) String() string { +func (s ReplicationTaskAssessmentRun) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ReplicationTaskStats) GoString() string { +func (s ReplicationTaskAssessmentRun) GoString() string { return s.String() } -// SetElapsedTimeMillis sets the ElapsedTimeMillis field's value. -func (s *ReplicationTaskStats) SetElapsedTimeMillis(v int64) *ReplicationTaskStats { - s.ElapsedTimeMillis = &v +// SetAssessmentProgress sets the AssessmentProgress field's value. +func (s *ReplicationTaskAssessmentRun) SetAssessmentProgress(v *ReplicationTaskAssessmentRunProgress) *ReplicationTaskAssessmentRun { + s.AssessmentProgress = v return s } -// SetFreshStartDate sets the FreshStartDate field's value. -func (s *ReplicationTaskStats) SetFreshStartDate(v time.Time) *ReplicationTaskStats { - s.FreshStartDate = &v +// SetAssessmentRunName sets the AssessmentRunName field's value. +func (s *ReplicationTaskAssessmentRun) SetAssessmentRunName(v string) *ReplicationTaskAssessmentRun { + s.AssessmentRunName = &v return s } -// SetFullLoadFinishDate sets the FullLoadFinishDate field's value. -func (s *ReplicationTaskStats) SetFullLoadFinishDate(v time.Time) *ReplicationTaskStats { - s.FullLoadFinishDate = &v +// SetLastFailureMessage sets the LastFailureMessage field's value. +func (s *ReplicationTaskAssessmentRun) SetLastFailureMessage(v string) *ReplicationTaskAssessmentRun { + s.LastFailureMessage = &v return s } -// SetFullLoadProgressPercent sets the FullLoadProgressPercent field's value. -func (s *ReplicationTaskStats) SetFullLoadProgressPercent(v int64) *ReplicationTaskStats { - s.FullLoadProgressPercent = &v +// SetReplicationTaskArn sets the ReplicationTaskArn field's value. +func (s *ReplicationTaskAssessmentRun) SetReplicationTaskArn(v string) *ReplicationTaskAssessmentRun { + s.ReplicationTaskArn = &v return s } -// SetFullLoadStartDate sets the FullLoadStartDate field's value. -func (s *ReplicationTaskStats) SetFullLoadStartDate(v time.Time) *ReplicationTaskStats { - s.FullLoadStartDate = &v +// SetReplicationTaskAssessmentRunArn sets the ReplicationTaskAssessmentRunArn field's value. +func (s *ReplicationTaskAssessmentRun) SetReplicationTaskAssessmentRunArn(v string) *ReplicationTaskAssessmentRun { + s.ReplicationTaskAssessmentRunArn = &v return s } -// SetStartDate sets the StartDate field's value. -func (s *ReplicationTaskStats) SetStartDate(v time.Time) *ReplicationTaskStats { - s.StartDate = &v +// SetReplicationTaskAssessmentRunCreationDate sets the ReplicationTaskAssessmentRunCreationDate field's value. +func (s *ReplicationTaskAssessmentRun) SetReplicationTaskAssessmentRunCreationDate(v time.Time) *ReplicationTaskAssessmentRun { + s.ReplicationTaskAssessmentRunCreationDate = &v return s } -// SetStopDate sets the StopDate field's value. -func (s *ReplicationTaskStats) SetStopDate(v time.Time) *ReplicationTaskStats { - s.StopDate = &v +// SetResultEncryptionMode sets the ResultEncryptionMode field's value. 
+func (s *ReplicationTaskAssessmentRun) SetResultEncryptionMode(v string) *ReplicationTaskAssessmentRun { + s.ResultEncryptionMode = &v return s } -// SetTablesErrored sets the TablesErrored field's value. -func (s *ReplicationTaskStats) SetTablesErrored(v int64) *ReplicationTaskStats { - s.TablesErrored = &v +// SetResultKmsKeyArn sets the ResultKmsKeyArn field's value. +func (s *ReplicationTaskAssessmentRun) SetResultKmsKeyArn(v string) *ReplicationTaskAssessmentRun { + s.ResultKmsKeyArn = &v return s } -// SetTablesLoaded sets the TablesLoaded field's value. -func (s *ReplicationTaskStats) SetTablesLoaded(v int64) *ReplicationTaskStats { - s.TablesLoaded = &v +// SetResultLocationBucket sets the ResultLocationBucket field's value. +func (s *ReplicationTaskAssessmentRun) SetResultLocationBucket(v string) *ReplicationTaskAssessmentRun { + s.ResultLocationBucket = &v return s } -// SetTablesLoading sets the TablesLoading field's value. -func (s *ReplicationTaskStats) SetTablesLoading(v int64) *ReplicationTaskStats { - s.TablesLoading = &v +// SetResultLocationFolder sets the ResultLocationFolder field's value. +func (s *ReplicationTaskAssessmentRun) SetResultLocationFolder(v string) *ReplicationTaskAssessmentRun { + s.ResultLocationFolder = &v return s } -// SetTablesQueued sets the TablesQueued field's value. +// SetServiceAccessRoleArn sets the ServiceAccessRoleArn field's value. +func (s *ReplicationTaskAssessmentRun) SetServiceAccessRoleArn(v string) *ReplicationTaskAssessmentRun { + s.ServiceAccessRoleArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ReplicationTaskAssessmentRun) SetStatus(v string) *ReplicationTaskAssessmentRun { + s.Status = &v + return s +} + +// The progress values reported by the AssessmentProgress response element. +type ReplicationTaskAssessmentRunProgress struct { + _ struct{} `type:"structure"` + + // The number of individual assessments that have completed, successfully or + // not. + IndividualAssessmentCompletedCount *int64 `type:"integer"` + + // The number of individual assessments that are specified to run. + IndividualAssessmentCount *int64 `type:"integer"` +} + +// String returns the string representation +func (s ReplicationTaskAssessmentRunProgress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationTaskAssessmentRunProgress) GoString() string { + return s.String() +} + +// SetIndividualAssessmentCompletedCount sets the IndividualAssessmentCompletedCount field's value. +func (s *ReplicationTaskAssessmentRunProgress) SetIndividualAssessmentCompletedCount(v int64) *ReplicationTaskAssessmentRunProgress { + s.IndividualAssessmentCompletedCount = &v + return s +} + +// SetIndividualAssessmentCount sets the IndividualAssessmentCount field's value. +func (s *ReplicationTaskAssessmentRunProgress) SetIndividualAssessmentCount(v int64) *ReplicationTaskAssessmentRunProgress { + s.IndividualAssessmentCount = &v + return s +} + +// Provides information that describes an individual assessment from a premigration +// assessment run. +type ReplicationTaskIndividualAssessment struct { + _ struct{} `type:"structure"` + + // Name of this individual assessment. + IndividualAssessmentName *string `type:"string"` + + // ARN of the premigration assessment run that is created to run this individual + // assessment. + ReplicationTaskAssessmentRunArn *string `type:"string"` + + // Amazon Resource Name (ARN) of this individual assessment. 
+ ReplicationTaskIndividualAssessmentArn *string `type:"string"` + + // Date when this individual assessment was started as part of running the StartReplicationTaskAssessmentRun + // operation. + ReplicationTaskIndividualAssessmentStartDate *time.Time `type:"timestamp"` + + // Individual assessment status. + // + // This status can have one of the following values: + // + // * "cancelled" + // + // * "error" + // + // * "failed" + // + // * "passed" + // + // * "pending" + // + // * "running" + Status *string `type:"string"` +} + +// String returns the string representation +func (s ReplicationTaskIndividualAssessment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationTaskIndividualAssessment) GoString() string { + return s.String() +} + +// SetIndividualAssessmentName sets the IndividualAssessmentName field's value. +func (s *ReplicationTaskIndividualAssessment) SetIndividualAssessmentName(v string) *ReplicationTaskIndividualAssessment { + s.IndividualAssessmentName = &v + return s +} + +// SetReplicationTaskAssessmentRunArn sets the ReplicationTaskAssessmentRunArn field's value. +func (s *ReplicationTaskIndividualAssessment) SetReplicationTaskAssessmentRunArn(v string) *ReplicationTaskIndividualAssessment { + s.ReplicationTaskAssessmentRunArn = &v + return s +} + +// SetReplicationTaskIndividualAssessmentArn sets the ReplicationTaskIndividualAssessmentArn field's value. +func (s *ReplicationTaskIndividualAssessment) SetReplicationTaskIndividualAssessmentArn(v string) *ReplicationTaskIndividualAssessment { + s.ReplicationTaskIndividualAssessmentArn = &v + return s +} + +// SetReplicationTaskIndividualAssessmentStartDate sets the ReplicationTaskIndividualAssessmentStartDate field's value. +func (s *ReplicationTaskIndividualAssessment) SetReplicationTaskIndividualAssessmentStartDate(v time.Time) *ReplicationTaskIndividualAssessment { + s.ReplicationTaskIndividualAssessmentStartDate = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ReplicationTaskIndividualAssessment) SetStatus(v string) *ReplicationTaskIndividualAssessment { + s.Status = &v + return s +} + +// In response to a request by the DescribeReplicationTasks operation, this +// object provides a collection of statistics about a replication task. +type ReplicationTaskStats struct { + _ struct{} `type:"structure"` + + // The elapsed time of the task, in milliseconds. + ElapsedTimeMillis *int64 `type:"long"` + + // The date the replication task was started either with a fresh start or a + // target reload. + FreshStartDate *time.Time `type:"timestamp"` + + // The date the replication task full load was completed. + FullLoadFinishDate *time.Time `type:"timestamp"` + + // The percent complete for the full load migration task. + FullLoadProgressPercent *int64 `type:"integer"` + + // The date the replication task full load was started. + FullLoadStartDate *time.Time `type:"timestamp"` + + // The date the replication task was started either with a fresh start or a + // resume. For more information, see StartReplicationTaskType (https://docs.aws.amazon.com/dms/latest/APIReference/API_StartReplicationTask.html#DMS-StartReplicationTask-request-StartReplicationTaskType). + StartDate *time.Time `type:"timestamp"` + + // The date the replication task was stopped. + StopDate *time.Time `type:"timestamp"` + + // The number of errors that have occurred during this task. 
+ TablesErrored *int64 `type:"integer"` + + // The number of tables loaded for this task. + TablesLoaded *int64 `type:"integer"` + + // The number of tables currently loading for this task. + TablesLoading *int64 `type:"integer"` + + // The number of tables queued for this task. + TablesQueued *int64 `type:"integer"` +} + +// String returns the string representation +func (s ReplicationTaskStats) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationTaskStats) GoString() string { + return s.String() +} + +// SetElapsedTimeMillis sets the ElapsedTimeMillis field's value. +func (s *ReplicationTaskStats) SetElapsedTimeMillis(v int64) *ReplicationTaskStats { + s.ElapsedTimeMillis = &v + return s +} + +// SetFreshStartDate sets the FreshStartDate field's value. +func (s *ReplicationTaskStats) SetFreshStartDate(v time.Time) *ReplicationTaskStats { + s.FreshStartDate = &v + return s +} + +// SetFullLoadFinishDate sets the FullLoadFinishDate field's value. +func (s *ReplicationTaskStats) SetFullLoadFinishDate(v time.Time) *ReplicationTaskStats { + s.FullLoadFinishDate = &v + return s +} + +// SetFullLoadProgressPercent sets the FullLoadProgressPercent field's value. +func (s *ReplicationTaskStats) SetFullLoadProgressPercent(v int64) *ReplicationTaskStats { + s.FullLoadProgressPercent = &v + return s +} + +// SetFullLoadStartDate sets the FullLoadStartDate field's value. +func (s *ReplicationTaskStats) SetFullLoadStartDate(v time.Time) *ReplicationTaskStats { + s.FullLoadStartDate = &v + return s +} + +// SetStartDate sets the StartDate field's value. +func (s *ReplicationTaskStats) SetStartDate(v time.Time) *ReplicationTaskStats { + s.StartDate = &v + return s +} + +// SetStopDate sets the StopDate field's value. +func (s *ReplicationTaskStats) SetStopDate(v time.Time) *ReplicationTaskStats { + s.StopDate = &v + return s +} + +// SetTablesErrored sets the TablesErrored field's value. +func (s *ReplicationTaskStats) SetTablesErrored(v int64) *ReplicationTaskStats { + s.TablesErrored = &v + return s +} + +// SetTablesLoaded sets the TablesLoaded field's value. +func (s *ReplicationTaskStats) SetTablesLoaded(v int64) *ReplicationTaskStats { + s.TablesLoaded = &v + return s +} + +// SetTablesLoading sets the TablesLoading field's value. +func (s *ReplicationTaskStats) SetTablesLoading(v int64) *ReplicationTaskStats { + s.TablesLoading = &v + return s +} + +// SetTablesQueued sets the TablesQueued field's value. func (s *ReplicationTaskStats) SetTablesQueued(v int64) *ReplicationTaskStats { s.TablesQueued = &v return s @@ -13409,6 +15625,118 @@ func (s *ResourceQuotaExceededFault) RequestID() string { return s.RespMetadata.RequestID } +// Insufficient privileges are preventing access to an Amazon S3 object. +type S3AccessDeniedFault struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s S3AccessDeniedFault) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3AccessDeniedFault) GoString() string { + return s.String() +} + +func newErrorS3AccessDeniedFault(v protocol.ResponseMetadata) error { + return &S3AccessDeniedFault{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *S3AccessDeniedFault) Code() string { + return "S3AccessDeniedFault" +} + +// Message returns the exception's message. 
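+//
+// A minimal sketch of catching this fault with errors.As after a call such as
+// StartReplicationTaskAssessmentRun; err is the error returned by that call, and
+// the errors and log imports are assumed.
+//
+//    var accessDenied *databasemigrationservice.S3AccessDeniedFault
+//    if errors.As(err, &accessDenied) {
+//        log.Println("DMS cannot access the result bucket:", accessDenied.Message())
+//    }
+//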
+func (s *S3AccessDeniedFault) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *S3AccessDeniedFault) OrigErr() error { + return nil +} + +func (s *S3AccessDeniedFault) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *S3AccessDeniedFault) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *S3AccessDeniedFault) RequestID() string { + return s.RespMetadata.RequestID +} + +// A specified Amazon S3 bucket, bucket folder, or other object can't be found. +type S3ResourceNotFoundFault struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s S3ResourceNotFoundFault) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3ResourceNotFoundFault) GoString() string { + return s.String() +} + +func newErrorS3ResourceNotFoundFault(v protocol.ResponseMetadata) error { + return &S3ResourceNotFoundFault{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *S3ResourceNotFoundFault) Code() string { + return "S3ResourceNotFoundFault" +} + +// Message returns the exception's message. +func (s *S3ResourceNotFoundFault) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *S3ResourceNotFoundFault) OrigErr() error { + return nil +} + +func (s *S3ResourceNotFoundFault) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *S3ResourceNotFoundFault) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *S3ResourceNotFoundFault) RequestID() string { + return s.RespMetadata.RequestID +} + // Settings for exporting data to Amazon S3. type S3Settings struct { _ struct{} `type:"structure"` @@ -13956,6 +16284,187 @@ func (s *StartReplicationTaskAssessmentOutput) SetReplicationTask(v *Replication return s } +type StartReplicationTaskAssessmentRunInput struct { + _ struct{} `type:"structure"` + + // Unique name to identify the assessment run. + // + // AssessmentRunName is a required field + AssessmentRunName *string `type:"string" required:"true"` + + // Space-separated list of names for specific individual assessments that you + // want to exclude. These names come from the default list of individual assessments + // that AWS DMS supports for the associated migration task. This task is specified + // by ReplicationTaskArn. + // + // You can't set a value for Exclude if you also set a value for IncludeOnly + // in the API operation. + // + // To identify the names of the default individual assessments that AWS DMS + // supports for the associated migration task, run the DescribeApplicableIndividualAssessments + // operation using its own ReplicationTaskArn request parameter. + Exclude []*string `type:"list"` + + // Space-separated list of names for specific individual assessments that you + // want to include. 
These names come from the default list of individual assessments + // that AWS DMS supports for the associated migration task. This task is specified + // by ReplicationTaskArn. + // + // You can't set a value for IncludeOnly if you also set a value for Exclude + // in the API operation. + // + // To identify the names of the default individual assessments that AWS DMS + // supports for the associated migration task, run the DescribeApplicableIndividualAssessments + // operation using its own ReplicationTaskArn request parameter. + IncludeOnly []*string `type:"list"` + + // Amazon Resource Name (ARN) of the migration task associated with the premigration + // assessment run that you want to start. + // + // ReplicationTaskArn is a required field + ReplicationTaskArn *string `type:"string" required:"true"` + + // Encryption mode that you can specify to encrypt the results of this assessment + // run. If you don't specify this request parameter, AWS DMS stores the assessment + // run results without encryption. You can specify one of the options following: + // + // * "SSE_S3" – The server-side encryption provided as a default by Amazon + // S3. + // + // * "SSE_KMS" – AWS Key Management Service (AWS KMS) encryption. This + // encryption can use either a custom KMS encryption key that you specify + // or the default KMS encryption key that DMS provides. + ResultEncryptionMode *string `type:"string"` + + // ARN of a custom KMS encryption key that you specify when you set ResultEncryptionMode + // to "SSE_KMS". + ResultKmsKeyArn *string `type:"string"` + + // Amazon S3 bucket where you want AWS DMS to store the results of this assessment + // run. + // + // ResultLocationBucket is a required field + ResultLocationBucket *string `type:"string" required:"true"` + + // Folder within an Amazon S3 bucket where you want AWS DMS to store the results + // of this assessment run. + ResultLocationFolder *string `type:"string"` + + // ARN of a service role needed to start the assessment run. + // + // ServiceAccessRoleArn is a required field + ServiceAccessRoleArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StartReplicationTaskAssessmentRunInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartReplicationTaskAssessmentRunInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartReplicationTaskAssessmentRunInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartReplicationTaskAssessmentRunInput"} + if s.AssessmentRunName == nil { + invalidParams.Add(request.NewErrParamRequired("AssessmentRunName")) + } + if s.ReplicationTaskArn == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationTaskArn")) + } + if s.ResultLocationBucket == nil { + invalidParams.Add(request.NewErrParamRequired("ResultLocationBucket")) + } + if s.ServiceAccessRoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceAccessRoleArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssessmentRunName sets the AssessmentRunName field's value. +func (s *StartReplicationTaskAssessmentRunInput) SetAssessmentRunName(v string) *StartReplicationTaskAssessmentRunInput { + s.AssessmentRunName = &v + return s +} + +// SetExclude sets the Exclude field's value. 
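+//
+// A minimal sketch of starting an assessment run with the four required fields
+// checked by Validate above; sess is an existing *session.Session, and the names,
+// ARNs, and bucket are placeholders.
+//
+//    svc := databasemigrationservice.New(sess)
+//    out, err := svc.StartReplicationTaskAssessmentRun(&databasemigrationservice.StartReplicationTaskAssessmentRunInput{
+//        AssessmentRunName:    aws.String("premigration-check-1"),
+//        ReplicationTaskArn:   aws.String("arn:aws:dms:us-east-1:111122223333:task:EXAMPLETASK"),
+//        ResultLocationBucket: aws.String("my-assessment-results"),
+//        ServiceAccessRoleArn: aws.String("arn:aws:iam::111122223333:role/dms-assessment-role"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.ReplicationTaskAssessmentRun.ReplicationTaskAssessmentRunArn))
+//    }
+//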
+func (s *StartReplicationTaskAssessmentRunInput) SetExclude(v []*string) *StartReplicationTaskAssessmentRunInput { + s.Exclude = v + return s +} + +// SetIncludeOnly sets the IncludeOnly field's value. +func (s *StartReplicationTaskAssessmentRunInput) SetIncludeOnly(v []*string) *StartReplicationTaskAssessmentRunInput { + s.IncludeOnly = v + return s +} + +// SetReplicationTaskArn sets the ReplicationTaskArn field's value. +func (s *StartReplicationTaskAssessmentRunInput) SetReplicationTaskArn(v string) *StartReplicationTaskAssessmentRunInput { + s.ReplicationTaskArn = &v + return s +} + +// SetResultEncryptionMode sets the ResultEncryptionMode field's value. +func (s *StartReplicationTaskAssessmentRunInput) SetResultEncryptionMode(v string) *StartReplicationTaskAssessmentRunInput { + s.ResultEncryptionMode = &v + return s +} + +// SetResultKmsKeyArn sets the ResultKmsKeyArn field's value. +func (s *StartReplicationTaskAssessmentRunInput) SetResultKmsKeyArn(v string) *StartReplicationTaskAssessmentRunInput { + s.ResultKmsKeyArn = &v + return s +} + +// SetResultLocationBucket sets the ResultLocationBucket field's value. +func (s *StartReplicationTaskAssessmentRunInput) SetResultLocationBucket(v string) *StartReplicationTaskAssessmentRunInput { + s.ResultLocationBucket = &v + return s +} + +// SetResultLocationFolder sets the ResultLocationFolder field's value. +func (s *StartReplicationTaskAssessmentRunInput) SetResultLocationFolder(v string) *StartReplicationTaskAssessmentRunInput { + s.ResultLocationFolder = &v + return s +} + +// SetServiceAccessRoleArn sets the ServiceAccessRoleArn field's value. +func (s *StartReplicationTaskAssessmentRunInput) SetServiceAccessRoleArn(v string) *StartReplicationTaskAssessmentRunInput { + s.ServiceAccessRoleArn = &v + return s +} + +type StartReplicationTaskAssessmentRunOutput struct { + _ struct{} `type:"structure"` + + // The premigration assessment run that was started. + ReplicationTaskAssessmentRun *ReplicationTaskAssessmentRun `type:"structure"` +} + +// String returns the string representation +func (s StartReplicationTaskAssessmentRunOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartReplicationTaskAssessmentRunOutput) GoString() string { + return s.String() +} + +// SetReplicationTaskAssessmentRun sets the ReplicationTaskAssessmentRun field's value. +func (s *StartReplicationTaskAssessmentRunOutput) SetReplicationTaskAssessmentRun(v *ReplicationTaskAssessmentRun) *StartReplicationTaskAssessmentRunOutput { + s.ReplicationTaskAssessmentRun = v + return s +} + type StartReplicationTaskInput struct { _ struct{} `type:"structure"` @@ -14370,6 +16879,66 @@ func (s *SupportedEndpointType) SetSupportsCDC(v bool) *SupportedEndpointType { return s } +// Provides information that defines a SAP ASE endpoint. +type SybaseSettings struct { + _ struct{} `type:"structure"` + + // Database name for the endpoint. + DatabaseName *string `type:"string"` + + // Endpoint connection password. + Password *string `type:"string" sensitive:"true"` + + // Endpoint TCP port. + Port *int64 `type:"integer"` + + // Fully qualified domain name of the endpoint. + ServerName *string `type:"string"` + + // Endpoint connection user name. 
+ Username *string `type:"string"` +} + +// String returns the string representation +func (s SybaseSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SybaseSettings) GoString() string { + return s.String() +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *SybaseSettings) SetDatabaseName(v string) *SybaseSettings { + s.DatabaseName = &v + return s +} + +// SetPassword sets the Password field's value. +func (s *SybaseSettings) SetPassword(v string) *SybaseSettings { + s.Password = &v + return s +} + +// SetPort sets the Port field's value. +func (s *SybaseSettings) SetPort(v int64) *SybaseSettings { + s.Port = &v + return s +} + +// SetServerName sets the ServerName field's value. +func (s *SybaseSettings) SetServerName(v string) *SybaseSettings { + s.ServerName = &v + return s +} + +// SetUsername sets the Username field's value. +func (s *SybaseSettings) SetUsername(v string) *SybaseSettings { + s.Username = &v + return s +} + // Provides a collection of table statistics in response to a request by the // DescribeTableStatistics operation. type TableStatistics struct { @@ -14593,10 +17162,14 @@ type TableToReload struct { _ struct{} `type:"structure"` // The schema name of the table to be reloaded. - SchemaName *string `type:"string"` + // + // SchemaName is a required field + SchemaName *string `type:"string" required:"true"` // The table name of the table to be reloaded. - TableName *string `type:"string"` + // + // TableName is a required field + TableName *string `type:"string" required:"true"` } // String returns the string representation @@ -14609,6 +17182,22 @@ func (s TableToReload) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *TableToReload) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TableToReload"} + if s.SchemaName == nil { + invalidParams.Add(request.NewErrParamRequired("SchemaName")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetSchemaName sets the SchemaName field's value. 
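+//
+// A minimal sketch of the new client-side check: both SchemaName and TableName
+// must now be set on a TableToReload (for example, before passing it to the
+// ReloadTables operation). The schema and table names are placeholders.
+//
+//    table := &databasemigrationservice.TableToReload{
+//        SchemaName: aws.String("sales"),
+//        TableName:  aws.String("orders"),
+//    }
+//    if err := table.Validate(); err != nil {
+//        // err names any missing required field
+//    }
+//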
func (s *TableToReload) SetSchemaName(v string) *TableToReload { s.SchemaName = &v diff --git a/service/databasemigrationservice/databasemigrationserviceiface/interface.go b/service/databasemigrationservice/databasemigrationserviceiface/interface.go index 81f948967e7..0fbe21351d7 100644 --- a/service/databasemigrationservice/databasemigrationserviceiface/interface.go +++ b/service/databasemigrationservice/databasemigrationserviceiface/interface.go @@ -68,6 +68,10 @@ type DatabaseMigrationServiceAPI interface { ApplyPendingMaintenanceActionWithContext(aws.Context, *databasemigrationservice.ApplyPendingMaintenanceActionInput, ...request.Option) (*databasemigrationservice.ApplyPendingMaintenanceActionOutput, error) ApplyPendingMaintenanceActionRequest(*databasemigrationservice.ApplyPendingMaintenanceActionInput) (*request.Request, *databasemigrationservice.ApplyPendingMaintenanceActionOutput) + CancelReplicationTaskAssessmentRun(*databasemigrationservice.CancelReplicationTaskAssessmentRunInput) (*databasemigrationservice.CancelReplicationTaskAssessmentRunOutput, error) + CancelReplicationTaskAssessmentRunWithContext(aws.Context, *databasemigrationservice.CancelReplicationTaskAssessmentRunInput, ...request.Option) (*databasemigrationservice.CancelReplicationTaskAssessmentRunOutput, error) + CancelReplicationTaskAssessmentRunRequest(*databasemigrationservice.CancelReplicationTaskAssessmentRunInput) (*request.Request, *databasemigrationservice.CancelReplicationTaskAssessmentRunOutput) + CreateEndpoint(*databasemigrationservice.CreateEndpointInput) (*databasemigrationservice.CreateEndpointOutput, error) CreateEndpointWithContext(aws.Context, *databasemigrationservice.CreateEndpointInput, ...request.Option) (*databasemigrationservice.CreateEndpointOutput, error) CreateEndpointRequest(*databasemigrationservice.CreateEndpointInput) (*request.Request, *databasemigrationservice.CreateEndpointOutput) @@ -116,10 +120,21 @@ type DatabaseMigrationServiceAPI interface { DeleteReplicationTaskWithContext(aws.Context, *databasemigrationservice.DeleteReplicationTaskInput, ...request.Option) (*databasemigrationservice.DeleteReplicationTaskOutput, error) DeleteReplicationTaskRequest(*databasemigrationservice.DeleteReplicationTaskInput) (*request.Request, *databasemigrationservice.DeleteReplicationTaskOutput) + DeleteReplicationTaskAssessmentRun(*databasemigrationservice.DeleteReplicationTaskAssessmentRunInput) (*databasemigrationservice.DeleteReplicationTaskAssessmentRunOutput, error) + DeleteReplicationTaskAssessmentRunWithContext(aws.Context, *databasemigrationservice.DeleteReplicationTaskAssessmentRunInput, ...request.Option) (*databasemigrationservice.DeleteReplicationTaskAssessmentRunOutput, error) + DeleteReplicationTaskAssessmentRunRequest(*databasemigrationservice.DeleteReplicationTaskAssessmentRunInput) (*request.Request, *databasemigrationservice.DeleteReplicationTaskAssessmentRunOutput) + DescribeAccountAttributes(*databasemigrationservice.DescribeAccountAttributesInput) (*databasemigrationservice.DescribeAccountAttributesOutput, error) DescribeAccountAttributesWithContext(aws.Context, *databasemigrationservice.DescribeAccountAttributesInput, ...request.Option) (*databasemigrationservice.DescribeAccountAttributesOutput, error) DescribeAccountAttributesRequest(*databasemigrationservice.DescribeAccountAttributesInput) (*request.Request, *databasemigrationservice.DescribeAccountAttributesOutput) + 
DescribeApplicableIndividualAssessments(*databasemigrationservice.DescribeApplicableIndividualAssessmentsInput) (*databasemigrationservice.DescribeApplicableIndividualAssessmentsOutput, error) + DescribeApplicableIndividualAssessmentsWithContext(aws.Context, *databasemigrationservice.DescribeApplicableIndividualAssessmentsInput, ...request.Option) (*databasemigrationservice.DescribeApplicableIndividualAssessmentsOutput, error) + DescribeApplicableIndividualAssessmentsRequest(*databasemigrationservice.DescribeApplicableIndividualAssessmentsInput) (*request.Request, *databasemigrationservice.DescribeApplicableIndividualAssessmentsOutput) + + DescribeApplicableIndividualAssessmentsPages(*databasemigrationservice.DescribeApplicableIndividualAssessmentsInput, func(*databasemigrationservice.DescribeApplicableIndividualAssessmentsOutput, bool) bool) error + DescribeApplicableIndividualAssessmentsPagesWithContext(aws.Context, *databasemigrationservice.DescribeApplicableIndividualAssessmentsInput, func(*databasemigrationservice.DescribeApplicableIndividualAssessmentsOutput, bool) bool, ...request.Option) error + DescribeCertificates(*databasemigrationservice.DescribeCertificatesInput) (*databasemigrationservice.DescribeCertificatesOutput, error) DescribeCertificatesWithContext(aws.Context, *databasemigrationservice.DescribeCertificatesInput, ...request.Option) (*databasemigrationservice.DescribeCertificatesOutput, error) DescribeCertificatesRequest(*databasemigrationservice.DescribeCertificatesInput) (*request.Request, *databasemigrationservice.DescribeCertificatesOutput) @@ -212,6 +227,20 @@ type DatabaseMigrationServiceAPI interface { DescribeReplicationTaskAssessmentResultsPages(*databasemigrationservice.DescribeReplicationTaskAssessmentResultsInput, func(*databasemigrationservice.DescribeReplicationTaskAssessmentResultsOutput, bool) bool) error DescribeReplicationTaskAssessmentResultsPagesWithContext(aws.Context, *databasemigrationservice.DescribeReplicationTaskAssessmentResultsInput, func(*databasemigrationservice.DescribeReplicationTaskAssessmentResultsOutput, bool) bool, ...request.Option) error + DescribeReplicationTaskAssessmentRuns(*databasemigrationservice.DescribeReplicationTaskAssessmentRunsInput) (*databasemigrationservice.DescribeReplicationTaskAssessmentRunsOutput, error) + DescribeReplicationTaskAssessmentRunsWithContext(aws.Context, *databasemigrationservice.DescribeReplicationTaskAssessmentRunsInput, ...request.Option) (*databasemigrationservice.DescribeReplicationTaskAssessmentRunsOutput, error) + DescribeReplicationTaskAssessmentRunsRequest(*databasemigrationservice.DescribeReplicationTaskAssessmentRunsInput) (*request.Request, *databasemigrationservice.DescribeReplicationTaskAssessmentRunsOutput) + + DescribeReplicationTaskAssessmentRunsPages(*databasemigrationservice.DescribeReplicationTaskAssessmentRunsInput, func(*databasemigrationservice.DescribeReplicationTaskAssessmentRunsOutput, bool) bool) error + DescribeReplicationTaskAssessmentRunsPagesWithContext(aws.Context, *databasemigrationservice.DescribeReplicationTaskAssessmentRunsInput, func(*databasemigrationservice.DescribeReplicationTaskAssessmentRunsOutput, bool) bool, ...request.Option) error + + DescribeReplicationTaskIndividualAssessments(*databasemigrationservice.DescribeReplicationTaskIndividualAssessmentsInput) (*databasemigrationservice.DescribeReplicationTaskIndividualAssessmentsOutput, error) + DescribeReplicationTaskIndividualAssessmentsWithContext(aws.Context, 
*databasemigrationservice.DescribeReplicationTaskIndividualAssessmentsInput, ...request.Option) (*databasemigrationservice.DescribeReplicationTaskIndividualAssessmentsOutput, error) + DescribeReplicationTaskIndividualAssessmentsRequest(*databasemigrationservice.DescribeReplicationTaskIndividualAssessmentsInput) (*request.Request, *databasemigrationservice.DescribeReplicationTaskIndividualAssessmentsOutput) + + DescribeReplicationTaskIndividualAssessmentsPages(*databasemigrationservice.DescribeReplicationTaskIndividualAssessmentsInput, func(*databasemigrationservice.DescribeReplicationTaskIndividualAssessmentsOutput, bool) bool) error + DescribeReplicationTaskIndividualAssessmentsPagesWithContext(aws.Context, *databasemigrationservice.DescribeReplicationTaskIndividualAssessmentsInput, func(*databasemigrationservice.DescribeReplicationTaskIndividualAssessmentsOutput, bool) bool, ...request.Option) error + DescribeReplicationTasks(*databasemigrationservice.DescribeReplicationTasksInput) (*databasemigrationservice.DescribeReplicationTasksOutput, error) DescribeReplicationTasksWithContext(aws.Context, *databasemigrationservice.DescribeReplicationTasksInput, ...request.Option) (*databasemigrationservice.DescribeReplicationTasksOutput, error) DescribeReplicationTasksRequest(*databasemigrationservice.DescribeReplicationTasksInput) (*request.Request, *databasemigrationservice.DescribeReplicationTasksOutput) @@ -285,6 +314,10 @@ type DatabaseMigrationServiceAPI interface { StartReplicationTaskAssessmentWithContext(aws.Context, *databasemigrationservice.StartReplicationTaskAssessmentInput, ...request.Option) (*databasemigrationservice.StartReplicationTaskAssessmentOutput, error) StartReplicationTaskAssessmentRequest(*databasemigrationservice.StartReplicationTaskAssessmentInput) (*request.Request, *databasemigrationservice.StartReplicationTaskAssessmentOutput) + StartReplicationTaskAssessmentRun(*databasemigrationservice.StartReplicationTaskAssessmentRunInput) (*databasemigrationservice.StartReplicationTaskAssessmentRunOutput, error) + StartReplicationTaskAssessmentRunWithContext(aws.Context, *databasemigrationservice.StartReplicationTaskAssessmentRunInput, ...request.Option) (*databasemigrationservice.StartReplicationTaskAssessmentRunOutput, error) + StartReplicationTaskAssessmentRunRequest(*databasemigrationservice.StartReplicationTaskAssessmentRunInput) (*request.Request, *databasemigrationservice.StartReplicationTaskAssessmentRunOutput) + StopReplicationTask(*databasemigrationservice.StopReplicationTaskInput) (*databasemigrationservice.StopReplicationTaskOutput, error) StopReplicationTaskWithContext(aws.Context, *databasemigrationservice.StopReplicationTaskInput, ...request.Option) (*databasemigrationservice.StopReplicationTaskOutput, error) StopReplicationTaskRequest(*databasemigrationservice.StopReplicationTaskInput) (*request.Request, *databasemigrationservice.StopReplicationTaskOutput) diff --git a/service/databasemigrationservice/errors.go b/service/databasemigrationservice/errors.go index e2e32caf6f0..b02044ee188 100644 --- a/service/databasemigrationservice/errors.go +++ b/service/databasemigrationservice/errors.go @@ -53,6 +53,13 @@ const ( // The specified master key (CMK) isn't enabled. ErrCodeKMSDisabledFault = "KMSDisabledFault" + // ErrCodeKMSFault for service response error code + // "KMSFault". + // + // An AWS Key Management Service (AWS KMS) error is preventing access to AWS + // KMS. 
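+//
+// A minimal sketch of branching on the new error codes with awserr; err is the
+// error returned by an assessment-run API call, and the awserr import is assumed.
+//
+//    if aerr, ok := err.(awserr.Error); ok {
+//        switch aerr.Code() {
+//        case databasemigrationservice.ErrCodeKMSFault,
+//            databasemigrationservice.ErrCodeS3AccessDeniedFault,
+//            databasemigrationservice.ErrCodeS3ResourceNotFoundFault:
+//            // the result bucket or its KMS key is not reachable
+//        }
+//    }
+//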
+ ErrCodeKMSFault = "KMSFault" + // ErrCodeKMSInvalidStateFault for service response error code // "KMSInvalidStateFault". // @@ -102,6 +109,18 @@ const ( // The quota for this resource quota has been exceeded. ErrCodeResourceQuotaExceededFault = "ResourceQuotaExceededFault" + // ErrCodeS3AccessDeniedFault for service response error code + // "S3AccessDeniedFault". + // + // Insufficient privileges are preventing access to an Amazon S3 object. + ErrCodeS3AccessDeniedFault = "S3AccessDeniedFault" + + // ErrCodeS3ResourceNotFoundFault for service response error code + // "S3ResourceNotFoundFault". + // + // A specified Amazon S3 bucket, bucket folder, or other object can't be found. + ErrCodeS3ResourceNotFoundFault = "S3ResourceNotFoundFault" + // ErrCodeSNSInvalidTopicFault for service response error code // "SNSInvalidTopicFault". // @@ -141,6 +160,7 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "InvalidSubnet": newErrorInvalidSubnet, "KMSAccessDeniedFault": newErrorKMSAccessDeniedFault, "KMSDisabledFault": newErrorKMSDisabledFault, + "KMSFault": newErrorKMSFault, "KMSInvalidStateFault": newErrorKMSInvalidStateFault, "KMSKeyNotAccessibleFault": newErrorKMSKeyNotAccessibleFault, "KMSNotFoundFault": newErrorKMSNotFoundFault, @@ -149,6 +169,8 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "ResourceAlreadyExistsFault": newErrorResourceAlreadyExistsFault, "ResourceNotFoundFault": newErrorResourceNotFoundFault, "ResourceQuotaExceededFault": newErrorResourceQuotaExceededFault, + "S3AccessDeniedFault": newErrorS3AccessDeniedFault, + "S3ResourceNotFoundFault": newErrorS3ResourceNotFoundFault, "SNSInvalidTopicFault": newErrorSNSInvalidTopicFault, "SNSNoAuthorizationFault": newErrorSNSNoAuthorizationFault, "StorageQuotaExceededFault": newErrorStorageQuotaExceededFault, diff --git a/service/datasync/api.go b/service/datasync/api.go index 79b332bbf01..3473737cf83 100644 --- a/service/datasync/api.go +++ b/service/datasync/api.go @@ -60,7 +60,7 @@ func (c *DataSync) CancelTaskExecutionRequest(input *CancelTaskExecutionInput) ( // // Cancels execution of a task. // -// When you cancel a task execution, the transfer of some files are abruptly +// When you cancel a task execution, the transfer of some files is abruptly // interrupted. The contents of files that are transferred to the destination // might be incomplete or inconsistent with the source files. However, if you // start a new task execution on the same task and you allow the task execution @@ -156,7 +156,7 @@ func (c *DataSync) CreateAgentRequest(input *CreateAgentInput) (req *request.Req // target locations (in Amazon S3 or Amazon EFS) reside. Your tasks are created // in this AWS Region. // -// You can activate the agent in a VPC (Virtual private Cloud) or provide the +// You can activate the agent in a VPC (virtual private cloud) or provide the // agent access to a VPC endpoint so you can run tasks without going over the // public Internet. // @@ -413,7 +413,7 @@ func (c *DataSync) CreateLocationNfsRequest(input *CreateLocationNfsInput) (req // CreateLocationNfs API operation for AWS DataSync. // // Defines a file system on a Network File System (NFS) server that can be read -// from or written to +// from or written to. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -451,6 +451,88 @@ func (c *DataSync) CreateLocationNfsWithContext(ctx aws.Context, input *CreateLo return out, req.Send() } +const opCreateLocationObjectStorage = "CreateLocationObjectStorage" + +// CreateLocationObjectStorageRequest generates a "aws/request.Request" representing the +// client's request for the CreateLocationObjectStorage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateLocationObjectStorage for more information on using the CreateLocationObjectStorage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateLocationObjectStorageRequest method. +// req, resp := client.CreateLocationObjectStorageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/CreateLocationObjectStorage +func (c *DataSync) CreateLocationObjectStorageRequest(input *CreateLocationObjectStorageInput) (req *request.Request, output *CreateLocationObjectStorageOutput) { + op := &request.Operation{ + Name: opCreateLocationObjectStorage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLocationObjectStorageInput{} + } + + output = &CreateLocationObjectStorageOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateLocationObjectStorage API operation for AWS DataSync. +// +// Creates an endpoint for a self-managed object storage bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS DataSync's +// API operation CreateLocationObjectStorage for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// This exception is thrown when the client submits a malformed request. +// +// * InternalException +// This exception is thrown when an error occurs in the AWS DataSync service. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/CreateLocationObjectStorage +func (c *DataSync) CreateLocationObjectStorage(input *CreateLocationObjectStorageInput) (*CreateLocationObjectStorageOutput, error) { + req, out := c.CreateLocationObjectStorageRequest(input) + return out, req.Send() +} + +// CreateLocationObjectStorageWithContext is the same as CreateLocationObjectStorage with the addition of +// the ability to pass a context and additional request options. +// +// See CreateLocationObjectStorage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
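+//
+// A minimal usage sketch for this operation; sess is an existing *session.Session,
+// and the agent ARN, bucket name, and hostname are placeholders.
+//
+//    svc := datasync.New(sess)
+//    loc, err := svc.CreateLocationObjectStorage(&datasync.CreateLocationObjectStorageInput{
+//        AgentArns:      []*string{aws.String("arn:aws:datasync:us-east-1:111122223333:agent/agent-0exampleexample0")},
+//        BucketName:     aws.String("backups"),
+//        ServerHostname: aws.String("objects.example.com"),
+//        ServerProtocol: aws.String("HTTPS"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(loc.LocationArn))
+//    }
+//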
+func (c *DataSync) CreateLocationObjectStorageWithContext(ctx aws.Context, input *CreateLocationObjectStorageInput, opts ...request.Option) (*CreateLocationObjectStorageOutput, error) { + req, out := c.CreateLocationObjectStorageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateLocationS3 = "CreateLocationS3" // CreateLocationS3Request generates a "aws/request.Request" representing the @@ -586,7 +668,7 @@ func (c *DataSync) CreateLocationSmbRequest(input *CreateLocationSmbInput) (req // CreateLocationSmb API operation for AWS DataSync. // -// Defines a file system on an Server Message Block (SMB) server that can be +// Defines a file system on a Server Message Block (SMB) server that can be // read from or written to. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1269,7 +1351,7 @@ func (c *DataSync) DescribeLocationNfsRequest(input *DescribeLocationNfsInput) ( // DescribeLocationNfs API operation for AWS DataSync. // -// Returns metadata, such as the path information, about a NFS location. +// Returns metadata, such as the path information, about an NFS location. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1307,6 +1389,88 @@ func (c *DataSync) DescribeLocationNfsWithContext(ctx aws.Context, input *Descri return out, req.Send() } +const opDescribeLocationObjectStorage = "DescribeLocationObjectStorage" + +// DescribeLocationObjectStorageRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLocationObjectStorage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeLocationObjectStorage for more information on using the DescribeLocationObjectStorage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeLocationObjectStorageRequest method. +// req, resp := client.DescribeLocationObjectStorageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/DescribeLocationObjectStorage +func (c *DataSync) DescribeLocationObjectStorageRequest(input *DescribeLocationObjectStorageInput) (req *request.Request, output *DescribeLocationObjectStorageOutput) { + op := &request.Operation{ + Name: opDescribeLocationObjectStorage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLocationObjectStorageInput{} + } + + output = &DescribeLocationObjectStorageOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeLocationObjectStorage API operation for AWS DataSync. +// +// Returns metadata about a self-managed object storage server location. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for AWS DataSync's +// API operation DescribeLocationObjectStorage for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// This exception is thrown when the client submits a malformed request. +// +// * InternalException +// This exception is thrown when an error occurs in the AWS DataSync service. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/DescribeLocationObjectStorage +func (c *DataSync) DescribeLocationObjectStorage(input *DescribeLocationObjectStorageInput) (*DescribeLocationObjectStorageOutput, error) { + req, out := c.DescribeLocationObjectStorageRequest(input) + return out, req.Send() +} + +// DescribeLocationObjectStorageWithContext is the same as DescribeLocationObjectStorage with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLocationObjectStorage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DataSync) DescribeLocationObjectStorageWithContext(ctx aws.Context, input *DescribeLocationObjectStorageInput, opts ...request.Option) (*DescribeLocationObjectStorageOutput, error) { + req, out := c.DescribeLocationObjectStorageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeLocationS3 = "DescribeLocationS3" // DescribeLocationS3Request generates a "aws/request.Request" representing the @@ -1433,7 +1597,7 @@ func (c *DataSync) DescribeLocationSmbRequest(input *DescribeLocationSmbInput) ( // DescribeLocationSmb API operation for AWS DataSync. // -// Returns metadata, such as the path and user information about a SMB location. +// Returns metadata, such as the path and user information about an SMB location. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1835,7 +1999,7 @@ func (c *DataSync) ListLocationsRequest(input *ListLocationsInput) (req *request // ListLocations API operation for AWS DataSync. // -// Returns a lists of source and destination locations. +// Returns a list of source and destination locations. // // If you have more locations than are returned in a response (that is, the // response returns only a truncated list of your agents), the response contains @@ -1980,7 +2144,7 @@ func (c *DataSync) ListTagsForResourceRequest(input *ListTagsForResourceInput) ( // ListTagsForResource API operation for AWS DataSync. // -// Returns all the tags associated with a specified resources. +// Returns all the tags associated with a specified resource. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2912,11 +3076,11 @@ type CreateAgentInput struct { // in UTF-8 format, and the following special characters: + - = . _ : / @. Tags []*TagListEntry `type:"list"` - // The ID of the VPC (Virtual Private Cloud) endpoint that the agent has access + // The ID of the VPC (virtual private cloud) endpoint that the agent has access // to. This is the client-side VPC endpoint, also called a PrivateLink. 
If you // don't have a PrivateLink VPC endpoint, see Creating a VPC Endpoint Service // Configuration (https://docs.aws.amazon.com/vpc/latest/userguide/endpoint-service.html#create-endpoint-service) - // in the AWS VPC User Guide. + // in the Amazon VPC User Guide. // // VPC endpoint ID looks like this: vpce-01234d5aff67890e1. VpcEndpointId *string `type:"string"` @@ -3061,7 +3225,7 @@ type CreateLocationEfsInput struct { // system is used to read data from the EFS source location or write data to // the EFS destination. By default, AWS DataSync uses the root directory. // - // Subdirectory must be specified with forward slashes. For example /path/to/folder. + // Subdirectory must be specified with forward slashes. For example, /path/to/folder. Subdirectory *string `type:"string"` // The key-value pair that represents a tag that you want to add to the resource. @@ -3323,6 +3487,10 @@ type CreateLocationNfsInput struct { // Contains a list of Amazon Resource Names (ARNs) of agents that are used to // connect to an NFS server. // + // If you are copying data to or from your AWS Snowcone device, see NFS Server + // on AWS Snowcone (https://docs.aws.amazon.com/datasync/latest/userguide/create-nfs-location.html#nfs-on-snowcone) + // for more information. + // // OnPremConfig is a required field OnPremConfig *OnPremConfig `type:"structure" required:"true"` @@ -3330,6 +3498,10 @@ type CreateLocationNfsInput struct { // (DNS) name of the NFS server. An agent that is installed on-premises uses // this host name to mount the NFS server in a network. // + // If you are copying data to or from your AWS Snowcone device, see NFS Server + // on AWS Snowcone (https://docs.aws.amazon.com/datasync/latest/userguide/create-nfs-location.html#nfs-on-snowcone) + // for more information. + // // This name must either be DNS-compliant or must be an IP version 4 (IPv4) // address. // @@ -3354,6 +3526,10 @@ type CreateLocationNfsInput struct { // enables the agent to read the files. For the agent to access directories, // you must additionally enable all execute access. // + // If you are copying data to or from your AWS Snowcone device, see NFS Server + // on AWS Snowcone (https://docs.aws.amazon.com/datasync/latest/userguide/create-nfs-location.html#nfs-on-snowcone) + // for more information. + // // For information about NFS export configuration, see 18.7. The /etc/exports // Configuration File in the Red Hat Enterprise Linux documentation. // @@ -3464,6 +3640,189 @@ func (s *CreateLocationNfsOutput) SetLocationArn(v string) *CreateLocationNfsOut return s } +// CreateLocationObjectStorageRequest +type CreateLocationObjectStorageInput struct { + _ struct{} `type:"structure"` + + // Optional. The access key is used if credentials are required to access the + // self-managed object storage server. + AccessKey *string `min:"8" type:"string"` + + // The Amazon Resource Name (ARN) of the agents associated with the self-managed + // object storage server location. + // + // AgentArns is a required field + AgentArns []*string `min:"1" type:"list" required:"true"` + + // The bucket on the self-managed object storage server that is used to read + // data from. + // + // BucketName is a required field + BucketName *string `min:"3" type:"string" required:"true"` + + // Optional. The secret key is used if credentials are required to access the + // self-managed object storage server. + SecretKey *string `min:"8" type:"string" sensitive:"true"` + + // The name of the self-managed object storage server. 
This value is the IP + // address or Domain Name Service (DNS) name of the object storage server. An + // agent uses this host name to mount the object storage server in a network. + // + // ServerHostname is a required field + ServerHostname *string `type:"string" required:"true"` + + // The port that your self-managed object storage server accepts inbound network + // traffic on. The server port is set by default to TCP 80 (HTTP) or TCP 443 + // (HTTPS). You can specify a custom port if your self-managed object storage + // server requires one. + ServerPort *int64 `min:"1" type:"integer"` + + // The protocol that the object storage server uses to communicate. Valid values + // are HTTP or HTTPS. + ServerProtocol *string `type:"string" enum:"ObjectStorageServerProtocol"` + + // The subdirectory in the self-managed object storage server that is used to + // read data from. + Subdirectory *string `type:"string"` + + // The key-value pair that represents the tag that you want to add to the location. + // The value can be an empty string. We recommend using tags to name your resources. + Tags []*TagListEntry `type:"list"` +} + +// String returns the string representation +func (s CreateLocationObjectStorageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLocationObjectStorageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateLocationObjectStorageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLocationObjectStorageInput"} + if s.AccessKey != nil && len(*s.AccessKey) < 8 { + invalidParams.Add(request.NewErrParamMinLen("AccessKey", 8)) + } + if s.AgentArns == nil { + invalidParams.Add(request.NewErrParamRequired("AgentArns")) + } + if s.AgentArns != nil && len(s.AgentArns) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AgentArns", 1)) + } + if s.BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("BucketName")) + } + if s.BucketName != nil && len(*s.BucketName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("BucketName", 3)) + } + if s.SecretKey != nil && len(*s.SecretKey) < 8 { + invalidParams.Add(request.NewErrParamMinLen("SecretKey", 8)) + } + if s.ServerHostname == nil { + invalidParams.Add(request.NewErrParamRequired("ServerHostname")) + } + if s.ServerPort != nil && *s.ServerPort < 1 { + invalidParams.Add(request.NewErrParamMinValue("ServerPort", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessKey sets the AccessKey field's value. +func (s *CreateLocationObjectStorageInput) SetAccessKey(v string) *CreateLocationObjectStorageInput { + s.AccessKey = &v + return s +} + +// SetAgentArns sets the AgentArns field's value. +func (s *CreateLocationObjectStorageInput) SetAgentArns(v []*string) *CreateLocationObjectStorageInput { + s.AgentArns = v + return s +} + +// SetBucketName sets the BucketName field's value. +func (s *CreateLocationObjectStorageInput) SetBucketName(v string) *CreateLocationObjectStorageInput { + s.BucketName = &v + return s +} + +// SetSecretKey sets the SecretKey field's value. 
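+//
+// A minimal sketch of the client-side checks performed by Validate above: values
+// shorter than the documented minimums are rejected before any request is sent.
+// The field values are deliberately invalid placeholders.
+//
+//    in := &datasync.CreateLocationObjectStorageInput{
+//        AgentArns:      []*string{aws.String("arn:aws:datasync:us-east-1:111122223333:agent/agent-0exampleexample0")},
+//        BucketName:     aws.String("ab"),    // below the 3-character minimum
+//        ServerHostname: aws.String("objects.example.com"),
+//        AccessKey:      aws.String("short"), // below the 8-character minimum
+//    }
+//    err := in.Validate() // returns an ErrInvalidParams error naming both fields
+//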
+func (s *CreateLocationObjectStorageInput) SetSecretKey(v string) *CreateLocationObjectStorageInput { + s.SecretKey = &v + return s +} + +// SetServerHostname sets the ServerHostname field's value. +func (s *CreateLocationObjectStorageInput) SetServerHostname(v string) *CreateLocationObjectStorageInput { + s.ServerHostname = &v + return s +} + +// SetServerPort sets the ServerPort field's value. +func (s *CreateLocationObjectStorageInput) SetServerPort(v int64) *CreateLocationObjectStorageInput { + s.ServerPort = &v + return s +} + +// SetServerProtocol sets the ServerProtocol field's value. +func (s *CreateLocationObjectStorageInput) SetServerProtocol(v string) *CreateLocationObjectStorageInput { + s.ServerProtocol = &v + return s +} + +// SetSubdirectory sets the Subdirectory field's value. +func (s *CreateLocationObjectStorageInput) SetSubdirectory(v string) *CreateLocationObjectStorageInput { + s.Subdirectory = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateLocationObjectStorageInput) SetTags(v []*TagListEntry) *CreateLocationObjectStorageInput { + s.Tags = v + return s +} + +// CreateLocationObjectStorageResponse +type CreateLocationObjectStorageOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the agents associated with the self-managed + // object storage server location. + LocationArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateLocationObjectStorageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLocationObjectStorageOutput) GoString() string { + return s.String() +} + +// SetLocationArn sets the LocationArn field's value. +func (s *CreateLocationObjectStorageOutput) SetLocationArn(v string) *CreateLocationObjectStorageOutput { + s.LocationArn = &v + return s +} + // CreateLocationS3Request type CreateLocationS3Input struct { _ struct{} `type:"structure"` @@ -3633,7 +3992,7 @@ type CreateLocationSmbInput struct { // The path should be such that it can be mounted by other SMB clients in your // network. // - // Subdirectory must be specified with forward slashes. For example /path/to/folder. + // Subdirectory must be specified with forward slashes. For example, /path/to/folder. // // To transfer all the data in the folder you specified, DataSync needs to have // permissions to mount the SMB share, as well as to access all the data in @@ -3785,12 +4144,6 @@ type CreateTaskInput struct { // The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that is // used to monitor and log events in the task. - // - // For more information on these groups, see Working with Log Groups and Log - // Streams in the Amazon CloudWatch User Guide. - // - // For more information about how to use CloudWatch Logs with DataSync, see - // Monitoring Your Task in the AWS DataSync User Guide. CloudWatchLogGroupArn *string `type:"string"` // The Amazon Resource Name (ARN) of an AWS storage resource's location. @@ -3814,7 +4167,7 @@ type CreateTaskInput struct { // file permissions, data integrity verification, and so on. // // For each individual task execution, you can override these options by specifying - // the OverrideOptions before starting a the task execution. For more information, + // the OverrideOptions before starting the task execution. For more information, // see the operation. 
Options *Options `type:"structure"` @@ -4164,7 +4517,7 @@ type DescribeAgentOutput struct { CreationTime *time.Time `type:"timestamp"` // The type of endpoint that your agent is connected to. If the endpoint is - // a VPC endpoint, the agent is not accessible over the public Internet. + // a VPC endpoint, the agent is not accessible over the public internet. EndpointType *string `type:"string" enum:"EndpointType"` // The time that the agent last connected to DataSync. @@ -4288,7 +4641,7 @@ type DescribeLocationEfsOutput struct { // with the security group on the mount target in the subnet specified. Ec2Config *Ec2Config `type:"structure"` - // The Amazon resource Name (ARN) of the EFS location that was described. + // The Amazon Resource Name (ARN) of the EFS location that was described. LocationArn *string `type:"string"` // The URL of the EFS location that was described. @@ -4376,14 +4729,14 @@ type DescribeLocationFsxWindowsOutput struct { // The name of the Windows domain that the FSx for Windows server belongs to. Domain *string `type:"string"` - // The Amazon resource Name (ARN) of the FSx for Windows location that was described. + // The Amazon Resource Name (ARN) of the FSx for Windows location that was described. LocationArn *string `type:"string"` // The URL of the FSx for Windows location that was described. LocationUri *string `type:"string"` // The Amazon Resource Names (ARNs) of the security groups that are configured - // for the for the FSx for Windows file system. + // for the FSx for Windows file system. SecurityGroupArns []*string `min:"1" type:"list"` // The user who has the permissions to access files and folders in the FSx for @@ -4441,7 +4794,7 @@ func (s *DescribeLocationFsxWindowsOutput) SetUser(v string) *DescribeLocationFs type DescribeLocationNfsInput struct { _ struct{} `type:"structure"` - // The Amazon resource Name (ARN) of the NFS location to describe. + // The Amazon Resource Name (ARN) of the NFS location to describe. // // LocationArn is a required field LocationArn *string `type:"string" required:"true"` @@ -4483,7 +4836,7 @@ type DescribeLocationNfsOutput struct { // The time that the NFS location was created. CreationTime *time.Time `type:"timestamp"` - // The Amazon resource Name (ARN) of the NFS location that was described. + // The Amazon Resource Name (ARN) of the NFS location that was described. LocationArn *string `type:"string"` // The URL of the source NFS location that was described. @@ -4537,6 +4890,131 @@ func (s *DescribeLocationNfsOutput) SetOnPremConfig(v *OnPremConfig) *DescribeLo return s } +// DescribeLocationObjectStorageRequest +type DescribeLocationObjectStorageInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the self-managed object storage server + // location to describe. + // + // LocationArn is a required field + LocationArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeLocationObjectStorageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLocationObjectStorageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid.
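// A short, illustrative sketch (not part of the generated SDK) of describing
// a self-managed object storage location with the new operation above. The
// location ARN is a placeholder.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/datasync"
)

func main() {
	svc := datasync.New(session.Must(session.NewSession()))

	out, err := svc.DescribeLocationObjectStorage(&datasync.DescribeLocationObjectStorageInput{
		LocationArn: aws.String("arn:aws:datasync:us-east-1:111122223333:location/loc-0123456789abcdef0"), // placeholder
	})
	if err != nil {
		fmt.Println("DescribeLocationObjectStorage failed:", err)
		return
	}

	// Dereference the pointer fields with the aws helpers.
	fmt.Println("uri:     ", aws.StringValue(out.LocationUri))
	fmt.Println("protocol:", aws.StringValue(out.ServerProtocol))
	fmt.Println("port:    ", aws.Int64Value(out.ServerPort))
	fmt.Println("agents:  ", aws.StringValueSlice(out.AgentArns))
}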
+func (s *DescribeLocationObjectStorageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLocationObjectStorageInput"} + if s.LocationArn == nil { + invalidParams.Add(request.NewErrParamRequired("LocationArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLocationArn sets the LocationArn field's value. +func (s *DescribeLocationObjectStorageInput) SetLocationArn(v string) *DescribeLocationObjectStorageInput { + s.LocationArn = &v + return s +} + +// DescribeLocationObjectStorageResponse +type DescribeLocationObjectStorageOutput struct { + _ struct{} `type:"structure"` + + // Optional. The access key is used if credentials are required to access the + // self-managed object storage server. + AccessKey *string `min:"8" type:"string"` + + // The Amazon Resource Names (ARNs) of the agents associated with the self-managed + // object storage server location. + AgentArns []*string `min:"1" type:"list"` + + // The time that the self-managed object storage server location was created. + CreationTime *time.Time `type:"timestamp"` + + // The Amazon Resource Name (ARN) of the self-managed object storage server + // location that was described. + LocationArn *string `type:"string"` + + // The URL of the source self-managed object storage server location that was + // described. + LocationUri *string `type:"string"` + + // The port that your self-managed object storage server accepts inbound network + // traffic on. The server port is set by default to TCP 80 (HTTP) or TCP 443 + // (HTTPS). + ServerPort *int64 `min:"1" type:"integer"` + + // The protocol that the object storage server uses to communicate. Valid values + // are HTTP or HTTPS. + ServerProtocol *string `type:"string" enum:"ObjectStorageServerProtocol"` +} + +// String returns the string representation +func (s DescribeLocationObjectStorageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLocationObjectStorageOutput) GoString() string { + return s.String() +} + +// SetAccessKey sets the AccessKey field's value. +func (s *DescribeLocationObjectStorageOutput) SetAccessKey(v string) *DescribeLocationObjectStorageOutput { + s.AccessKey = &v + return s +} + +// SetAgentArns sets the AgentArns field's value. +func (s *DescribeLocationObjectStorageOutput) SetAgentArns(v []*string) *DescribeLocationObjectStorageOutput { + s.AgentArns = v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *DescribeLocationObjectStorageOutput) SetCreationTime(v time.Time) *DescribeLocationObjectStorageOutput { + s.CreationTime = &v + return s +} + +// SetLocationArn sets the LocationArn field's value. +func (s *DescribeLocationObjectStorageOutput) SetLocationArn(v string) *DescribeLocationObjectStorageOutput { + s.LocationArn = &v + return s +} + +// SetLocationUri sets the LocationUri field's value. +func (s *DescribeLocationObjectStorageOutput) SetLocationUri(v string) *DescribeLocationObjectStorageOutput { + s.LocationUri = &v + return s +} + +// SetServerPort sets the ServerPort field's value. +func (s *DescribeLocationObjectStorageOutput) SetServerPort(v int64) *DescribeLocationObjectStorageOutput { + s.ServerPort = &v + return s +} + +// SetServerProtocol sets the ServerProtocol field's value.
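// An equally hedged sketch showing the *WithContext variant of the same
// describe call, so the request can be bounded by a context deadline. The
// ARN and the 15-second timeout are placeholders.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/datasync"
)

func main() {
	svc := datasync.New(session.Must(session.NewSession()))

	// Cancel the API call if it has not completed within 15 seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	out, err := svc.DescribeLocationObjectStorageWithContext(ctx, &datasync.DescribeLocationObjectStorageInput{
		LocationArn: aws.String("arn:aws:datasync:us-east-1:111122223333:location/loc-0123456789abcdef0"), // placeholder
	})
	if err != nil {
		fmt.Println("describe failed or timed out:", err)
		return
	}
	fmt.Println("location created:", aws.TimeValue(out.CreationTime))
}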
+func (s *DescribeLocationObjectStorageOutput) SetServerProtocol(v string) *DescribeLocationObjectStorageOutput { + s.ServerProtocol = &v + return s +} + // DescribeLocationS3Request type DescribeLocationS3Input struct { _ struct{} `type:"structure"` @@ -4649,7 +5127,7 @@ func (s *DescribeLocationS3Output) SetS3StorageClass(v string) *DescribeLocation type DescribeLocationSmbInput struct { _ struct{} `type:"structure"` - // The Amazon resource Name (ARN) of the SMB location to describe. + // The Amazon Resource Name (ARN) of the SMB location to describe. // // LocationArn is a required field LocationArn *string `type:"string" required:"true"` @@ -4698,7 +5176,7 @@ type DescribeLocationSmbOutput struct { // The name of the Windows domain that the SMB server belongs to. Domain *string `type:"string"` - // The Amazon resource Name (ARN) of the SMB location that was described. + // The Amazon Resource Name (ARN) of the SMB location that was described. LocationArn *string `type:"string"` // The URL of the source SMB location that was described. @@ -5938,10 +6416,12 @@ type Options struct { // NONE: Ignore UID and GID. Gid *string `type:"string" enum:"Gid"` - // A value that determines the type of logs DataSync will deliver to your AWS - // CloudWatch Logs file. If set to OFF, no logs will be delivered. BASIC will - // deliver a few logs per transfer operation and TRANSFER will deliver a verbose - // log that contains logs for every file that is transferred. + // A value that determines the type of logs that DataSync publishes to a log + // stream in the Amazon CloudWatch log group that you provide. For more information + // about providing a log group for DataSync, see CloudWatchLogGroupArn (https://docs.aws.amazon.com/datasync/latest/userguide/API_CreateTask.html#DataSync-CreateTask-request-CloudWatchLogGroupArn). + // If set to OFF, no logs are published. BASIC publishes logs on errors for + // individual files transferred, and TRANSFER publishes logs for every file + // or object that is transferred and integrity checked. LogLevel *string `type:"string" enum:"LogLevel"` // A value that indicates the last time that a file was modified (that is, a @@ -6012,10 +6492,16 @@ type Options struct { // A value that determines whether tasks should be queued before executing the // tasks. If set to ENABLED, the tasks will be queued. The default is ENABLED. // - // If you use the same agent to run multiple tasks you can enable the tasks - // to run in series. For more information see queue-task-execution. + // If you use the same agent to run multiple tasks, you can enable the tasks + // to run in series. For more information, see queue-task-execution. TaskQueueing *string `type:"string" enum:"TaskQueueing"` + // TransferMode has two values: CHANGED and ALL. CHANGED performs an "incremental" + // or "delta sync": it compares file modification times between source and destination + // to determine which files need to be transferred. ALL skips the destination inventory + // and transfers all files discovered on the source. + TransferMode *string `type:"string" enum:"TransferMode"` + // The user ID (UID) of the file's owner. // // Default value: INT_VALUE. This preserves the integer value of the ID. @@ -6027,14 +6513,21 @@ type Options struct { // A value that determines whether a data integrity verification should be performed // at the end of a task execution after all data and metadata have been transferred. + // For more information, see create-task. // // Default value: POINT_IN_TIME_CONSISTENT.
// - // POINT_IN_TIME_CONSISTENT: Perform verification (recommended). + // ONLY_FILES_TRANSFERRED (recommended): Perform verification only on files + // that were transferred. // - // ONLY_FILES_TRANSFERRED: Perform verification on only files that were transferred. + // POINT_IN_TIME_CONSISTENT: Scan the entire source and entire destination at + // the end of the transfer to verify that source and destination are fully synchronized. + // This option isn't supported when transferring to S3 Glacier or S3 Glacier + // Deep Archive storage classes. // - // NONE: Skip verification. + // NONE: No additional verification is done at the end of the transfer, but + // all data transmissions are integrity-checked with checksum verification during + // the transfer. VerifyMode *string `type:"string" enum:"VerifyMode"` } @@ -6121,6 +6614,12 @@ func (s *Options) SetTaskQueueing(v string) *Options { return s } +// SetTransferMode sets the TransferMode field's value. +func (s *Options) SetTransferMode(v string) *Options { + s.TransferMode = &v + return s +} + // SetUid sets the Uid field's value. func (s *Options) SetUid(v string) *Options { s.Uid = &v @@ -6133,7 +6632,7 @@ func (s *Options) SetVerifyMode(v string) *Options { return s } -// The VPC endpoint, subnet and security group that an agent uses to access +// The VPC endpoint, subnet, and security group that an agent uses to access // IP addresses in a VPC (Virtual Private Cloud). type PrivateLinkConfig struct { _ struct{} `type:"structure"` @@ -6141,7 +6640,7 @@ type PrivateLinkConfig struct { // The private endpoint that is configured for an agent that has access to IP // addresses in a PrivateLink (https://docs.aws.amazon.com/vpc/latest/userguide/endpoint-service.html). // An agent that is configured with this endpoint will not be accessible over - // the public Internet. + // the public internet. PrivateLinkEndpoint *string `min:"7" type:"string"` // The Amazon Resource Names (ARNs) of the security groups that are configured @@ -6155,7 +6654,7 @@ type PrivateLinkConfig struct { // The ID of the VPC endpoint that is configured for an agent. An agent that // is configured with a VPC endpoint will not be accessible over the public - // Internet. + // internet. 
VpcEndpointId *string `type:"string"` } @@ -7058,6 +7557,14 @@ const ( NfsVersionNfs41 = "NFS4_1" ) +const ( + // ObjectStorageServerProtocolHttps is a ObjectStorageServerProtocol enum value + ObjectStorageServerProtocolHttps = "HTTPS" + + // ObjectStorageServerProtocolHttp is a ObjectStorageServerProtocol enum value + ObjectStorageServerProtocolHttp = "HTTP" +) + const ( // OverwriteModeAlways is a OverwriteMode enum value OverwriteModeAlways = "ALWAYS" @@ -7180,6 +7687,14 @@ const ( TaskStatusUnavailable = "UNAVAILABLE" ) +const ( + // TransferModeChanged is a TransferMode enum value + TransferModeChanged = "CHANGED" + + // TransferModeAll is a TransferMode enum value + TransferModeAll = "ALL" +) + const ( // UidNone is a Uid enum value UidNone = "NONE" diff --git a/service/datasync/datasynciface/interface.go b/service/datasync/datasynciface/interface.go index 7196b16cd71..ead86827a4a 100644 --- a/service/datasync/datasynciface/interface.go +++ b/service/datasync/datasynciface/interface.go @@ -80,6 +80,10 @@ type DataSyncAPI interface { CreateLocationNfsWithContext(aws.Context, *datasync.CreateLocationNfsInput, ...request.Option) (*datasync.CreateLocationNfsOutput, error) CreateLocationNfsRequest(*datasync.CreateLocationNfsInput) (*request.Request, *datasync.CreateLocationNfsOutput) + CreateLocationObjectStorage(*datasync.CreateLocationObjectStorageInput) (*datasync.CreateLocationObjectStorageOutput, error) + CreateLocationObjectStorageWithContext(aws.Context, *datasync.CreateLocationObjectStorageInput, ...request.Option) (*datasync.CreateLocationObjectStorageOutput, error) + CreateLocationObjectStorageRequest(*datasync.CreateLocationObjectStorageInput) (*request.Request, *datasync.CreateLocationObjectStorageOutput) + CreateLocationS3(*datasync.CreateLocationS3Input) (*datasync.CreateLocationS3Output, error) CreateLocationS3WithContext(aws.Context, *datasync.CreateLocationS3Input, ...request.Option) (*datasync.CreateLocationS3Output, error) CreateLocationS3Request(*datasync.CreateLocationS3Input) (*request.Request, *datasync.CreateLocationS3Output) @@ -120,6 +124,10 @@ type DataSyncAPI interface { DescribeLocationNfsWithContext(aws.Context, *datasync.DescribeLocationNfsInput, ...request.Option) (*datasync.DescribeLocationNfsOutput, error) DescribeLocationNfsRequest(*datasync.DescribeLocationNfsInput) (*request.Request, *datasync.DescribeLocationNfsOutput) + DescribeLocationObjectStorage(*datasync.DescribeLocationObjectStorageInput) (*datasync.DescribeLocationObjectStorageOutput, error) + DescribeLocationObjectStorageWithContext(aws.Context, *datasync.DescribeLocationObjectStorageInput, ...request.Option) (*datasync.DescribeLocationObjectStorageOutput, error) + DescribeLocationObjectStorageRequest(*datasync.DescribeLocationObjectStorageInput) (*request.Request, *datasync.DescribeLocationObjectStorageOutput) + DescribeLocationS3(*datasync.DescribeLocationS3Input) (*datasync.DescribeLocationS3Output, error) DescribeLocationS3WithContext(aws.Context, *datasync.DescribeLocationS3Input, ...request.Option) (*datasync.DescribeLocationS3Output, error) DescribeLocationS3Request(*datasync.DescribeLocationS3Input) (*request.Request, *datasync.DescribeLocationS3Output) diff --git a/service/ec2/api.go b/service/ec2/api.go index 6ea2559aec0..7d652e9ef5c 100644 --- a/service/ec2/api.go +++ b/service/ec2/api.go @@ -113289,6 +113289,33 @@ const ( // InstanceTypeR6g16xlarge is a InstanceType enum value InstanceTypeR6g16xlarge = "r6g.16xlarge" + // InstanceTypeR6gdMetal is a InstanceType enum value + 
InstanceTypeR6gdMetal = "r6gd.metal" + + // InstanceTypeR6gdMedium is a InstanceType enum value + InstanceTypeR6gdMedium = "r6gd.medium" + + // InstanceTypeR6gdLarge is a InstanceType enum value + InstanceTypeR6gdLarge = "r6gd.large" + + // InstanceTypeR6gdXlarge is a InstanceType enum value + InstanceTypeR6gdXlarge = "r6gd.xlarge" + + // InstanceTypeR6gd2xlarge is a InstanceType enum value + InstanceTypeR6gd2xlarge = "r6gd.2xlarge" + + // InstanceTypeR6gd4xlarge is a InstanceType enum value + InstanceTypeR6gd4xlarge = "r6gd.4xlarge" + + // InstanceTypeR6gd8xlarge is a InstanceType enum value + InstanceTypeR6gd8xlarge = "r6gd.8xlarge" + + // InstanceTypeR6gd12xlarge is a InstanceType enum value + InstanceTypeR6gd12xlarge = "r6gd.12xlarge" + + // InstanceTypeR6gd16xlarge is a InstanceType enum value + InstanceTypeR6gd16xlarge = "r6gd.16xlarge" + // InstanceTypeX116xlarge is a InstanceType enum value InstanceTypeX116xlarge = "x1.16xlarge" @@ -113535,6 +113562,33 @@ const ( // InstanceTypeC6g16xlarge is a InstanceType enum value InstanceTypeC6g16xlarge = "c6g.16xlarge" + // InstanceTypeC6gdMetal is a InstanceType enum value + InstanceTypeC6gdMetal = "c6gd.metal" + + // InstanceTypeC6gdMedium is a InstanceType enum value + InstanceTypeC6gdMedium = "c6gd.medium" + + // InstanceTypeC6gdLarge is a InstanceType enum value + InstanceTypeC6gdLarge = "c6gd.large" + + // InstanceTypeC6gdXlarge is a InstanceType enum value + InstanceTypeC6gdXlarge = "c6gd.xlarge" + + // InstanceTypeC6gd2xlarge is a InstanceType enum value + InstanceTypeC6gd2xlarge = "c6gd.2xlarge" + + // InstanceTypeC6gd4xlarge is a InstanceType enum value + InstanceTypeC6gd4xlarge = "c6gd.4xlarge" + + // InstanceTypeC6gd8xlarge is a InstanceType enum value + InstanceTypeC6gd8xlarge = "c6gd.8xlarge" + + // InstanceTypeC6gd12xlarge is a InstanceType enum value + InstanceTypeC6gd12xlarge = "c6gd.12xlarge" + + // InstanceTypeC6gd16xlarge is a InstanceType enum value + InstanceTypeC6gd16xlarge = "c6gd.16xlarge" + // InstanceTypeCc14xlarge is a InstanceType enum value InstanceTypeCc14xlarge = "cc1.4xlarge" @@ -113927,6 +113981,33 @@ const ( // InstanceTypeM6g16xlarge is a InstanceType enum value InstanceTypeM6g16xlarge = "m6g.16xlarge" + + // InstanceTypeM6gdMetal is a InstanceType enum value + InstanceTypeM6gdMetal = "m6gd.metal" + + // InstanceTypeM6gdMedium is a InstanceType enum value + InstanceTypeM6gdMedium = "m6gd.medium" + + // InstanceTypeM6gdLarge is a InstanceType enum value + InstanceTypeM6gdLarge = "m6gd.large" + + // InstanceTypeM6gdXlarge is a InstanceType enum value + InstanceTypeM6gdXlarge = "m6gd.xlarge" + + // InstanceTypeM6gd2xlarge is a InstanceType enum value + InstanceTypeM6gd2xlarge = "m6gd.2xlarge" + + // InstanceTypeM6gd4xlarge is a InstanceType enum value + InstanceTypeM6gd4xlarge = "m6gd.4xlarge" + + // InstanceTypeM6gd8xlarge is a InstanceType enum value + InstanceTypeM6gd8xlarge = "m6gd.8xlarge" + + // InstanceTypeM6gd12xlarge is a InstanceType enum value + InstanceTypeM6gd12xlarge = "m6gd.12xlarge" + + // InstanceTypeM6gd16xlarge is a InstanceType enum value + InstanceTypeM6gd16xlarge = "m6gd.16xlarge" ) const ( diff --git a/service/frauddetector/api.go b/service/frauddetector/api.go index 3452251e417..daf2b717a99 100644 --- a/service/frauddetector/api.go +++ b/service/frauddetector/api.go @@ -6643,10 +6643,14 @@ type Entity struct { // The entity ID. If you do not know the entityId, you can pass unknown, which // is areserved string literal. 
- EntityId *string `locationName:"entityId" min:"1" type:"string"` + // + // EntityId is a required field + EntityId *string `locationName:"entityId" min:"1" type:"string" required:"true"` // The entity type. - EntityType *string `locationName:"entityType" type:"string"` + // + // EntityType is a required field + EntityType *string `locationName:"entityType" type:"string" required:"true"` } // String returns the string representation @@ -6662,9 +6666,15 @@ func (s Entity) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *Entity) Validate() error { invalidParams := request.ErrInvalidParams{Context: "Entity"} + if s.EntityId == nil { + invalidParams.Add(request.NewErrParamRequired("EntityId")) + } if s.EntityId != nil && len(*s.EntityId) < 1 { invalidParams.Add(request.NewErrParamMinLen("EntityId", 1)) } + if s.EntityType == nil { + invalidParams.Add(request.NewErrParamRequired("EntityType")) + } if invalidParams.Len() > 0 { return invalidParams @@ -6901,9 +6911,6 @@ type ExternalModel struct { // Timestamp of when the model was last created. CreatedTime *string `locationName:"createdTime" type:"string"` - // The event type names. - EventTypeName *string `locationName:"eventTypeName" min:"1" type:"string"` - // The input configuration. InputConfiguration *ModelInputConfiguration `locationName:"inputConfiguration" type:"structure"` @@ -6948,12 +6955,6 @@ func (s *ExternalModel) SetCreatedTime(v string) *ExternalModel { return s } -// SetEventTypeName sets the EventTypeName field's value. -func (s *ExternalModel) SetEventTypeName(v string) *ExternalModel { - s.EventTypeName = &v - return s -} - // SetInputConfiguration sets the InputConfiguration field's value. func (s *ExternalModel) SetInputConfiguration(v *ModelInputConfiguration) *ExternalModel { s.InputConfiguration = v @@ -8969,6 +8970,9 @@ type ModelInputConfiguration struct { // the variable values before being sent to SageMaker. CsvInputTemplate *string `locationName:"csvInputTemplate" type:"string"` + // The event type name. + EventTypeName *string `locationName:"eventTypeName" min:"1" type:"string"` + // The format of the model input configuration. The format differs depending // on if it is passed through to SageMaker or constructed by Amazon Fraud Detector. Format *string `locationName:"format" type:"string" enum:"ModelInputDataFormat"` @@ -8997,6 +9001,9 @@ func (s ModelInputConfiguration) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *ModelInputConfiguration) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ModelInputConfiguration"} + if s.EventTypeName != nil && len(*s.EventTypeName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EventTypeName", 1)) + } if s.UseEventVariables == nil { invalidParams.Add(request.NewErrParamRequired("UseEventVariables")) } @@ -9013,6 +9020,12 @@ func (s *ModelInputConfiguration) SetCsvInputTemplate(v string) *ModelInputConfi return s } +// SetEventTypeName sets the EventTypeName field's value. +func (s *ModelInputConfiguration) SetEventTypeName(v string) *ModelInputConfiguration { + s.EventTypeName = &v + return s +} + // SetFormat sets the Format field's value. func (s *ModelInputConfiguration) SetFormat(v string) *ModelInputConfiguration { s.Format = &v @@ -9704,9 +9717,6 @@ func (s PutEventTypeOutput) GoString() string { type PutExternalModelInput struct { _ struct{} `type:"structure"` - // The event type name. 
- EventTypeName *string `locationName:"eventTypeName" min:"1" type:"string"` - // The model endpoint input configuration. // // InputConfiguration is a required field @@ -9754,9 +9764,6 @@ func (s PutExternalModelInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *PutExternalModelInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "PutExternalModelInput"} - if s.EventTypeName != nil && len(*s.EventTypeName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("EventTypeName", 1)) - } if s.InputConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("InputConfiguration")) } @@ -9805,12 +9812,6 @@ func (s *PutExternalModelInput) Validate() error { return nil } -// SetEventTypeName sets the EventTypeName field's value. -func (s *PutExternalModelInput) SetEventTypeName(v string) *PutExternalModelInput { - s.EventTypeName = &v - return s -} - // SetInputConfiguration sets the InputConfiguration field's value. func (s *PutExternalModelInput) SetInputConfiguration(v *ModelInputConfiguration) *PutExternalModelInput { s.InputConfiguration = v diff --git a/service/glue/api.go b/service/glue/api.go index a2854bf7792..2983b94b3c9 100644 --- a/service/glue/api.go +++ b/service/glue/api.go @@ -10437,6 +10437,100 @@ func (c *Glue) ResetJobBookmarkWithContext(ctx aws.Context, input *ResetJobBookm return out, req.Send() } +const opResumeWorkflowRun = "ResumeWorkflowRun" + +// ResumeWorkflowRunRequest generates a "aws/request.Request" representing the +// client's request for the ResumeWorkflowRun operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ResumeWorkflowRun for more information on using the ResumeWorkflowRun +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ResumeWorkflowRunRequest method. +// req, resp := client.ResumeWorkflowRunRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ResumeWorkflowRun +func (c *Glue) ResumeWorkflowRunRequest(input *ResumeWorkflowRunInput) (req *request.Request, output *ResumeWorkflowRunOutput) { + op := &request.Operation{ + Name: opResumeWorkflowRun, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResumeWorkflowRunInput{} + } + + output = &ResumeWorkflowRunOutput{} + req = c.newRequest(op, input, output) + return +} + +// ResumeWorkflowRun API operation for AWS Glue. +// +// Restarts any completed nodes in a workflow run and resumes the run execution. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation ResumeWorkflowRun for usage and error information. +// +// Returned Error Types: +// * InvalidInputException +// The input provided was not valid. 
+// +// * EntityNotFoundException +// A specified entity does not exist +// +// * InternalServiceException +// An internal service error occurred. +// +// * OperationTimeoutException +// The operation timed out. +// +// * ConcurrentRunsExceededException +// Too many jobs are being run concurrently. +// +// * IllegalWorkflowStateException +// The workflow is in an invalid state to perform a requested operation. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ResumeWorkflowRun +func (c *Glue) ResumeWorkflowRun(input *ResumeWorkflowRunInput) (*ResumeWorkflowRunOutput, error) { + req, out := c.ResumeWorkflowRunRequest(input) + return out, req.Send() +} + +// ResumeWorkflowRunWithContext is the same as ResumeWorkflowRun with the addition of +// the ability to pass a context and additional request options. +// +// See ResumeWorkflowRun for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) ResumeWorkflowRunWithContext(ctx aws.Context, input *ResumeWorkflowRunInput, opts ...request.Option) (*ResumeWorkflowRunOutput, error) { + req, out := c.ResumeWorkflowRunRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opSearchTables = "SearchTables" // SearchTablesRequest generates a "aws/request.Request" representing the @@ -16003,9 +16097,8 @@ type Condition struct { // A logical operator. LogicalOperator *string `type:"string" enum:"LogicalOperator"` - // The condition state. Currently, the only job states that a trigger can listen - // for are SUCCEEDED, STOPPED, FAILED, and TIMEOUT. The only crawler states - // that a trigger can listen for are SUCCEEDED, FAILED, and CANCELLED. + // The condition state. Currently, the values supported are SUCCEEDED, STOPPED, + // TIMEOUT, and FAILED. State *string `type:"string" enum:"JobRunState"` } @@ -18256,9 +18349,10 @@ type CreateJobInput struct { // * When you specify a Python shell job (JobCommand.Name="pythonshell"), // you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. // - // * When you specify an Apache Spark ETL job (JobCommand.Name="glueetl"), - // you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job - // type cannot have a fractional DPU allocation. + // * When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") + // or Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"), you + // can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type + // cannot have a fractional DPU allocation. MaxCapacity *float64 `type:"double"` // The maximum number of times to retry this job if it fails. @@ -22031,7 +22125,7 @@ func (s *DynamoDBTarget) SetScanRate(v float64) *DynamoDBTarget { } // An edge represents a directed connection between two AWS Glue components -// which are part of the workflow the edge belongs to. +// that are part of the workflow the edge belongs to. type Edge struct { _ struct{} `type:"structure"` @@ -27676,14 +27770,16 @@ type Job struct { // Do not set Max Capacity if using WorkerType and NumberOfWorkers. 
// // The value that can be allocated for MaxCapacity depends on whether you are - // running a Python shell job or an Apache Spark ETL job: + // running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming + // ETL job: // // * When you specify a Python shell job (JobCommand.Name="pythonshell"), // you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. // - // * When you specify an Apache Spark ETL job (JobCommand.Name="glueetl"), - // you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job - // type cannot have a fractional DPU allocation. + // * When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") + // or Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"), you + // can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type + // cannot have a fractional DPU allocation. MaxCapacity *float64 `type:"double"` // The maximum number of times to retry this job after a JobRun fails. @@ -27979,7 +28075,8 @@ type JobCommand struct { _ struct{} `type:"structure"` // The name of the job command. For an Apache Spark ETL job, this must be glueetl. - // For a Python shell job, it must be pythonshell. + // For a Python shell job, it must be pythonshell. For an Apache Spark streaming + // ETL job, this must be gluestreaming. Name *string `type:"string"` // The Python version being used to execute a Python shell job. Allowed values @@ -28102,8 +28199,7 @@ type JobRun struct { // The name of the job definition being used in this run. JobName *string `min:"1" type:"string"` - // The current state of the job run. For more information about the statuses - // of jobs that have terminated abnormally, see AWS Glue Job Run Statuses (https://docs.aws.amazon.com/glue/latest/dg/job-run-statuses.html). + // The current state of the job run. JobRunState *string `type:"string" enum:"JobRunState"` // The last time that this job run was modified. @@ -28391,9 +28487,10 @@ type JobUpdate struct { // * When you specify a Python shell job (JobCommand.Name="pythonshell"), // you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. // - // * When you specify an Apache Spark ETL job (JobCommand.Name="glueetl"), - // you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job - // type cannot have a fractional DPU allocation. + // * When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") + // or Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"), you + // can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type + // cannot have a fractional DPU allocation. MaxCapacity *float64 `type:"double"` // The maximum number of times to retry this job if it fails. @@ -29880,8 +29977,8 @@ func (s *NoScheduleException) RequestID() string { return s.RespMetadata.RequestID } -// A node represents an AWS Glue component like Trigger, Job etc. which is part -// of a workflow. +// A node represents an AWS Glue component such as a trigger, or job, etc., +// that is part of a workflow. type Node struct { _ struct{} `type:"structure"` @@ -31037,6 +31134,112 @@ func (s *ResourceUri) SetUri(v string) *ResourceUri { return s } +type ResumeWorkflowRunInput struct { + _ struct{} `type:"structure"` + + // The name of the workflow to resume. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // A list of the node IDs for the nodes you want to restart. The nodes that + // are to be restarted must have an execution attempt in the original run. 
+ // + // NodeIds is a required field + NodeIds []*string `type:"list" required:"true"` + + // The ID of the workflow run to resume. + // + // RunId is a required field + RunId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ResumeWorkflowRunInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResumeWorkflowRunInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResumeWorkflowRunInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResumeWorkflowRunInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.NodeIds == nil { + invalidParams.Add(request.NewErrParamRequired("NodeIds")) + } + if s.RunId == nil { + invalidParams.Add(request.NewErrParamRequired("RunId")) + } + if s.RunId != nil && len(*s.RunId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RunId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *ResumeWorkflowRunInput) SetName(v string) *ResumeWorkflowRunInput { + s.Name = &v + return s +} + +// SetNodeIds sets the NodeIds field's value. +func (s *ResumeWorkflowRunInput) SetNodeIds(v []*string) *ResumeWorkflowRunInput { + s.NodeIds = v + return s +} + +// SetRunId sets the RunId field's value. +func (s *ResumeWorkflowRunInput) SetRunId(v string) *ResumeWorkflowRunInput { + s.RunId = &v + return s +} + +type ResumeWorkflowRunOutput struct { + _ struct{} `type:"structure"` + + // A list of the node IDs for the nodes that were actually restarted. + NodeIds []*string `type:"list"` + + // The new ID assigned to the resumed workflow run. Each resume of a workflow + // run will have a new run ID. + RunId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ResumeWorkflowRunOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResumeWorkflowRunOutput) GoString() string { + return s.String() +} + +// SetNodeIds sets the NodeIds field's value. +func (s *ResumeWorkflowRunOutput) SetNodeIds(v []*string) *ResumeWorkflowRunOutput { + s.NodeIds = v + return s +} + +// SetRunId sets the RunId field's value. +func (s *ResumeWorkflowRunOutput) SetRunId(v string) *ResumeWorkflowRunOutput { + s.RunId = &v + return s +} + // Specifies how Amazon Simple Storage Service (Amazon S3) data should be encrypted. type S3Encryption struct { _ struct{} `type:"structure"` @@ -36881,9 +37084,12 @@ type WorkflowRun struct { // as nodes and directed connections between them as edges. Graph *WorkflowGraph `type:"structure"` - // Name of the workflow which was executed. + // Name of the workflow that was executed. Name *string `min:"1" type:"string"` + // The ID of the previous workflow run. + PreviousRunId *string `min:"1" type:"string"` + // The date and time when the workflow run was started. StartedOn *time.Time `type:"timestamp"` @@ -36928,6 +37134,12 @@ func (s *WorkflowRun) SetName(v string) *WorkflowRun { return s } +// SetPreviousRunId sets the PreviousRunId field's value. +func (s *WorkflowRun) SetPreviousRunId(v string) *WorkflowRun { + s.PreviousRunId = &v + return s +} + // SetStartedOn sets the StartedOn field's value. 
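// A minimal, illustrative sketch (not part of the generated code) of calling
// the new ResumeWorkflowRun operation. The workflow name, run ID, and node
// IDs are placeholders; the nodes must have an execution attempt in the
// original run.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	out, err := svc.ResumeWorkflowRun(&glue.ResumeWorkflowRunInput{
		Name:    aws.String("my-etl-workflow"),                     // placeholder
		RunId:   aws.String("wr_0123456789abcdef0123456789abcdef"), // placeholder
		NodeIds: aws.StringSlice([]string{"node-1", "node-2"}),     // placeholder node IDs
	})
	if err != nil {
		fmt.Println("ResumeWorkflowRun failed:", err)
		return
	}

	// Each resume produces a new run ID; only the nodes that were actually
	// restarted are returned.
	fmt.Println("new run:", aws.StringValue(out.RunId))
	fmt.Println("restarted nodes:", aws.StringValueSlice(out.NodeIds))
}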
func (s *WorkflowRun) SetStartedOn(v time.Time) *WorkflowRun { s.StartedOn = &v @@ -36962,19 +37174,19 @@ func (s *WorkflowRun) SetWorkflowRunProperties(v map[string]*string) *WorkflowRu type WorkflowRunStatistics struct { _ struct{} `type:"structure"` - // Total number of Actions which have failed. + // Total number of Actions that have failed. FailedActions *int64 `type:"integer"` // Total number of Actions in running state. RunningActions *int64 `type:"integer"` - // Total number of Actions which have stopped. + // Total number of Actions that have stopped. StoppedActions *int64 `type:"integer"` - // Total number of Actions which have succeeded. + // Total number of Actions that have succeeded. SucceededActions *int64 `type:"integer"` - // Total number of Actions which timed out. + // Total number of Actions that timed out. TimeoutActions *int64 `type:"integer"` // Total number of Actions in the workflow run. diff --git a/service/glue/glueiface/interface.go b/service/glue/glueiface/interface.go index aafb2300023..f749759d0ab 100644 --- a/service/glue/glueiface/interface.go +++ b/service/glue/glueiface/interface.go @@ -533,6 +533,10 @@ type GlueAPI interface { ResetJobBookmarkWithContext(aws.Context, *glue.ResetJobBookmarkInput, ...request.Option) (*glue.ResetJobBookmarkOutput, error) ResetJobBookmarkRequest(*glue.ResetJobBookmarkInput) (*request.Request, *glue.ResetJobBookmarkOutput) + ResumeWorkflowRun(*glue.ResumeWorkflowRunInput) (*glue.ResumeWorkflowRunOutput, error) + ResumeWorkflowRunWithContext(aws.Context, *glue.ResumeWorkflowRunInput, ...request.Option) (*glue.ResumeWorkflowRunOutput, error) + ResumeWorkflowRunRequest(*glue.ResumeWorkflowRunInput) (*request.Request, *glue.ResumeWorkflowRunOutput) + SearchTables(*glue.SearchTablesInput) (*glue.SearchTablesOutput, error) SearchTablesWithContext(aws.Context, *glue.SearchTablesInput, ...request.Option) (*glue.SearchTablesOutput, error) SearchTablesRequest(*glue.SearchTablesInput) (*request.Request, *glue.SearchTablesOutput) diff --git a/service/ssm/api.go b/service/ssm/api.go index dcea0517a58..f10972d5f1c 100644 --- a/service/ssm/api.go +++ b/service/ssm/api.go @@ -15195,6 +15195,9 @@ func (s *Command) SetTimeoutSeconds(v int64) *Command { } // Describes a command filter. +// +// An instance ID can't be specified when a command status is Pending because +// the command hasn't run on the instance yet. type CommandFilter struct { _ struct{} `type:"structure"` @@ -22236,6 +22239,15 @@ type DescribePatchGroupsInput struct { _ struct{} `type:"structure"` // One or more filters. Use a filter to return a more specific list of results. + // + // For DescribePatchGroups, valid filter keys include the following: + // + // * NAME_PREFIX: The name of the patch group. Wildcards (*) are accepted. + // + // * OPERATING_SYSTEM: The supported operating system type to return results + // for. For valid operating system values, see GetDefaultPatchBaselineRequest$OperatingSystem + // in CreatePatchBaseline. Examples: --filters Key=NAME_PREFIX,Values=MyPatchGroup* + // --filters Key=OPERATING_SYSTEM,Values=AMAZON_LINUX_2 Filters []*PatchOrchestratorFilter `type:"list"` // The maximum number of patch groups to return (per page). @@ -24104,6 +24116,8 @@ type GetCommandInvocationInput struct { // (Optional) The name of the plugin for which you want detailed results. If // the document contains only one plugin, the name can be omitted and the details // will be returned. + // + // Plugin names are also referred to as step names in Systems Manager documents.
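//
// As an illustrative sketch only (svc is assumed to be an SSM client; the
// command ID, instance ID, and plugin name are placeholders), a single
// step's output might be retrieved with:
//
//	svc.GetCommandInvocation(&ssm.GetCommandInvocationInput{
//		CommandId:  aws.String("0831e1a8-475c-4bbf-9c19-EXAMPLE"),
//		InstanceId: aws.String("i-0123456789abcdef0"),
//		PluginName: aws.String("aws:runShellScript"),
//	})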
PluginName *string `min:"4" type:"string"` } @@ -27510,8 +27524,12 @@ type InstanceInformation struct { IPAddress *string `min:"1" type:"string"` // The Amazon Identity and Access Management (IAM) role assigned to the on-premises - // Systems Manager managed instances. This call does not return the IAM role - // for EC2 instances. + // Systems Manager managed instance. This call does not return the IAM role + // for EC2 instances. To retrieve the IAM role for an EC2 instance, use the + // Amazon EC2 DescribeInstances action. For information, see DescribeInstances + // (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html) + // in the Amazon EC2 API Reference or describe-instances (http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html) + // in the AWS CLI Command Reference. IamRole *string `type:"string"` // The instance ID. @@ -27532,7 +27550,17 @@ type InstanceInformation struct { // The last date the association was successfully run. LastSuccessfulAssociationExecutionDate *time.Time `type:"timestamp"` - // The name of the managed instance. + // The name assigned to an on-premises server or virtual machine (VM) when it + // is activated as a Systems Manager managed instance. The name is specified + // as the DefaultInstanceName property using the CreateActivation command. It + // is applied to the managed instance by specifying the Activation Code and + // Activation ID when you install SSM Agent on the instance, as explained in + // Install SSM Agent for a hybrid environment (Linux) (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-install-managed-linux.html) + // and Install SSM Agent for a hybrid environment (Windows) (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-install-managed-win.html). + // To retrieve the Name tag of an EC2 instance, use the Amazon EC2 DescribeInstances + // action. For information, see DescribeInstances (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html) + // in the Amazon EC2 API Reference or describe-instances (http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html) + // in the AWS CLI Command Reference. Name *string `type:"string"` // Connection status of SSM Agent. @@ -32173,6 +32201,9 @@ type ListCommandsInput struct { Filters []*CommandFilter `min:"1" type:"list"` // (Optional) Lists commands issued against this instance ID. + // + // You can't specify an instance ID in the same command that you specify Status + // = Pending. This is because the command has not reached the instance yet. InstanceId *string `type:"string"` // (Optional) The maximum number of items to return for this call. The call @@ -40423,11 +40454,18 @@ type SendCommandInput struct { // --document-version "3" DocumentVersion *string `type:"string"` - // The instance IDs where the command should run. You can specify a maximum - // of 50 IDs. If you prefer not to list individual instance IDs, you can instead - // send commands to a fleet of instances using the Targets parameter, which - // accepts EC2 tags. For more information about how to use targets, see Using - // targets and rate controls to send commands to a fleet (https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html) + // The IDs of the instances where the command should run. Specifying instance + // IDs is most useful when you are targeting a limited number of instances, + // though you can specify up to 50 IDs. 
+ // + // To target a larger number of instances, or if you prefer not to list individual + // instance IDs, we recommend using the Targets option instead. Using Targets, + // which accepts tag key-value pairs to identify the instances to send commands + // to, you can send a command to tens, hundreds, or thousands of instances at + // once. + // + // For more information about how to use targets, see Using targets and rate + // controls to send commands to a fleet (https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html) // in the AWS Systems Manager User Guide. InstanceIds []*string `type:"list"` @@ -40468,10 +40506,17 @@ type SendCommandInput struct { // Service (Amazon SNS) notifications for Run Command commands. ServiceRoleArn *string `type:"string"` - // (Optional) An array of search criteria that targets instances using a Key,Value - // combination that you specify. Targets is required if you don't provide one - // or more instance IDs in the call. For more information about how to use targets, - // see Sending commands to a fleet (https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html) + // An array of search criteria that targets instances using a Key,Value combination + // that you specify. Specifying targets is most useful when you want to send + // a command to a large number of instances at once. Using Targets, which accepts + // tag key-value pairs to identify instances, you can send a command to tens, + // hundreds, or thousands of instances at once. + // + // To send a command to a smaller number of instances, you can use the InstanceIds + // option instead. + // + // For more information about how to use targets, see Sending commands to a + // fleet (https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html) // in the AWS Systems Manager User Guide. Targets []*Target `type:"list"` @@ -42039,9 +42084,11 @@ func (s *Tag) SetValue(v string) *Tag { // // * Key=tag-key,Values=my-tag-key-1,my-tag-key-2 // -// * (Maintenance window targets only) Key=resource-groups:Name,Values=resource-group-name +// * Run Command and Maintenance window targets only: Key=resource-groups:Name,Values=resource-group-name +// +// * Maintenance window targets only: Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2 // -// * (Maintenance window targets only) Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2 +// * Automation targets only: Key=ResourceGroup,Values=resource-group-name // // For example: // @@ -42051,20 +42098,22 @@ func (s *Tag) SetValue(v string) *Tag { // // * Key=tag-key,Values=Name,Instance-Type,CostCenter // -// * (Maintenance window targets only) Key=resource-groups:Name,Values=ProductionResourceGroup +// * Run Command and Maintenance window targets only: Key=resource-groups:Name,Values=ProductionResourceGroup // This example demonstrates how to target all resources in the resource // group ProductionResourceGroup in your maintenance window. // -// * (Maintenance window targets only) Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC +// * Maintenance window targets only: Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC // This example demonstrates how to target only EC2 instances and VPCs in // your maintenance window.
// -// * (State Manager association targets only) Key=InstanceIds,Values=* This +// * Automation targets only: Key=ResourceGroup,Values=MyResourceGroup +// +// * State Manager association targets only: Key=InstanceIds,Values=* This // example demonstrates how to target all managed instances in the AWS Region // where the association was created. // -// For information about how to send commands that target instances using Key,Value -// parameters, see Targeting multiple instances (https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html#send-commands-targeting) +// For more information about how to send commands that target instances using +// Key,Value parameters, see Targeting multiple instances (https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html#send-commands-targeting) // in the AWS Systems Manager User Guide. type Target struct { _ struct{} `type:"structure"`
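//
// As an illustrative sketch only (svc is assumed to be an SSM client; the
// document name, tag key, and tag values are placeholders), a Run Command
// invocation that targets instances by tag rather than by instance ID might
// look like:
//
//	svc.SendCommand(&ssm.SendCommandInput{
//		DocumentName: aws.String("AWS-RunShellScript"),
//		Targets: []*ssm.Target{{
//			Key:    aws.String("tag:Environment"),
//			Values: aws.StringSlice([]string{"Production"}),
//		}},
//		Parameters: map[string][]*string{
//			"commands": aws.StringSlice([]string{"echo hello"}),
//		},
//	})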