From d740a8a3286a31ae47d82cdc6ea569264553b295 Mon Sep 17 00:00:00 2001 From: aws-sdk-go-automation <43143561+aws-sdk-go-automation@users.noreply.github.com> Date: Tue, 2 Jul 2024 14:32:09 -0400 Subject: [PATCH] Release v1.54.13 (2024-07-02) (#5301) Release v1.54.13 (2024-07-02) === ### Service Client Updates * `service/ec2`: Updates service API and documentation * Documentation updates for Elastic Compute Cloud (EC2). * `service/fms`: Updates service API * `service/s3`: Updates service API, documentation, and examples * Added response overrides to Head Object requests. --- CHANGELOG.md | 10 ++ aws/version.go | 2 +- models/apis/ec2/2016-11-15/api-2.json | 1 + models/apis/ec2/2016-11-15/docs-2.json | 16 +- models/apis/fms/2018-01-01/api-2.json | 5 +- models/apis/s3/2006-03-01/api-2.json | 36 +++- models/apis/s3/2006-03-01/docs-2.json | 36 ++-- models/apis/s3/2006-03-01/examples-1.json | 206 +++++++++++----------- service/ec2/api.go | 70 +++++--- service/s3/api.go | 88 +++++++-- service/s3/examples_test.go | 156 ++++++++-------- 11 files changed, 380 insertions(+), 246 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9aff376e7d2..2c05a55f16d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,13 @@ +Release v1.54.13 (2024-07-02) +=== + +### Service Client Updates +* `service/ec2`: Updates service API and documentation + * Documentation updates for Elastic Compute Cloud (EC2). +* `service/fms`: Updates service API +* `service/s3`: Updates service API, documentation, and examples + * Added response overrides to Head Object requests. + Release v1.54.12 (2024-07-01) === diff --git a/aws/version.go b/aws/version.go index c0219fd8f6d..f5727092231 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.54.12" +const SDKVersion = "1.54.13" diff --git a/models/apis/ec2/2016-11-15/api-2.json b/models/apis/ec2/2016-11-15/api-2.json index a1e569d1b9f..3ee66003f4f 100755 --- a/models/apis/ec2/2016-11-15/api-2.json +++ b/models/apis/ec2/2016-11-15/api-2.json @@ -25182,6 +25182,7 @@ "HostTenancy":{ "type":"string", "enum":[ + "default", "dedicated", "host" ] diff --git a/models/apis/ec2/2016-11-15/docs-2.json b/models/apis/ec2/2016-11-15/docs-2.json index 902cc703ccf..304e5cc1da2 100755 --- a/models/apis/ec2/2016-11-15/docs-2.json +++ b/models/apis/ec2/2016-11-15/docs-2.json @@ -1930,7 +1930,7 @@ "BlobAttributeValue": { "base": null, "refs": { - "ModifyInstanceAttributeRequest$UserData": "
Changes the instance's user data to the specified value. If you are using an Amazon Web Services SDK or command line tool, base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide base64-encoded text.
" + "ModifyInstanceAttributeRequest$UserData": "Changes the instance's user data to the specified value. User data must be base64-encoded. Depending on the tool or SDK that you're using, the base64-encoding might be performed for you. For more information, see Work with instance user data.
" } }, "BlockDeviceMapping": { @@ -2064,7 +2064,7 @@ "CreateImageRequest$DryRun": "Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Indicates whether or not the instance should be automatically rebooted before creating the image. Specify one of the following values:
true
- The instance is not rebooted before creating the image. This creates crash-consistent snapshots that include only the data that has been written to the volumes at the time the snapshots are created. Buffered data and data in memory that has not yet been written to the volumes is not included in the snapshots.
false
- The instance is rebooted before creating the image. This ensures that all buffered data and data in memory is written to the volumes before the snapshots are created.
Default: false
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Indicates whether your client's IP address is preserved as the source. The value is true or false.
If true, your client's IP address is used when you connect to a resource.
If false, the elastic network interface IP address is used when you connect to a resource.
Default: true
Indicates whether the client IP address is preserved as the source. The following are the possible values.
true - Use the client IP address as the source.
false - Use the network interface IP address as the source.
Default: false
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
Checks whether you have the required permissions for the action without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
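The DryRun entries above describe a common permissions-check pattern. A minimal, hedged sketch with this SDK (the instance ID and image name are placeholders):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))
	// DryRun asks EC2 to validate permissions without performing the action.
	_, err := svc.CreateImage(&ec2.CreateImageInput{
		InstanceId: aws.String("i-0123456789abcdef0"), // placeholder
		Name:       aws.String("example-image"),       // placeholder
		DryRun:     aws.Bool(true),
	})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "DryRunOperation" {
		fmt.Println("permissions OK: the real request would have been accepted")
	} else {
		fmt.Println("dry run result:", err)
	}
}
```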
The state of the address pool.
" + "ByoipCidr$State": "The state of the address range.
advertised: The address range is being advertised to the internet by Amazon Web Services.
deprovisioned: The address range is deprovisioned.
failed-deprovision: The request to deprovision the address range was unsuccessful. Ensure that all EIPs from the range have been deallocated and try again.
failed-provision: The request to provision the address range was unsuccessful.
pending-deprovision: You’ve submitted a request to deprovision an address range and it's pending.
pending-provision: You’ve submitted a request to provision an address range and it's pending.
provisioned: The address range is provisioned and can be advertised. The range is not currently advertised.
provisioned-not-publicly-advertisable: The address range is provisioned and cannot be advertised.
The number of IPv6 delegated prefixes to be automatically assigned to the network interface. You cannot use this option if you use the Ipv6Prefix option.
[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.
This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.
Only one of SpotMaxPricePercentageOverLowestPrice or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as 999999.
Default: 100
[Price protection] The price protection threshold for On-Demand Instances, as a percentage higher than an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
To turn off price protection, specify a high value, such as 999999.
This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.
If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.
Default: 20
[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.
Only one of SpotMaxPricePercentageOverLowestPrice or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as 999999.
[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.
Only one of SpotMaxPricePercentageOverLowestPrice or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as 999999.
[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.
This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.
Only one of SpotMaxPricePercentageOverLowestPrice or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as 999999.
Default: 100
[Price protection] The price protection threshold for On-Demand Instances, as a percentage higher than an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
To indicate no price protection threshold, specify a high value, such as 999999.
This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.
If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.
Default: 20
[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.
Only one of SpotMaxPricePercentageOverLowestPrice or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as 999999.
[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.
The parameter accepts an integer, which Amazon EC2 interprets as a percentage.
If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.
Only one of SpotMaxPricePercentageOverLowestPrice or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as 999999.
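As a hedged illustration of the thresholds described above, attribute-based selection in this SDK can carry one of the two Spot price-protection parameters; all values below are examples, not recommendations:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Attribute-based instance type selection with a Spot price-protection
	// threshold of 50% of the identified On-Demand price.
	reqs := &ec2.InstanceRequirementsRequest{
		VCpuCount: &ec2.VCpuCountRangeRequest{Min: aws.Int64(2)},
		MemoryMiB: &ec2.MemoryMiBRequest{Min: aws.Int64(4096)},
		// Only one of SpotMaxPricePercentageOverLowestPrice or
		// MaxSpotPriceAsPercentageOfOptimalOnDemandPrice may be set.
		MaxSpotPriceAsPercentageOfOptimalOnDemandPrice: aws.Int64(50),
	}
	fmt.Println(reqs.String())
}
```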
The state of the instance as a 16-bit unsigned integer.
The high byte is all of the bits between 2^8 and (2^16)-1, which equals decimal values between 256 and 65,535. These numerical values are used for internal purposes and should be ignored.
The low byte is all of the bits between 2^0 and (2^8)-1, which equals decimal values between 0 and 255.
The valid values for instance-state-code will all be in the range of the low byte and they are:
0: pending
16: running
32: shutting-down
48: terminated
64: stopping
80: stopped
You can ignore the high byte value by zeroing out all of the bits above 2^8 or 256 in decimal.
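The low-byte masking described in this entry is a one-line bit operation; a small sketch (the sample state code is invented):

```go
package main

import "fmt"

func main() {
	// Example only: a raw instance-state-code with an internal high-byte bit set.
	code := int64(0x0110) // decimal 272
	low := code & 0xFF    // zero out all bits above 2^8, as described above
	fmt.Println(low)      // prints 16, which corresponds to "running"
}
```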
", "InstanceUsage$UsedInstanceCount": "The number of instances the Amazon Web Services account currently has in the Capacity Reservation.
", "IpPermission$FromPort": "If the protocol is TCP or UDP, this is the start of the port range. If the protocol is ICMP or ICMPv6, this is the ICMP type or -1 (all ICMP types).
", @@ -18329,7 +18329,7 @@ "RunInstancesUserData": { "base": null, "refs": { - "RunInstancesRequest$UserData": "The user data script to make available to the instance. For more information, see Run commands on your Amazon EC2 instance at launch in the Amazon EC2 User Guide. If you are using a command line tool, base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide base64-encoded text. User data is limited to 16 KB.
" + "RunInstancesRequest$UserData": "The user data to make available to the instance. User data must be base64-encoded. Depending on the tool or SDK that you're using, the base64-encoding might be performed for you. For more information, see Work with instance user data.
" } }, "RunScheduledInstancesRequest": { @@ -19771,7 +19771,7 @@ "CreateVerifiedAccessTrustProviderRequest$Description": "A description for the Verified Access trust provider.
", "CreateVerifiedAccessTrustProviderRequest$ClientToken": "A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency.
", "CreateVolumePermission$UserId": "The ID of the Amazon Web Services account to be added or removed.
", - "CreateVolumeRequest$OutpostArn": "The Amazon Resource Name (ARN) of the Outpost.
", + "CreateVolumeRequest$OutpostArn": "The Amazon Resource Name (ARN) of the Outpost on which to create the volume.
If you intend to use a volume with an instance running on an outpost, then you must create the volume on the same outpost as the instance. You can't use a volume created in an Amazon Web Services Region with an instance on an Amazon Web Services outpost, or the other way around.
", "CreateVolumeRequest$ClientToken": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensure Idempotency.
", "CreateVpcEndpointConnectionNotificationRequest$ConnectionNotificationArn": "The ARN of the SNS topic for the notifications.
", "CreateVpcEndpointConnectionNotificationRequest$ClientToken": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.
", @@ -23932,7 +23932,7 @@ "refs": { "DescribeVolumeStatusRequest$VolumeIds": "The IDs of the volumes.
Default: Describes all your volumes.
", "DescribeVolumesModificationsRequest$VolumeIds": "The IDs of the volumes.
", - "DescribeVolumesRequest$VolumeIds": "The volume IDs.
", + "DescribeVolumesRequest$VolumeIds": "The volume IDs. If not specified, then all volumes are included in the response.
", "InstanceSpecification$ExcludeDataVolumeIds": "The IDs of the data (non-root) volumes to exclude from the multi-volume snapshot set. If you specify the ID of the root volume, the request fails. To exclude the root volume, use ExcludeBootVolume.
You can specify up to 40 volume IDs per request.
" } }, diff --git a/models/apis/fms/2018-01-01/api-2.json b/models/apis/fms/2018-01-01/api-2.json index 4f729cccb45..c9bfaa4df1b 100644 --- a/models/apis/fms/2018-01-01/api-2.json +++ b/models/apis/fms/2018-01-01/api-2.json @@ -11,7 +11,8 @@ "serviceId":"FMS", "signatureVersion":"v4", "targetPrefix":"AWSFMS_20180101", - "uid":"fms-2018-01-01" + "uid":"fms-2018-01-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "AssociateAdminAccount":{ @@ -1741,7 +1742,7 @@ }, "ManagedServiceData":{ "type":"string", - "max":10000, + "max":30000, "min":1, "pattern":"^((?!\\\\[nr]).)+" }, diff --git a/models/apis/s3/2006-03-01/api-2.json b/models/apis/s3/2006-03-01/api-2.json index cf7aa8e0df0..840a222c1b5 100644 --- a/models/apis/s3/2006-03-01/api-2.json +++ b/models/apis/s3/2006-03-01/api-2.json @@ -11,7 +11,8 @@ "serviceFullName":"Amazon Simple Storage Service", "serviceId":"S3", "signatureVersion":"s3", - "uid":"s3-2006-03-01" + "uid":"s3-2006-03-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "AbortMultipartUpload":{ @@ -1301,7 +1302,8 @@ }, "staticContextParams":{ "UseObjectLambdaEndpoint":{"value":true} - } + }, + "unsignedPayload":true } }, "shapes":{ @@ -4819,6 +4821,36 @@ "location":"header", "locationName":"Range" }, + "ResponseCacheControl":{ + "shape":"ResponseCacheControl", + "location":"querystring", + "locationName":"response-cache-control" + }, + "ResponseContentDisposition":{ + "shape":"ResponseContentDisposition", + "location":"querystring", + "locationName":"response-content-disposition" + }, + "ResponseContentEncoding":{ + "shape":"ResponseContentEncoding", + "location":"querystring", + "locationName":"response-content-encoding" + }, + "ResponseContentLanguage":{ + "shape":"ResponseContentLanguage", + "location":"querystring", + "locationName":"response-content-language" + }, + "ResponseContentType":{ + "shape":"ResponseContentType", + "location":"querystring", + "locationName":"response-content-type" + }, + "ResponseExpires":{ + "shape":"ResponseExpires", + "location":"querystring", + "locationName":"response-expires" + }, "VersionId":{ "shape":"ObjectVersionId", "location":"querystring", diff --git a/models/apis/s3/2006-03-01/docs-2.json b/models/apis/s3/2006-03-01/docs-2.json index 7c38f97c478..b6167257243 100644 --- a/models/apis/s3/2006-03-01/docs-2.json +++ b/models/apis/s3/2006-03-01/docs-2.json @@ -4,7 +4,7 @@ "operations": { "AbortMultipartUpload": "This operation aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.
To verify that all parts have been removed and prevent getting charged for the part storage, you should call the ListParts API operation and ensure that the parts list is empty.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
General purpose bucket permissions - For information about permissions required to use the multipart upload, see Multipart Upload and Permissions in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
The following operations are related to AbortMultipartUpload:
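Following the advice in the AbortMultipartUpload entry above, a hedged sketch that aborts an upload and then checks the parts list (bucket, key, and upload ID are placeholders):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	bucket, key, uploadID := "amzn-s3-demo-bucket", "example-key", "EXAMPLE-UPLOAD-ID"

	// Abort the multipart upload; this frees storage used by uploaded parts.
	_, err := svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(key),
		UploadId: aws.String(uploadID),
	})
	if err != nil {
		fmt.Println("abort failed:", err)
		return
	}

	// Per the note above, verify nothing remains. If part uploads were still
	// in flight, parts may linger; an empty list (or a NoSuchUpload error)
	// means the storage has been freed.
	parts, err := svc.ListParts(&s3.ListPartsInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(key),
		UploadId: aws.String(uploadID),
	})
	if err == nil && len(parts.Parts) == 0 {
		fmt.Println("all parts removed")
	}
}
```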
Completes a multipart upload by assembling previously uploaded parts.
You first initiate the multipart upload and then upload all parts using the UploadPart operation or the UploadPartCopy operation. After successfully uploading all relevant parts of an upload, you call this CompleteMultipartUpload operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the CompleteMultipartUpload request, you must provide the parts list and ensure that the parts list is complete. The CompleteMultipartUpload API operation concatenates the parts that you provide in the list. For each part in the list, you must provide the PartNumber value and the ETag value that are returned after that part was uploaded.
The processing of a CompleteMultipartUpload request could take several minutes to finalize. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. A request could fail after the initial 200 OK response has been sent. This means that a 200 OK response can contain either a success or an error. The error response might be embedded in the 200 OK response. If you call this API operation directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error).
Note that if CompleteMultipartUpload fails, applications should be prepared to retry any failed requests (including 500 error responses). For more information, see Amazon S3 Error Best Practices.
You can't use Content-Type: application/x-www-form-urlencoded for the CompleteMultipartUpload requests. Also, if you don't provide a Content-Type header, CompleteMultipartUpload can still return a 200 OK response.
For more information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession.
Error Code: EntityTooSmall
Description: Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 MB in size, except the last part.
HTTP Status Code: 400 Bad Request
Error Code: InvalidPart
Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified ETag might not have matched the uploaded part's ETag.
HTTP Status Code: 400 Bad Request
Error Code: InvalidPartOrder
Description: The list of parts was not in ascending order. The parts list must be specified in order by part number.
HTTP Status Code: 400 Bad Request
Error Code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
The following operations are related to CompleteMultipartUpload:
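A hedged sketch of the CompleteMultipartUpload call described above, with an ascending parts list (bucket, key, upload ID, and ETags are placeholders):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	// The parts list must be in ascending order by part number, and each
	// entry needs the ETag returned by the corresponding UploadPart call.
	_, err := svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:   aws.String("amzn-s3-demo-bucket"), // placeholder
		Key:      aws.String("example-key"),         // placeholder
		UploadId: aws.String("EXAMPLE-UPLOAD-ID"),   // placeholder
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: []*s3.CompletedPart{
				{PartNumber: aws.Int64(1), ETag: aws.String("\"etag-1\"")},
				{PartNumber: aws.Int64(2), ETag: aws.String("\"etag-2\"")},
			},
		},
	})
	if err != nil {
		// Per the guidance above, failures (including 500s) may be retried.
		fmt.Println("complete failed:", err)
	}
}
```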
Creates a copy of an object that is already stored in Amazon S3.
You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API.
You can copy individual objects between general purpose buckets, between directory buckets, and between general purpose buckets and directory buckets.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable or disable a Region for standalone accounts in the Amazon Web Services Account Management Guide.
Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration.
All CopyObject requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication.
Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject API operation, instead of using the temporary security credentials through the CreateSession API operation.
Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.
You must have read access to the source object and write access to the destination bucket.
General purpose bucket permissions - You must have permissions in an IAM policy based on the source and destination bucket types in a CopyObject operation.
If the source object is in a general purpose bucket, you must have s3:GetObject permission to read the source object that is being copied.
If the destination bucket is a general purpose bucket, you must have s3:PutObject permission to write the object copy to the destination bucket.
Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in a CopyObject operation.
If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket.
If the copy destination is a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to write the object to the destination. The s3express:SessionMode condition key can't be set to ReadOnly on the copy destination bucket.
For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response would not contain the Content-Length. You always need to read the entire response body to check if the copy succeeds. to keep the connection alive while we copy the data.
If the copy is successful, you receive a response with information about the copied object.
A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. A 200 OK response can contain either a success or an error.
If the error occurs before the copy action starts, you receive a standard Amazon S3 error.
If the error occurs during the copy operation, the error response is embedded in the 200 OK response. For example, in a cross-region copy, you may encounter throttling and receive a 200 OK response. For more information, see Resolve the Error 200 response when copying objects to Amazon S3. The 200 OK status code means the copy was accepted, but it doesn't mean the copy is complete. Another example is when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK response. You must stay connected to Amazon S3 until the entire response is successfully received and processed.
If you call this API operation directly, make sure to design your application to parse the content of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error).
The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see Amazon S3 pricing.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
The following operations are related to CopyObject:
Creates a copy of an object that is already stored in Amazon S3.
You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API.
You can copy individual objects between general purpose buckets, between directory buckets, and between general purpose buckets and directory buckets.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable or disable a Region for standalone accounts in the Amazon Web Services Account Management Guide.
Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration.
All CopyObject requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication.
Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject API operation, instead of using the temporary security credentials through the CreateSession API operation.
Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.
You must have read access to the source object and write access to the destination bucket.
General purpose bucket permissions - You must have permissions in an IAM policy based on the source and destination bucket types in a CopyObject operation.
If the source object is in a general purpose bucket, you must have s3:GetObject permission to read the source object that is being copied.
If the destination bucket is a general purpose bucket, you must have s3:PutObject permission to write the object copy to the destination bucket.
Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in a CopyObject operation.
If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket.
If the copy destination is a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to write the object to the destination. The s3express:SessionMode condition key can't be set to ReadOnly on the copy destination bucket.
For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response would not contain the Content-Length. You always need to read the entire response body to check if the copy succeeds.
If the copy is successful, you receive a response with information about the copied object.
A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. A 200 OK response can contain either a success or an error.
If the error occurs before the copy action starts, you receive a standard Amazon S3 error.
If the error occurs during the copy operation, the error response is embedded in the 200 OK response. For example, in a cross-region copy, you may encounter throttling and receive a 200 OK response. For more information, see Resolve the Error 200 response when copying objects to Amazon S3. The 200 OK status code means the copy was accepted, but it doesn't mean the copy is complete. Another example is when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK response. You must stay connected to Amazon S3 until the entire response is successfully received and processed.
If you call this API operation directly, make sure to design your application to parse the content of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error).
The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see Amazon S3 pricing.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
The following operations are related to CopyObject:
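A minimal, hedged CopyObject sketch per the entry above (bucket and key names are placeholders):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	// CopySource is "source-bucket/source-key" (URL-encoded as needed).
	_, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:     aws.String("amzn-s3-demo-dest-bucket"),            // placeholder
		Key:        aws.String("example-key-copy"),                    // placeholder
		CopySource: aws.String("amzn-s3-demo-src-bucket/example-key"), // placeholder
	})
	if err != nil {
		fmt.Println("copy failed:", err)
	}
}
```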
This action creates an Amazon S3 bucket. To create an Amazon S3 on Outposts bucket, see CreateBucket.
Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and have a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous requests are never allowed to create buckets. By creating the bucket, you become the bucket owner.
There are two types of buckets: general purpose buckets and directory buckets. For more information about these bucket types, see Creating, configuring, and working with Amazon S3 buckets in the Amazon S3 User Guide.
General purpose buckets - If you send your CreateBucket request to the s3.amazonaws.com global endpoint, the request goes to the us-east-1 Region. So the signature calculations in Signature Version 4 must use us-east-1 as the Region, even if the location constraint in the request specifies another Region where the bucket is to be created. If you create a bucket in a Region other than US East (N. Virginia), your application must be able to handle 307 redirect. For more information, see Virtual hosting of buckets in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name. Virtual-hosted-style requests aren't supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
General purpose bucket permissions - In addition to the s3:CreateBucket permission, the following permissions are required in a policy when your CreateBucket request includes specific headers:
Access control lists (ACLs) - In your CreateBucket request, if you specify an access control list (ACL) and set it to public-read, public-read-write, authenticated-read, or if you explicitly specify any other custom ACLs, both s3:CreateBucket and s3:PutBucketAcl permissions are required. In your CreateBucket request, if you set the ACL to private, or if you don't specify any ACLs, only the s3:CreateBucket permission is required.
Object Lock - In your CreateBucket request, if you set x-amz-bucket-object-lock-enabled to true, the s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are required.
S3 Object Ownership - If your CreateBucket request includes the x-amz-object-ownership header, then the s3:PutBucketOwnershipControls permission is required.
To set an ACL on a bucket as part of a CreateBucket request, you must explicitly set S3 Object Ownership for the bucket to a different value than the default, BucketOwnerEnforced. Additionally, if your desired bucket ACL grants public access, you must first create the bucket (without the bucket ACL) and then explicitly disable Block Public Access on the bucket before using PutBucketAcl to set the ACL. If you try to create a bucket with a public ACL, the request will fail.
For the majority of modern use cases in S3, we recommend that you keep all Block Public Access settings enabled and keep ACLs disabled. If you would like to share data with users outside of your account, you can use bucket policies as needed. For more information, see Controlling ownership of objects and disabling ACLs for your bucket and Blocking public access to your Amazon S3 storage in the Amazon S3 User Guide.
S3 Block Public Access - If your specific use case requires granting public access to your S3 resources, you can disable Block Public Access. Specifically, you can create a new bucket with Block Public Access enabled, then separately call the DeletePublicAccessBlock API. To use this operation, you must have the s3:PutBucketPublicAccessBlock permission. For more information about S3 Block Public Access, see Blocking public access to your Amazon S3 storage in the Amazon S3 User Guide.
Directory bucket permissions - You must have the s3express:CreateBucket permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.
The permissions for ACLs, Object Lock, S3 Object Ownership, and S3 Block Public Access are not supported for directory buckets. For directory buckets, all Block Public Access settings are enabled at the bucket level and S3 Object Ownership is set to Bucket owner enforced (ACLs disabled). These settings can't be modified.
For more information about permissions for creating and working with directory buckets, see Directory buckets in the Amazon S3 User Guide. For more information about supported S3 features for directory buckets, see Features of S3 Express One Zone in the Amazon S3 User Guide.
Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.
The following operations are related to CreateBucket:
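A hedged CreateBucket sketch reflecting the Region note above (bucket name and Region are placeholders/examples):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	// Outside us-east-1, the target Region must be named in LocationConstraint.
	_, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
			LocationConstraint: aws.String("eu-west-1"), // example Region
		},
	})
	if err != nil {
		fmt.Println("create failed:", err)
	}
}
```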
This action initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request. For more information about multipart uploads, see Multipart Upload Overview in the Amazon S3 User Guide.
After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stops charging you for storing them only after you either complete or abort a multipart upload.
If you have configured a lifecycle rule to abort incomplete multipart uploads, the created multipart upload must be completed within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort action and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration.
Directory buckets - S3 Lifecycle is not supported by directory buckets.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon S3 User Guide.
General purpose bucket permissions - For information about the permissions required to use the multipart upload API, see Multipart upload and permissions in the Amazon S3 User Guide.
To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey* actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see Multipart upload API and permissions and Protecting data using server-side encryption with Amazon Web Services KMS in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession.
General purpose buckets - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. Amazon S3 automatically encrypts all new objects that are uploaded to an S3 bucket. When doing a multipart upload, if you don't specify encryption information in your request, the encryption setting of the uploaded parts is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with a Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the uploaded parts. When you perform a CreateMultipartUpload operation, if you want to use a different type of encryption setting for the uploaded parts, you can request that Amazon S3 encrypts the object with a different encryption key (such as an Amazon S3 managed key, a KMS key, or a customer-provided key). When the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the CreateMultipartUpload request.
Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key (aws/s3) and KMS customer managed keys stored in Key Management Service (KMS) – If you want Amazon Web Services to manage the keys used to encrypt data, specify the following headers in the request.
x-amz-server-side-encryption
x-amz-server-side-encryption-aws-kms-key-id
x-amz-server-side-encryption-context
If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3 key) in KMS to protect the data.
To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey* actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see Multipart upload API and permissions and Protecting data using server-side encryption with Amazon Web Services KMS in the Amazon S3 User Guide.
If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key, then you must have these permissions on the key policy. If your IAM user or role is in a different account from the key, then you must have the permissions on both the key policy and your IAM user or role.
All GET and PUT requests for an object protected by KMS fail if you don't make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), or Signature Version 4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.
For more information about server-side encryption with KMS keys (SSE-KMS), see Protecting Data Using Server-Side Encryption with KMS keys in the Amazon S3 User Guide.
Use customer-provided encryption keys (SSE-C) – If you want to manage your own encryption keys, provide all the following headers in the request.
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about server-side encryption with customer-provided encryption keys (SSE-C), see Protecting data using server-side encryption with customer-provided encryption keys (SSE-C) in the Amazon S3 User Guide.
Directory buckets - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
The following operations are related to CreateMultipartUpload:
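A hedged sketch of initiating a multipart upload with the SSE-KMS headers discussed above (bucket, key, and KMS key alias are placeholders):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	// Request SSE-KMS for the parts of this upload; if SSEKMSKeyId is
	// omitted, the aws/s3 managed key is used, as the entry above notes.
	out, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket:               aws.String("amzn-s3-demo-bucket"), // placeholder
		Key:                  aws.String("example-key"),         // placeholder
		ServerSideEncryption: aws.String("aws:kms"),
		SSEKMSKeyId:          aws.String("alias/example-alias"), // placeholder
	})
	if err != nil {
		fmt.Println("initiate failed:", err)
		return
	}
	// The returned upload ID ties together the subsequent UploadPart and
	// CompleteMultipartUpload/AbortMultipartUpload calls.
	fmt.Println(aws.StringValue(out.UploadId))
}
```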
Creates a session that establishes temporary security credentials to support fast authentication and authorization for the Zonal endpoint APIs on directory buckets. For more information about Zonal endpoint APIs that include the Availability Zone in the request endpoint, see S3 Express One Zone APIs in the Amazon S3 User Guide.
To make Zonal endpoint API requests on a directory bucket, use the CreateSession
API operation. Specifically, you grant s3express:CreateSession
permission to a bucket in a bucket policy or an IAM identity-based policy. Then, you use IAM credentials to make the CreateSession
API request on the bucket, which returns temporary security credentials that include the access key ID, secret access key, session token, and expiration. These credentials have associated permissions to access the Zonal endpoint APIs. After the session is created, you don’t need to use other policies to grant permissions to each Zonal endpoint API individually. Instead, in your Zonal endpoint API requests, you sign your requests by applying the temporary security credentials of the session to the request headers and following the SigV4 protocol for authentication. You also apply the session token to the x-amz-s3session-token
request header for authorization. Temporary security credentials are scoped to the bucket and expire after 5 minutes. After the expiration time, any calls that you make with those credentials will fail. You must use IAM credentials again to make a CreateSession
API request that generates a new set of temporary credentials for use. Temporary credentials cannot be extended or refreshed beyond the original specified interval.
If you use Amazon Web Services SDKs, SDKs handle the session token refreshes automatically to avoid service interruptions when a session expires. We recommend that you use the Amazon Web Services SDKs to initiate and manage requests to the CreateSession API. For more information, see Performance guidelines and design patterns in the Amazon S3 User Guide.
You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
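A minimal aws-sdk-go sketch of requesting session credentials directly, assuming a hypothetical directory bucket name (in practice the SDKs create and refresh these sessions for you):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.CreateSession(&s3.CreateSessionInput{
		Bucket:      aws.String("examplebucket--usw2-az1--x-s3"), // placeholder
		SessionMode: aws.String("ReadWrite"),
	})
	if err != nil {
		log.Fatal(err)
	}
	c := out.Credentials
	// The session token is applied to the x-amz-s3session-token header on
	// subsequent Zonal endpoint requests; credentials expire after 5 minutes.
	fmt.Println("access key:", aws.StringValue(c.AccessKeyId))
	fmt.Println("expires:", aws.TimeValue(c.Expiration))
}
```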
CopyObject
API operation - Unlike other Zonal endpoint APIs, the CopyObject
API operation doesn't use the temporary security credentials returned from the CreateSession
API operation for authentication and authorization. For information about authentication and authorization of the CopyObject
API operation on directory buckets, see CopyObject.
HeadBucket
API operation - Unlike other Zonal endpoint APIs, the HeadBucket
API operation doesn't use the temporary security credentials returned from the CreateSession
API operation for authentication and authorization. For information about authentication and authorization of the HeadBucket
API operation on directory buckets, see HeadBucket.
To obtain temporary security credentials, you must create a bucket policy or an IAM identity-based policy that grants s3express:CreateSession
permission to the bucket. In a policy, you can use the s3express:SessionMode
condition key to control who can create a ReadWrite
or ReadOnly
session. For more information about ReadWrite
or ReadOnly
sessions, see x-amz-create-session-mode
. For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
To grant cross-account access to Zonal endpoint APIs, the bucket policy should also grant both accounts the s3express:CreateSession
permission.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
This operation is not supported by directory buckets.
This action removes the website configuration for a bucket. Amazon S3 returns a 200 OK
response upon successfully deleting a website configuration on the specified bucket. You will get a 200 OK
response if the website configuration you are trying to delete does not exist on the bucket. Amazon S3 returns a 404
response if the bucket specified in the request does not exist.
This DELETE action requires the S3:DeleteBucketWebsite
permission. By default, only the bucket owner can delete the website configuration attached to a bucket. However, bucket owners can grant other users permission to delete the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite
permission.
For more information about hosting websites, see Hosting Websites on Amazon S3.
The following operations are related to DeleteBucketWebsite
:
Removes an object from a bucket. The behavior depends on the bucket's versioning state:
If bucket versioning is not enabled, the operation permanently deletes the object.
If bucket versioning is enabled, the operation inserts a delete marker, which becomes the current version of the object. To permanently delete an object in a versioned bucket, you must include the object’s versionId
in the request. For more information about versioning-enabled buckets, see Deleting object versions from a versioning-enabled bucket.
If bucket versioning is suspended, the operation removes the object that has a null versionId
, if there is one, and inserts a delete marker that becomes the current version of the object. If there isn't an object with a null versionId
, and all versions of the object have a versionId
, Amazon S3 does not remove the object and only inserts a delete marker. To permanently delete an object that has a versionId
, you must include the object’s versionId
in the request. For more information about versioning-suspended buckets, see Deleting objects from versioning-suspended buckets.
Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null
value of the version ID is supported by directory buckets. You can only specify null
to the versionId
query parameter in the request.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
To remove a specific version, you must use the versionId
query parameter. Using this query parameter permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header x-amz-delete-marker
to true.
If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled, you must include the x-amz-mfa
request header in the DELETE versionId
request. Requests that include x-amz-mfa
must use HTTPS. For more information about MFA Delete, see Using MFA Delete in the Amazon S3 User Guide. To see sample requests that use versioning, see Sample Request.
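A minimal aws-sdk-go sketch of permanently deleting one object version from an MFA delete enabled bucket; the bucket, key, version ID, and MFA serial/code values are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.DeleteObject(&s3.DeleteObjectInput{
		Bucket:    aws.String("examplebucket"),      // placeholder
		Key:       aws.String("exampleobject"),      // placeholder
		VersionId: aws.String("example-version-id"), // placeholder
		// MFA is "<device serial> <auth code>" and is required only when the
		// bucket has MFA delete enabled; such requests must use HTTPS.
		MFA: aws.String("arn:aws:iam::123456789012:mfa/user 123456"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("delete marker:", aws.BoolValue(out.DeleteMarker))
}
```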
Directory buckets - MFA delete is not supported by directory buckets.
You can delete objects either by explicitly calling the DELETE Object API or by configuring a lifecycle rule (PutBucketLifecycle) so that Amazon S3 removes them for you. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the s3:DeleteObject
, s3:DeleteObjectVersion
, and s3:PutLifeCycleConfiguration
actions.
Directory buckets - S3 Lifecycle is not supported by directory buckets.
General purpose bucket permissions - The following permissions are required in your policies when your DeleteObjects
request includes specific headers.
s3:DeleteObject
- To delete an object from a bucket, you must always have the s3:DeleteObject
permission.
s3:DeleteObjectVersion
- To delete a specific version of an object from a versioning-enabled bucket, you must have the s3:DeleteObjectVersion
permission.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI and SDKs create the session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following action is related to DeleteObject
:
This operation is not supported by directory buckets.
Removes the entire tag set from the specified object. For more information about managing object tags, see Object Tagging.
To use this operation, you must have permission to perform the s3:DeleteObjectTagging
action.
To delete tags of a specific object version, add the versionId
query parameter in the request. You will need permission for the s3:DeleteObjectVersionTagging
action.
The following operations are related to DeleteObjectTagging
:
This operation enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this operation provides a suitable alternative to sending individual delete requests, reducing per-request overhead.
The request can contain a list of up to 1000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete operation and returns the result of that delete, success or failure, in the response. Note that if the object specified in the request is not found, Amazon S3 returns the result as deleted.
Directory buckets - S3 Versioning isn't enabled and supported for directory buckets.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
The operation supports two modes for the response: verbose and quiet. By default, the operation uses verbose mode, in which the response includes the result of the deletion of each key in your request. In quiet mode, the response includes only keys where the delete operation encountered an error. For a successful deletion in quiet mode, the operation does not return any information about the delete in the response body.
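A minimal aws-sdk-go sketch of a quiet-mode Multi-Object Delete, with placeholder bucket and key names; only failed deletions come back in the response:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket: aws.String("examplebucket"), // placeholder
		Delete: &s3.Delete{
			Objects: []*s3.ObjectIdentifier{
				{Key: aws.String("objectkey1")}, // placeholders
				{Key: aws.String("objectkey2")},
			},
			Quiet: aws.Bool(true), // only errors are reported back
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range out.Errors {
		log.Printf("failed to delete %s: %s",
			aws.StringValue(e.Key), aws.StringValue(e.Message))
	}
}
```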
When performing this action on an MFA delete enabled bucket with a request that attempts to delete any versioned objects, you must include an MFA token. If you do not provide one, the entire request will fail, even if there are non-versioned objects that you are trying to delete. If you provide an invalid token, the entire Multi-Object Delete request will fail whether or not there are versioned keys in the request. For information about MFA Delete, see MFA Delete in the Amazon S3 User Guide.
Directory buckets - MFA delete is not supported by directory buckets.
General purpose bucket permissions - The following permissions are required in your policies when your DeleteObjects
request includes specific headers.
s3:DeleteObject
- To delete an object from a bucket, you must always specify the s3:DeleteObject
permission.
s3:DeleteObjectVersion
- To delete a specific version of an object from a versioning-enabled bucket, you must specify the s3:DeleteObjectVersion
permission.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI and SDKs create the session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
General purpose bucket - The Content-MD5 request header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit.
Directory bucket - The Content-MD5 request header or an additional checksum request header (including x-amz-checksum-crc32
, x-amz-checksum-crc32c
, x-amz-checksum-sha1
, or x-amz-checksum-sha256
) is required for all Multi-Object Delete requests.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to DeleteObjects
:
This operation enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this operation provides a suitable alternative to sending individual delete requests, reducing per-request overhead.
The request can contain a list of up to 1000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete operation and returns the result of that delete, success or failure, in the response. Note that if the object specified in the request is not found, Amazon S3 returns the result as deleted.
Directory buckets - S3 Versioning isn't enabled and supported for directory buckets.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
The operation supports two modes for the response: verbose and quiet. By default, the operation uses verbose mode, in which the response includes the result of the deletion of each key in your request. In quiet mode, the response includes only keys where the delete operation encountered an error. For a successful deletion in quiet mode, the operation does not return any information about the delete in the response body.
When performing this action on an MFA delete enabled bucket with a request that attempts to delete any versioned objects, you must include an MFA token. If you do not provide one, the entire request will fail, even if there are non-versioned objects that you are trying to delete. If you provide an invalid token, the entire Multi-Object Delete request will fail whether or not there are versioned keys in the request. For information about MFA Delete, see MFA Delete in the Amazon S3 User Guide.
Directory buckets - MFA delete is not supported by directory buckets.
General purpose bucket permissions - The following permissions are required in your policies when your DeleteObjects
request includes specific headers.
s3:DeleteObject
- To delete an object from a bucket, you must always specify the s3:DeleteObject
permission.
s3:DeleteObjectVersion
- To delete a specific version of an object from a versioning-enabled bucket, you must specify the s3:DeleteObjectVersion
permission.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. The Amazon Web Services CLI and SDKs create the session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
General purpose bucket - The Content-MD5 request header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit.
Directory bucket - The Content-MD5 request header or an additional checksum request header (including x-amz-checksum-crc32
, x-amz-checksum-crc32c
, x-amz-checksum-sha1
, or x-amz-checksum-sha256
) is required for all Multi-Object Delete requests.
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to DeleteObjects
:
This operation is not supported by directory buckets.
Removes the PublicAccessBlock
configuration for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock
permission. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
The following operations are related to DeletePublicAccessBlock
:
This operation is not supported by directory buckets.
This implementation of the GET action uses the accelerate
subresource to return the Transfer Acceleration state of a bucket, which is either Enabled
or Suspended
. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to and from Amazon S3.
To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon S3 User Guide.
You set the Transfer Acceleration state of an existing bucket to Enabled
or Suspended
by using the PutBucketAccelerateConfiguration operation.
A GET accelerate
request does not return a state value for a bucket that has no transfer acceleration state. A bucket has no Transfer Acceleration state if a state has never been set on the bucket.
For more information about transfer acceleration, see Transfer Acceleration in the Amazon S3 User Guide.
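A minimal aws-sdk-go sketch, with a placeholder bucket name; note that Status is unset for a bucket whose acceleration state has never been configured:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetBucketAccelerateConfiguration(&s3.GetBucketAccelerateConfigurationInput{
		Bucket: aws.String("examplebucket"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	if out.Status == nil {
		fmt.Println("no Transfer Acceleration state has been set")
	} else {
		fmt.Println("state:", aws.StringValue(out.Status)) // Enabled or Suspended
	}
}
```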
The following operations are related to GetBucketAccelerateConfiguration
:
This operation is not supported by directory buckets.
This implementation of the GET
action uses the acl
subresource to return the access control list (ACL) of a bucket. To use GET
to return the ACL of the bucket, you must have the READ_ACP
access to the bucket. If READ_ACP
permission is granted to the anonymous user, you can return the ACL of the bucket without using an authorization header.
When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.
When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. If the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError
is returned. For more information about InvalidAccessPointAliasError
, see List of Error Codes.
If your bucket uses the bucket owner enforced setting for S3 Object Ownership, requests to read ACLs are still supported and return the bucket-owner-full-control
ACL with the owner being the account that created the bucket. For more information, see Controlling object ownership and disabling ACLs in the Amazon S3 User Guide.
The following operations are related to GetBucketAcl
:
This operation is not supported by directory buckets.
Restores an archived copy of an object back into Amazon S3.
This functionality is not supported for Amazon S3 on Outposts.
This action performs the following type of request:
restore an archive - Restore an archived object
For more information about the S3
structure in the request body, see the following:
Managing Access with ACLs in the Amazon S3 User Guide
Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide
To use this operation, you must have permissions to perform the s3:RestoreObject
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.
Objects that you archive to the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage classes, you must first initiate a restore request, and then wait until a temporary copy of the object is available. If you want a permanent copy of the object, create a copy of it in the Amazon S3 Standard storage class in your S3 bucket. To access an archived object, you must restore the object for the duration (number of days) that you specify. For objects in the Archive Access or Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier.
To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.
When restoring an archived object, you can specify one of the following data access tier options in the Tier
element of the request body:
Expedited
- Expedited retrievals allow you to quickly access your data stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests for restoring archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals is typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.
Standard
- Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for retrieval requests that do not specify the retrieval option. Standard retrievals typically finish within 3–5 hours for objects stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored in S3 Intelligent-Tiering.
Bulk
- Bulk retrievals are free for objects stored in the S3 Glacier Flexible Retrieval and S3 Intelligent-Tiering storage classes, enabling you to retrieve large amounts, even petabytes, of data at no cost. Bulk retrievals typically finish within 5–12 hours for objects stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. Bulk retrievals are also the lowest-cost retrieval option when restoring objects from S3 Glacier Deep Archive. They typically finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.
For more information about archive retrieval options and provisioned capacity for Expedited
data access, see Restoring Archived Objects in the Amazon S3 User Guide.
You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon S3 User Guide.
To get the status of object restoration, you can send a HEAD
request. Operations return the x-amz-restore
header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon S3 User Guide.
After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request; there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.
If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in the Amazon S3 User Guide.
A successful action returns either the 200 OK
or 202 Accepted
status code.
If the object is not previously restored, then Amazon S3 returns 202 Accepted
in the response.
If the object is previously restored, Amazon S3 returns 200 OK
in the response.
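A minimal aws-sdk-go sketch of restoring an archived object for 10 days using the Standard tier, then checking progress via the x-amz-restore header; bucket and key names are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.RestoreObject(&s3.RestoreObjectInput{
		Bucket: aws.String("examplebucket"),  // placeholder
		Key:    aws.String("archivedobject"), // placeholder
		RestoreRequest: &s3.RestoreRequest{
			Days: aws.Int64(10),
			GlacierJobParameters: &s3.GlacierJobParameters{
				Tier: aws.String("Standard"),
			},
		},
	})
	if err != nil {
		log.Fatal(err) // e.g. RestoreAlreadyInProgress
	}

	// Poll restoration status; the Restore field mirrors x-amz-restore.
	head, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String("examplebucket"),
		Key:    aws.String("archivedobject"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("restore status:", aws.StringValue(head.Restore))
}
```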
Special errors:
Code: RestoreAlreadyInProgress
Cause: Object restore is already in progress.
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: GlacierExpeditedRetrievalNotAvailable
Cause: Expedited retrievals are currently not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard or Bulk retrievals.)
HTTP Status Code: 503
SOAP Fault Code Prefix: N/A
The following operations are related to RestoreObject
:
This operation is not supported by directory buckets.
This action filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.
This functionality is not supported for Amazon S3 on Outposts.
For more information about Amazon S3 Select, see Selecting Content from Objects and SELECT Command in the Amazon S3 User Guide.
You must have the s3:GetObject
permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon S3 User Guide.
You can use Amazon S3 Select to query objects that have the following format properties:
CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format.
UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports.
GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression for Parquet objects.
Server-side encryption - Amazon S3 Select supports querying objects that are protected with server-side encryption.
For objects that are encrypted with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use the headers that are documented in GetObject. For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.
For objects that are encrypted with Amazon S3 managed keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side encryption is handled transparently, so you don't need to specify anything. For more information about server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide.
Because the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding
header with chunked
as its value in the response. For more information, see Appendix: SelectObjectContent Response.
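A minimal aws-sdk-go sketch of querying a CSV object and draining the event stream; the bucket, key, and SQL expression are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	resp, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
		Bucket:         aws.String("examplebucket"), // placeholder
		Key:            aws.String("data.csv"),      // placeholder
		Expression:     aws.String("SELECT s._1 FROM S3Object s"),
		ExpressionType: aws.String("SQL"),
		InputSerialization: &s3.InputSerialization{
			CSV: &s3.CSVInput{FileHeaderInfo: aws.String("NONE")},
		},
		OutputSerialization: &s3.OutputSerialization{
			CSV: &s3.CSVOutput{},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer resp.EventStream.Close()

	// The response is streamed as a series of messages; Records events
	// carry the query results.
	for ev := range resp.EventStream.Events() {
		if rec, ok := ev.(*s3.RecordsEvent); ok {
			fmt.Print(string(rec.Payload))
		}
	}
	if err := resp.EventStream.Err(); err != nil {
		log.Fatal(err)
	}
}
```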
The SelectObjectContent
action does not support the following GetObject
functionality. For more information, see GetObject.
Range
: Although you can specify a scan range for an Amazon S3 Select request (see SelectObjectContentRequest - ScanRange in the request parameters), you cannot specify the range of bytes of an object to return.
The GLACIER
, DEEP_ARCHIVE
, and REDUCED_REDUNDANCY
storage classes, or the ARCHIVE_ACCESS
and DEEP_ARCHIVE_ACCESS
access tiers of the INTELLIGENT_TIERING
storage class: You cannot query objects in the GLACIER
, DEEP_ARCHIVE
, or REDUCED_REDUNDANCY
storage classes, nor objects in the ARCHIVE_ACCESS
or DEEP_ARCHIVE_ACCESS
access tiers of the INTELLIGENT_TIERING
storage class. For more information about storage classes, see Using Amazon S3 storage classes in the Amazon S3 User Guide.
For a list of special errors for this operation, see List of SELECT Object Content Error Codes.
The following operations are related to SelectObjectContent
:
Uploads a part in a multipart upload.
In this operation, you provide new data as a part of an object in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the UploadPartCopy operation.
You must initiate a multipart upload (see CreateMultipartUpload) before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier that you must include in your upload part request.
Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object being created. If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten.
For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide.
After you initiate a multipart upload and upload one or more parts, you must either complete or abort the multipart upload to stop getting charged for storage of the uploaded parts. Only after you either complete or abort the multipart upload does Amazon S3 free up the parts storage and stop charging you for it.
For more information on multipart uploads, see Multipart Upload Overview in the Amazon S3 User Guide.
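A minimal aws-sdk-go sketch of uploading a single part; the bucket, key, upload ID, and part data are placeholders, and the returned ETag must be collected for CompleteMultipartUpload:

```go
package main

import (
	"bytes"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.UploadPart(&s3.UploadPartInput{
		Bucket:     aws.String("examplebucket"),     // placeholder
		Key:        aws.String("exampleobject"),     // placeholder
		UploadId:   aws.String("example-upload-id"), // from CreateMultipartUpload
		PartNumber: aws.Int64(1),                    // 1 to 10,000
		Body:       bytes.NewReader([]byte("part data")),
	})
	if err != nil {
		log.Fatal(err)
	}
	// Keep each part's ETag; CompleteMultipartUpload requires them.
	log.Println("ETag:", aws.StringValue(out.ETag))
}
```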
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
General purpose bucket permissions - For information on the permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession
API operation for session-based authorization. Specifically, you grant the s3express:CreateSession
permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession
API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession
API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession
.
General purpose bucket - To ensure that data is not corrupted traversing the network, specify the Content-MD5
header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error. If the upload request is signed with Signature Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256
header as a checksum instead of Content-MD5
. For more information, see Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4).
Directory buckets - MD5 is not supported by directory buckets. You can use checksum algorithms to check object integrity.
General purpose bucket - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You have mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption with other key options. The option you use depends on whether you want to use KMS keys (SSE-KMS) or provide your own encryption key (SSE-C).
Server-side encryption is supported by the S3 Multipart Upload operations. Unless you are using a customer-provided encryption key (SSE-C), you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more information, see CreateMultipartUpload.
If you request server-side encryption using a customer-provided encryption key (SSE-C) in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following request headers.
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
Directory bucket - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
For more information, see Using Server-Side Encryption in the Amazon S3 User Guide.
Error Code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to UploadPart
:
Uploads a part by copying data from an existing object as the data source. To specify the data source, you add the request header x-amz-copy-source
in your request. To specify a byte range, you add the request header x-amz-copy-source-range
in your request.
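A minimal aws-sdk-go sketch; the CopySource and CopySourceRange fields populate the x-amz-copy-source and x-amz-copy-source-range headers, and all names and the upload ID are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.UploadPartCopy(&s3.UploadPartCopyInput{
		Bucket:          aws.String("destinationbucket"), // placeholder
		Key:             aws.String("destinationobject"), // placeholder
		UploadId:        aws.String("example-upload-id"), // placeholder
		PartNumber:      aws.Int64(2),
		CopySource:      aws.String("sourcebucket/sourceobject"), // placeholder
		CopySourceRange: aws.String("bytes=0-5242879"),           // first 5 MiB
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("ETag:", aws.StringValue(out.CopyPartResult.ETag))
}
```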
For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide.
Instead of copying data from an existing object as part data, you might use the UploadPart action to upload new data as a part of an object in your request.
You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns the upload ID, a unique identifier that you must include in your upload part request.
For conceptual information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide. For information about copying objects using a single atomic action vs. a multipart upload, see Operations on Objects in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
All UploadPartCopy
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including x-amz-copy-source
, must be signed. For more information, see REST Authentication.
Directory buckets - You must use IAM credentials to authenticate and authorize your access to the UploadPartCopy
API operation, instead of using the temporary security credentials through the CreateSession
API operation.
The Amazon Web Services CLI and SDKs handle authentication and authorization on your behalf.
You must have READ
access to the source object and WRITE
access to the destination bucket.
General purpose bucket permissions - You must have the permissions in a policy based on the bucket types of your source bucket and destination bucket in an UploadPartCopy
operation.
If the source object is in a general purpose bucket, you must have the s3:GetObject
permission to read the source object that is being copied.
If the destination bucket is a general purpose bucket, you must have the s3:PutObject
permission to write the object copy to the destination bucket.
For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.
Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in an UploadPartCopy
operation.
If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to read the object. By default, the session is in the ReadWrite
mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode
condition key to ReadOnly
on the copy source bucket.
If the copy destination is a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to write the object to the destination. The s3express:SessionMode
condition key cannot be set to ReadOnly
on the copy destination.
For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
General purpose buckets - For information about using server-side encryption with customer-provided encryption keys with the UploadPartCopy
operation, see CopyObject and UploadPart.
Directory buckets - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
Error Code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
Error Code: InvalidRequest
Description: The specified copy source is not supported as a byte-range copy source.
HTTP Status Code: 400 Bad Request
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to UploadPartCopy
:
Uploads a part by copying data from an existing object as the data source. To specify the data source, you add the request header x-amz-copy-source
in your request. To specify a byte range, you add the request header x-amz-copy-source-range
in your request.
For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide.
Instead of copying data from an existing object as part data, you might use the UploadPart action to upload new data as a part of an object in your request.
You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns the upload ID, a unique identifier that you must include in your upload part request.
For conceptual information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide. For information about copying objects using a single atomic action vs. a multipart upload, see Operations on Objects in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.
All UploadPartCopy
requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz-
prefix, including x-amz-copy-source
, must be signed. For more information, see REST Authentication.
Directory buckets - You must use IAM credentials to authenticate and authorize your access to the UploadPartCopy
API operation, instead of using the temporary security credentials through the CreateSession
API operation.
The Amazon Web Services CLI and SDKs handle authentication and authorization on your behalf.
You must have READ
access to the source object and WRITE
access to the destination bucket.
General purpose bucket permissions - You must have the permissions in a policy based on the bucket types of your source bucket and destination bucket in an UploadPartCopy
operation.
If the source object is in a general purpose bucket, you must have the s3:GetObject
permission to read the source object that is being copied.
If the destination bucket is a general purpose bucket, you must have the s3:PutObject
permission to write the object copy to the destination bucket.
For information about permissions required to use the multipart upload API, see Multipart upload API and permissions in the Amazon S3 User Guide.
Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in an UploadPartCopy
operation.
If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to read the object. By default, the session is in the ReadWrite
mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode
condition key to ReadOnly
on the copy source bucket.
If the copy destination is a directory bucket, you must have the s3express:CreateSession
permission in the Action
element of a policy to write the object to the destination. The s3express:SessionMode
condition key cannot be set to ReadOnly
on the copy destination.
For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.
General purpose buckets - For information about using server-side encryption with customer-provided encryption keys with the UploadPartCopy
operation, see CopyObject and UploadPart.
Directory buckets - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256
) is supported.
Error Code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
Error Code: InvalidRequest
Description: The specified copy source is not supported as a byte-range copy source.
HTTP Status Code: 400 Bad Request
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com
.
The following operations are related to UploadPartCopy
:
This operation is not supported by directory buckets.
Passes transformed objects to a GetObject
operation when using Object Lambda access points. For information about Object Lambda access points, see Transforming objects with Object Lambda access points in the Amazon S3 User Guide.
This operation supports metadata that can be returned by GetObject, in addition to RequestRoute
, RequestToken
, StatusCode
, ErrorCode
, and ErrorMessage
. The GetObject
response metadata is supported so that the WriteGetObjectResponse
caller, typically a Lambda function, can provide the same metadata when it internally invokes GetObject
. When WriteGetObjectResponse
is called by a customer-owned Lambda function, the metadata returned to the end user GetObject
call might differ from what Amazon S3 would normally return.
You can include any number of metadata headers. When including a metadata header, it should be prefaced with x-amz-meta
. For example, x-amz-meta-my-custom-header: MyCustomValue
. The primary use case for this is to forward GetObject
metadata.
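A minimal aws-sdk-go sketch of the call an Object Lambda function might make; the route, token, and body values would come from the triggering event and are placeholders here:

```go
package main

import (
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

// respond forwards a transformed object back to the waiting GetObject caller.
func respond(svc *s3.S3, route, token, transformed string) error {
	_, err := svc.WriteGetObjectResponse(&s3.WriteGetObjectResponseInput{
		RequestRoute: aws.String(route),
		RequestToken: aws.String(token),
		StatusCode:   aws.Int64(200),
		Metadata: map[string]*string{
			// Returned to the caller as x-amz-meta-my-custom-header.
			"my-custom-header": aws.String("MyCustomValue"),
		},
		Body: strings.NewReader(transformed),
	})
	return err
}

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	// Placeholder values; real ones arrive in the Object Lambda event.
	_ = respond(svc, "example-route", "example-token", "transformed body")
}
```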
Amazon Web Services provides some prebuilt Lambda functions that you can use with S3 Object Lambda to detect and redact personally identifiable information (PII) and decompress S3 objects. These Lambda functions are available in the Amazon Web Services Serverless Application Repository, and can be selected through the Amazon Web Services Management Console when you create your Object Lambda access point.
Example 1: PII Access Control - This Lambda function uses Amazon Comprehend, a natural language processing (NLP) service using machine learning to find insights and relationships in text. It automatically detects personally identifiable information (PII) such as names, addresses, dates, credit card numbers, and social security numbers from documents in your Amazon S3 bucket.
Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, a natural language processing (NLP) service using machine learning to find insights and relationships in text. It automatically redacts personally identifiable information (PII) such as names, addresses, dates, credit card numbers, and social security numbers from documents in your Amazon S3 bucket.
Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression is equipped to decompress objects stored in S3 in one of six compressed file formats: bzip2, gzip, snappy, zlib, zstandard, and ZIP.
For information on how to view and use these functions, see Using Amazon Web Services built Lambda functions in the Amazon S3 User Guide.
" }, "shapes": { @@ -1404,10 +1404,10 @@ "ListMultipartUploadsRequest$EncodingType": null, "ListObjectVersionsOutput$EncodingType": "Encoding type used by Amazon S3 to encode object key names in the XML response.
If you specify the encoding-type
request parameter, Amazon S3 includes this element in the response, and returns encoded key name values in the following response elements:
KeyMarker, NextKeyMarker, Prefix, Key
, and Delimiter
.
Encoding type used by Amazon S3 to encode object keys in the response. If using url
, non-ASCII characters used in an object's key name will be URL encoded. For example, the object test_file(3).png will appear as test_file%283%29.png.
Encoding type used by Amazon S3 to encode object keys in the response. If using url
, non-ASCII characters used in an object's key name will be URL encoded. For example, the object test_file(3).png
will appear as test_file%283%29.png
.
Encoding type used by Amazon S3 to encode object key names in the XML response.
If you specify the encoding-type
request parameter, Amazon S3 includes this element in the response, and returns encoded key name values in the following response elements:
Delimiter, Prefix, Key,
and StartAfter
.
Encoding type used by Amazon S3 to encode object keys in the response. If using url
, non-ASCII characters used in an object's key name will be URL encoded. For example, the object test_file(3).png will appear as test_file%283%29.png.
Encoding type used by Amazon S3 to encode object keys in the response. If using url
, non-ASCII characters used in an object's key name will be URL encoded. For example, the object test_file(3).png
will appear as test_file%283%29.png
.
Sets the Cache-Control
header of the response.
Sets the Cache-Control
header of the response.
Sets the Cache-Control
header of the response.
Sets the Content-Disposition
header of the response.
Sets the Content-Disposition
header of the response.
Sets the Content-Disposition
header of the response.
Sets the Content-Encoding
header of the response.
Sets the Content-Encoding
header of the response.
Sets the Content-Encoding
header of the response.
Sets the Content-Language
header of the response.
Sets the Content-Language
header of the response.
Sets the Content-Language
header of the response.
Sets the Content-Type
header of the response.
Sets the Content-Type
header of the response.
Sets the Content-Type
header of the response.
Sets the Expires
header of the response.
Sets the Expires
header of the response.
Sets the Expires
header of the response.
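These override parameters surface as Response* fields on the request inputs; per this release's changelog, they are now accepted on HeadObject as well as GetObject. A minimal aws-sdk-go sketch with placeholder names:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket:               aws.String("examplebucket"), // placeholder
		Key:                  aws.String("exampleobject"), // placeholder
		ResponseCacheControl: aws.String("no-cache"),
		ResponseContentType:  aws.String("application/json"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// The overrides are reflected in the returned headers.
	log.Println(aws.StringValue(out.CacheControl), aws.StringValue(out.ContentType))
}
```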
The established temporary security credentials of the session.
Directory buckets - These session credentials are only supported for the authentication and authorization of Zonal endpoint APIs on directory buckets.
The established temporary security credentials for the created session..
" + "CreateSessionOutput$Credentials": "The established temporary security credentials for the created session.
" } }, "SessionExpiration": { @@ -4225,7 +4231,7 @@ "Suffix": { "base": null, "refs": { - "IndexDocument$Suffix": "A suffix that is appended to a request that is for a directory on the website endpoint (for example,if the suffix is index.html and you make a request to samplebucket/images/ the data that is returned will be for the object with the key name images/index.html) The suffix must not be empty and must not include a slash character.
Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.
A suffix that is appended to a request that is for a directory on the website endpoint. (For example, if the suffix is index.html
and you make a request to samplebucket/images/
, the data that is returned will be for the object with the key name images/index.html
.) The suffix must not be empty and must not include a slash character.
Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.
Specifies how many newer noncurrent versions must exist before Amazon S3 can perform the associated action on a given version. If there are this many more recent noncurrent versions, Amazon S3 will take the associated action. For more information about noncurrent versions, see Lifecycle configuration elements in the Amazon S3 User Guide.
", - "NoncurrentVersionTransition$NewerNoncurrentVersions": "Specifies how many newer noncurrent versions must exist before Amazon S3 can perform the associated action on a given version. If there are this many more recent noncurrent versions, Amazon S3 will take the associated action. For more information about noncurrent versions, see Lifecycle configuration elements in the Amazon S3 User Guide.
" + "NoncurrentVersionExpiration$NewerNoncurrentVersions": "Specifies how many noncurrent versions Amazon S3 will retain. You can specify up to 100 noncurrent versions to retain. Amazon S3 will permanently delete any additional noncurrent versions beyond the specified number to retain. For more information about noncurrent versions, see Lifecycle configuration elements in the Amazon S3 User Guide.
", + "NoncurrentVersionTransition$NewerNoncurrentVersions": "Specifies how many noncurrent versions Amazon S3 will retain in the same storage class before transitioning objects. You can specify up to 100 noncurrent versions to retain. Amazon S3 will transition any additional noncurrent versions beyond the specified number to retain. For more information about noncurrent versions, see Lifecycle configuration elements in the Amazon S3 User Guide.
" } }, "VersionIdMarker": { diff --git a/models/apis/s3/2006-03-01/examples-1.json b/models/apis/s3/2006-03-01/examples-1.json index e2c4125fd6d..e8572583fc3 100644 --- a/models/apis/s3/2006-03-01/examples-1.json +++ b/models/apis/s3/2006-03-01/examples-1.json @@ -84,10 +84,13 @@ "CreateBucket": [ { "input": { - "Bucket": "examplebucket" + "Bucket": "examplebucket", + "CreateBucketConfiguration": { + "LocationConstraint": "eu-west-1" + } }, "output": { - "Location": "/examplebucket" + "Location": "http://examplebucket.