diff --git a/Cargo.toml b/Cargo.toml index cc650cab0d2c..7533bfb8fac1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,12 +2,12 @@ resolver = "2" exclude = [ "examples/cross_service", + "examples/lambda", + "examples/test-utils", "examples/webassembly", "examples/examples", - "examples/test-utils", - "examples/lambda", - "tests/webassembly", - "tests/no-default-features" + "tests/no-default-features", + "tests/webassembly" ] members = [ "sdk/accessanalyzer", diff --git a/aws-models/s3.json b/aws-models/s3.json index fc155a8427ae..eb0e8e0f028d 100644 --- a/aws-models/s3.json +++ b/aws-models/s3.json @@ -17661,6 +17661,12 @@ "traits": { "smithy.api#documentation": "

Date the bucket was created. This date can change when making changes to your bucket,\n such as editing its bucket policy.

" } + }, + "BucketRegion": { + "target": "com.amazonaws.s3#BucketRegion", + "traits": { + "smithy.api#documentation": "

\n BucketRegion indicates the Amazon Web Services region where the bucket is located. If the request contains at least one valid parameter, it is included in the response.

" + } } }, "traits": { @@ -17984,6 +17990,9 @@ "com.amazonaws.s3#BucketName": { "type": "string" }, + "com.amazonaws.s3#BucketRegion": { + "type": "string" + }, "com.amazonaws.s3#BucketType": { "type": "enum", "members": { @@ -18987,7 +18996,7 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "

The server-side encryption algorithm used when storing this object in Amazon S3. Unrecognized or unsupported values won’t write a destination object and will receive a 400 Bad Request response.

\n

Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket.\n When copying an object, if you don't specify encryption information in your copy\n request, the encryption setting of the target object is set to the default\n encryption configuration of the destination bucket. By default, all buckets have a\n base level of encryption configuration that uses server-side encryption with Amazon S3\n managed keys (SSE-S3). If the destination bucket has a different default encryption\n configuration, Amazon S3 uses\n the corresponding encryption key to encrypt the target\n object copy.

\n

With server-side\n encryption, Amazon S3 encrypts your data as it writes your data to disks in its data\n centers and decrypts the data when you access it. For more information about server-side encryption, see Using\n Server-Side Encryption in the\n Amazon S3 User Guide.

\n

\n General purpose buckets \n

\n \n

\n Directory buckets \n

\n ", + "smithy.api#documentation": "

The server-side encryption algorithm used when storing this object in Amazon S3. Unrecognized or unsupported values won’t write a destination object and will receive a 400 Bad Request response.

\n

Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket.\n When copying an object, if you don't specify encryption information in your copy\n request, the encryption setting of the target object is set to the default\n encryption configuration of the destination bucket. By default, all buckets have a\n base level of encryption configuration that uses server-side encryption with Amazon S3\n managed keys (SSE-S3). If the destination bucket has a different default encryption\n configuration, Amazon S3 uses\n the corresponding encryption key to encrypt the target\n object copy.

\n

With server-side\n encryption, Amazon S3 encrypts your data as it writes your data to disks in its data\n centers and decrypts the data when you access it. For more information about server-side encryption, see Using\n Server-Side Encryption in the\n Amazon S3 User Guide.

\n

\n General purpose buckets \n

\n \n

\n Directory buckets \n

\n ", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, @@ -19029,7 +19038,7 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. All GET and PUT requests for an\n object protected by KMS will fail if they're not made via SSL or using SigV4. For\n information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see\n Specifying the\n Signature Version in Request Authentication in the\n Amazon S3 User Guide.

\n

\n Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the \n x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS \n symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. \nAmazon Web Services managed key (aws/s3) isn't supported. \n

", + "smithy.api#documentation": "

Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. All GET and PUT requests for an\n object protected by KMS will fail if they're not made via SSL or using SigV4. For\n information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see\n Specifying the\n Signature Version in Request Authentication in the\n Amazon S3 User Guide.

\n

\n Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the \n x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS \n symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. \n If you want to specify the \n x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS \n customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. \nThe Amazon Web Services managed key (aws/s3) isn't supported. \n

", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -19709,7 +19718,7 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same\n account that's issuing the command, you must use the full Key ARN not the Key ID.

\n

\n General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS \n key to use. If you specify\n x-amz-server-side-encryption:aws:kms or\n x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key\n (aws/s3) to protect the data.

\n

\n Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the \n x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS \n symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. \nAmazon Web Services managed key (aws/s3) isn't supported. \n

", + "smithy.api#documentation": "

Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same\n account that's issuing the command, you must use the full Key ARN not the Key ID.

\n

\n General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS \n key to use. If you specify\n x-amz-server-side-encryption:aws:kms or\n x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key\n (aws/s3) to protect the data.

\n

\n Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the \n x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS \n symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. \n If you want to specify the \n x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS \n customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. \nThe Amazon Web Services managed key (aws/s3) isn't supported. \n

", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -19794,7 +19803,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a session that establishes temporary security credentials to support fast authentication and authorization for the Zonal endpoint API operations on directory buckets. \n For more information about Zonal endpoint API operations that include the Availability Zone in the request endpoint, see \n S3 Express One Zone APIs in the Amazon S3 User Guide. \n

\n

To make Zonal endpoint API requests on a directory bucket, use the CreateSession\n API operation. Specifically, you grant s3express:CreateSession permission to a\n bucket in a bucket policy or an IAM identity-based policy. Then, you use IAM credentials to make the\n CreateSession API request on the bucket, which returns temporary security\n credentials that include the access key ID, secret access key, session token, and\n expiration. These credentials have associated permissions to access the Zonal endpoint API operations. After\n the session is created, you don’t need to use other policies to grant permissions to each\n Zonal endpoint API individually. Instead, in your Zonal endpoint API requests, you sign your requests by\n applying the temporary security credentials of the session to the request headers and\n following the SigV4 protocol for authentication. You also apply the session token to the\n x-amz-s3session-token request header for authorization. Temporary security\n credentials are scoped to the bucket and expire after 5 minutes. After the expiration time,\n any calls that you make with those credentials will fail. You must use IAM credentials\n again to make a CreateSession API request that generates a new set of\n temporary credentials for use. Temporary credentials cannot be extended or refreshed beyond\n the original specified interval.

\n

If you use Amazon Web Services SDKs, SDKs handle the session token refreshes automatically to avoid\n service interruptions when a session expires. We recommend that you use the Amazon Web Services SDKs to\n initiate and manage requests to the CreateSession API. For more information, see Performance guidelines and design patterns in the\n Amazon S3 User Guide.

\n \n \n \n
\n
Permissions
\n
\n

To obtain temporary security credentials, you must create a bucket policy or an IAM identity-based policy that\n grants s3express:CreateSession permission to the bucket. In a\n policy, you can have the s3express:SessionMode condition key to\n control who can create a ReadWrite or ReadOnly session.\n For more information about ReadWrite or ReadOnly\n sessions, see \n x-amz-create-session-mode\n . For example policies, see\n Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the\n Amazon S3 User Guide.

\n

To grant cross-account access to Zonal endpoint API operations, the bucket policy should also grant both accounts the s3express:CreateSession permission.

\n

If you want to encrypt objects with SSE-KMS, you must also have the kms:GenerateDataKey and the kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the target KMS key.

\n
\n
Encryption
\n
\n

For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your \n CreateSession requests or PUT object requests. Then, new objects \n are automatically encrypted with the desired encryption settings. For more\n information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

\n

For Zonal endpoint (object-level) API operations except CopyObject and UploadPartCopy, \nyou authenticate and authorize requests through CreateSession for low latency. \n To encrypt new objects in a directory bucket with SSE-KMS, you must specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). Then, when a session is created for Zonal endpoint API operations, new objects are automatically encrypted and decrypted with SSE-KMS and S3 Bucket Keys during the session.

\n \n

\n Only 1 customer managed key is supported per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported. \n After you specify SSE-KMS as your bucket's default encryption configuration with a customer managed key, you can't change the customer managed key for the bucket's SSE-KMS configuration.\n

\n
\n

In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, \n you can't override the values of the encryption settings (x-amz-server-side-encryption, x-amz-server-side-encryption-aws-kms-key-id, x-amz-server-side-encryption-context, and x-amz-server-side-encryption-bucket-key-enabled) from the CreateSession request. \n You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and \n Amazon S3 will use the encryption settings values from the CreateSession request to protect new objects in the directory bucket. \n

\n \n

When you use the CLI or the Amazon Web Services SDKs, for CreateSession, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the \n CreateSession request. It's not supported to override the encryption settings values in the CreateSession request. \n Also, in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), \n it's not supported to override the values of the encryption settings from the CreateSession request. \n\n

\n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
", + "smithy.api#documentation": "

Creates a session that establishes temporary security credentials to support fast authentication and authorization for the Zonal endpoint API operations on directory buckets. \n For more information about Zonal endpoint API operations that include the Availability Zone in the request endpoint, see \n S3 Express One Zone APIs in the Amazon S3 User Guide. \n

\n

To make Zonal endpoint API requests on a directory bucket, use the CreateSession\n API operation. Specifically, you grant s3express:CreateSession permission to a\n bucket in a bucket policy or an IAM identity-based policy. Then, you use IAM credentials to make the\n CreateSession API request on the bucket, which returns temporary security\n credentials that include the access key ID, secret access key, session token, and\n expiration. These credentials have associated permissions to access the Zonal endpoint API operations. After\n the session is created, you don’t need to use other policies to grant permissions to each\n Zonal endpoint API individually. Instead, in your Zonal endpoint API requests, you sign your requests by\n applying the temporary security credentials of the session to the request headers and\n following the SigV4 protocol for authentication. You also apply the session token to the\n x-amz-s3session-token request header for authorization. Temporary security\n credentials are scoped to the bucket and expire after 5 minutes. After the expiration time,\n any calls that you make with those credentials will fail. You must use IAM credentials\n again to make a CreateSession API request that generates a new set of\n temporary credentials for use. Temporary credentials cannot be extended or refreshed beyond\n the original specified interval.

\n

If you use Amazon Web Services SDKs, SDKs handle the session token refreshes automatically to avoid\n service interruptions when a session expires. We recommend that you use the Amazon Web Services SDKs to\n initiate and manage requests to the CreateSession API. For more information, see Performance guidelines and design patterns in the\n Amazon S3 User Guide.

\n \n \n \n
\n
Permissions
\n
\n

To obtain temporary security credentials, you must create a bucket policy or an IAM identity-based policy that\n grants s3express:CreateSession permission to the bucket. In a\n policy, you can have the s3express:SessionMode condition key to\n control who can create a ReadWrite or ReadOnly session.\n For more information about ReadWrite or ReadOnly\n sessions, see \n x-amz-create-session-mode\n . For example policies, see\n Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the\n Amazon S3 User Guide.

\n

To grant cross-account access to Zonal endpoint API operations, the bucket policy should also grant both accounts the s3express:CreateSession permission.

\n

If you want to encrypt objects with SSE-KMS, you must also have the kms:GenerateDataKey and the kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the target KMS key.

\n
\n
Encryption
\n
\n

For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your \n CreateSession requests or PUT object requests. Then, new objects \n are automatically encrypted with the desired encryption settings. For more\n information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

\n

For Zonal endpoint (object-level) API operations except CopyObject and UploadPartCopy, \nyou authenticate and authorize requests through CreateSession for low latency. \n To encrypt new objects in a directory bucket with SSE-KMS, you must specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). Then, when a session is created for Zonal endpoint API operations, new objects are automatically encrypted and decrypted with SSE-KMS and S3 Bucket Keys during the session.

\n \n

\n Only 1 customer managed key is supported per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported. \n After you specify SSE-KMS as your bucket's default encryption configuration with a customer managed key, you can't change the customer managed key for the bucket's SSE-KMS configuration.\n

\n
\n

In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, \n you can't override the values of the encryption settings (x-amz-server-side-encryption, x-amz-server-side-encryption-aws-kms-key-id, x-amz-server-side-encryption-context, and x-amz-server-side-encryption-bucket-key-enabled) from the CreateSession request. \n You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and \n Amazon S3 will use the encryption settings values from the CreateSession request to protect new objects in the directory bucket. \n

\n \n

When you use the CLI or the Amazon Web Services SDKs, for CreateSession, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the \n CreateSession request. It's not supported to override the encryption settings values in the CreateSession request. \n Also, in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), \n it's not supported to override the values of the encryption settings from the CreateSession request. \n\n

\n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}?session", @@ -19848,7 +19857,8 @@ } }, "traits": { - "smithy.api#output": {} + "smithy.api#output": {}, + "smithy.api#xmlName": "CreateSessionResult" } }, "com.amazonaws.s3#CreateSessionRequest": { @@ -19882,7 +19892,7 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

If you specify x-amz-server-side-encryption with aws:kms, you must specify the \n x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS \n symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Also, if the KMS key doesn't exist in the same\n account that't issuing the command, you must use the full Key ARN not the Key ID.

\n

Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. \nAmazon Web Services managed key (aws/s3) isn't supported. \n

", + "smithy.api#documentation": "

If you specify x-amz-server-side-encryption with aws:kms, you must specify the \n x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS \n symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Also, if the KMS key doesn't exist in the same\n account that's issuing the command, you must use the full Key ARN not the Key ID.

\n

Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. \nThe Amazon Web Services managed key (aws/s3) isn't supported. \n

", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -20782,7 +20792,7 @@ "target": "com.amazonaws.s3#DeleteObjectOutput" }, "traits": { - "smithy.api#documentation": "

Removes an object from a bucket. The behavior depends on the bucket's versioning state:

\n \n \n \n \n

To remove a specific version, you must use the versionId query parameter. Using this\n query parameter permanently deletes the version. If the object deleted is a delete marker, Amazon S3\n sets the response header x-amz-delete-marker to true.

\n

If the object you want to delete is in a bucket where the bucket versioning\n configuration is MFA Delete enabled, you must include the x-amz-mfa request\n header in the DELETE versionId request. Requests that include\n x-amz-mfa must use HTTPS. For more information about MFA Delete, see Using MFA Delete in the Amazon S3\n User Guide. To see sample\n requests that use versioning, see Sample\n Request.

\n \n

\n Directory buckets - MFA delete is not supported by directory buckets.

\n
\n

You can delete objects by explicitly calling DELETE Object or calling \n (PutBucketLifecycle) to enable Amazon S3 to remove them for you. If you want to block\n users or accounts from removing or deleting objects from your bucket, you must deny them\n the s3:DeleteObject, s3:DeleteObjectVersion, and\n s3:PutLifeCycleConfiguration actions.

\n \n

\n Directory buckets - S3 Lifecycle is not supported by directory buckets.

\n
\n
\n
Permissions
\n
\n \n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following action is related to DeleteObject:

\n ", + "smithy.api#documentation": "

Removes an object from a bucket. The behavior depends on the bucket's versioning state.\n For more information, see Best\n practices to consider before deleting an object.

\n

To remove a specific version, you must use the versionId query parameter.\n Using this query parameter permanently deletes the version. If the object deleted is a\n delete marker, Amazon S3 sets the response header x-amz-delete-marker to true. If\n the object you want to delete is in a bucket where the bucket versioning configuration is\n MFA delete enabled, you must include the x-amz-mfa request header in the\n DELETE versionId request. Requests that include x-amz-mfa must\n use HTTPS. For more information about MFA delete and to see example requests, see Using MFA\n delete and Sample\n request in the Amazon S3 User Guide.

\n \n \n \n
\n
Permissions
\n
\n \n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following action is related to DeleteObject:

\n ", "smithy.api#examples": [ { "title": "To delete an object (from a non-versioned bucket)", @@ -23526,7 +23536,8 @@ } }, "traits": { - "smithy.api#output": {} + "smithy.api#output": {}, + "smithy.api#xmlName": "GetObjectAttributesResponse" } }, "com.amazonaws.s3#GetObjectAttributesParts": { @@ -26540,6 +26551,12 @@ "traits": { "smithy.api#documentation": "

\n ContinuationToken is included in the\n response when there are more buckets that can be listed with pagination. The next ListBuckets request to Amazon S3 can be continued with this ContinuationToken. ContinuationToken is obfuscated and is not a real bucket.

" } + }, + "Prefix": { + "target": "com.amazonaws.s3#Prefix", + "traits": { + "smithy.api#documentation": "

If Prefix was sent with the request, it is included in the response.

\n

All bucket names in the response begin with the specified bucket name prefix.

" + } } }, "traits": { @@ -26563,6 +26580,20 @@ "smithy.api#documentation": "

\n ContinuationToken indicates to Amazon S3 that the list is being continued on\n this bucket with a token. ContinuationToken is obfuscated and is not a real\n key. You can use this ContinuationToken for pagination of the list results.

\n

Length Constraints: Minimum length of 0. Maximum length of 1024.

\n

Required: No.

", "smithy.api#httpQuery": "continuation-token" } + }, + "Prefix": { + "target": "com.amazonaws.s3#Prefix", + "traits": { + "smithy.api#documentation": "

Limits the response to bucket names that begin with the specified bucket name prefix.

", + "smithy.api#httpQuery": "prefix" + } + }, + "BucketRegion": { + "target": "com.amazonaws.s3#BucketRegion", + "traits": { + "smithy.api#documentation": "

Limits the response to buckets that are located in the specified Amazon Web Services Region. The Amazon Web Services Region must be expressed according to the Amazon Web Services Region code, such as us-west-2 for the US West (Oregon) Region. For a list of the valid values for all of the Amazon Web Services Regions, see Regions and Endpoints.

\n \n

Requests made to a Regional endpoint that is different from the bucket-region parameter are not supported. For example, if you want to limit the response to your buckets in Region us-west-2, the request must be made to an endpoint in Region us-west-2.

\n
", + "smithy.api#httpQuery": "bucket-region" + } } }, "traits": { @@ -26614,7 +26645,8 @@ } }, "traits": { - "smithy.api#output": {} + "smithy.api#output": {}, + "smithy.api#xmlName": "ListAllMyDirectoryBucketsResult" } }, "com.amazonaws.s3#ListDirectoryBucketsRequest": { @@ -29233,7 +29265,7 @@ "RestrictPublicBuckets": { "target": "com.amazonaws.s3#Setting", "traits": { - "smithy.api#documentation": "

Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting\n this element to TRUE restricts access to this bucket to only Amazon Web Servicesservice principals and authorized users within this account if the bucket has\n a public policy.

\n

Enabling this setting doesn't affect previously stored bucket policies, except that\n public and cross-account access within any public bucket policy, including non-public\n delegation to specific accounts, is blocked.

", + "smithy.api#documentation": "

Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting\n this element to TRUE restricts access to this bucket to only Amazon Web Services service principals and authorized users within this account if the bucket has\n a public policy.

\n

Enabling this setting doesn't affect previously stored bucket policies, except that\n public and cross-account access within any public bucket policy, including non-public\n delegation to specific accounts, is blocked.

", "smithy.api#xmlName": "RestrictPublicBuckets" } } @@ -29632,7 +29664,7 @@ "requestAlgorithmMember": "ChecksumAlgorithm", "requestChecksumRequired": true }, - "smithy.api#documentation": "

This operation configures default encryption \n and Amazon S3 Bucket Keys for an existing bucket.

\n \n

\n Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name\n . Virtual-hosted-style requests aren't supported. \nFor more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n

By default, all buckets have a default encryption configuration that uses server-side\n encryption with Amazon S3 managed keys (SSE-S3).

\n \n \n \n \n

If you're specifying a customer managed KMS key, we recommend using a fully qualified\n KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the\n requester’s account. This behavior can result in data that's encrypted with a KMS key\n that belongs to the requester, and not the bucket owner.

\n

Also, this action requires Amazon Web Services Signature Version 4. For more information, see \n Authenticating Requests (Amazon Web Services Signature Version 4).

\n
\n
\n
Permissions
\n
\n \n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.

\n
\n
\n

The following operations are related to PutBucketEncryption:

\n ", + "smithy.api#documentation": "

This operation configures default encryption \n and Amazon S3 Bucket Keys for an existing bucket.

\n \n

\n Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name\n . Virtual-hosted-style requests aren't supported. \nFor more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n

By default, all buckets have a default encryption configuration that uses server-side\n encryption with Amazon S3 managed keys (SSE-S3).

\n \n \n \n \n

If you're specifying a customer managed KMS key, we recommend using a fully qualified\n KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the\n requester’s account. This behavior can result in data that's encrypted with a KMS key\n that belongs to the requester, and not the bucket owner.

\n

Also, this action requires Amazon Web Services Signature Version 4. For more information, see \n Authenticating Requests (Amazon Web Services Signature Version 4).

\n
\n
\n
Permissions
\n
\n \n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.

\n
\n
\n

The following operations are related to PutBucketEncryption:

\n ", "smithy.api#http": { "method": "PUT", "uri": "/{Bucket}?encryption", @@ -31477,7 +31509,7 @@ "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { - "smithy.api#documentation": "

The base64-encoded 128-bit MD5 digest of the message (without the headers) according to\n RFC 1864. This header can be used as a message integrity check to verify that the data is\n the same data that was originally sent. Although it is optional, we recommend using the\n Content-MD5 mechanism as an end-to-end integrity check. For more information about REST\n request authentication, see REST Authentication.

\n \n

The Content-MD5 header is required for any request to upload an\n object with a retention period configured using Amazon S3 Object Lock. For more\n information about Amazon S3 Object Lock, see Amazon S3 Object Lock\n Overview in the Amazon S3 User Guide.

\n
\n \n

This functionality is not supported for directory buckets.

\n
", + "smithy.api#documentation": "

The base64-encoded 128-bit MD5 digest of the message (without the headers) according to\n RFC 1864. This header can be used as a message integrity check to verify that the data is\n the same data that was originally sent. Although it is optional, we recommend using the\n Content-MD5 mechanism as an end-to-end integrity check. For more information about REST\n request authentication, see REST Authentication.

\n \n

The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an\n object with a retention period configured using Amazon S3 Object Lock. For more\n information, see Uploading objects to an Object Lock enabled bucket\n in the Amazon S3 User Guide.

\n
\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "Content-MD5" } }, @@ -31491,7 +31523,7 @@ "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm\n or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request.

\n

For the x-amz-checksum-algorithm\n header, replace \n algorithm\n with the supported algorithm from the following list:

\n \n

For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If the individual checksum value you provide through x-amz-checksum-algorithm\n doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm\n .

\n \n

For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.

\n
", + "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm\n or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request.

\n

For the x-amz-checksum-algorithm\n header, replace \n algorithm\n with the supported algorithm from the following list:

\n \n

For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If the individual checksum value you provide through x-amz-checksum-algorithm\n doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm\n .

\n \n

The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an\n object with a retention period configured using Amazon S3 Object Lock. For more\n information, see Uploading objects to an Object Lock enabled bucket\n in the Amazon S3 User Guide.

\n
\n

For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.

", "smithy.api#httpHeader": "x-amz-sdk-checksum-algorithm" } }, @@ -31628,7 +31660,7 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same\n account that's issuing the command, you must use the full Key ARN not the Key ID.

\n

\n General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS \n key to use. If you specify\n x-amz-server-side-encryption:aws:kms or\n x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key\n (aws/s3) to protect the data.

\n

\n Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the \n x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS \n symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. \nAmazon Web Services managed key (aws/s3) isn't supported. \n

", + "smithy.api#documentation": "

Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same\n account that's issuing the command, you must use the full Key ARN not the Key ID.

\n

\n General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS \n key to use. If you specify\n x-amz-server-side-encryption:aws:kms or\n x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key\n (aws/s3) to protect the data.

\n

\n Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the \n x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS \n symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. \n If you want to specify the \n x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS \n customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. \nThe Amazon Web Services managed key (aws/s3) isn't supported. \n

", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -32554,7 +32586,7 @@ "aws.protocols#httpChecksum": { "requestAlgorithmMember": "ChecksumAlgorithm" }, - "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n \n

The SELECT job type for the RestoreObject operation is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more\n

\n
\n

Restores an archived copy of an object back into Amazon S3

\n

This functionality is not supported for Amazon S3 on Outposts.

\n

This action performs the following types of requests:

\n \n

For more information about the S3 structure in the request body, see the\n following:

\n \n
\n
Permissions
\n
\n

To use this operation, you must have permissions to perform the\n s3:RestoreObject action. The bucket owner has this permission by\n default and can grant this permission to others. For more information about\n permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.

\n
\n
Restoring objects
\n
\n

Objects that you archive to the S3 Glacier Flexible Retrieval Flexible Retrieval\n or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or\n S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the\n S3 Glacier Flexible Retrieval Flexible Retrieval or S3 Glacier Deep Archive\n storage classes, you must first initiate a restore request, and then wait until a\n temporary copy of the object is available. If you want a permanent copy of the\n object, create a copy of it in the Amazon S3 Standard storage class in your S3 bucket.\n To access an archived object, you must restore the object for the duration (number\n of days) that you specify. For objects in the Archive Access or Deep Archive\n Access tiers of S3 Intelligent-Tiering, you must first initiate a restore request,\n and then wait until the object is moved into the Frequent Access tier.

\n

To restore a specific object version, you can provide a version ID. If you\n don't provide a version ID, Amazon S3 restores the current version.

\n

When restoring an archived object, you can specify one of the following data\n access tier options in the Tier element of the request body:

\n \n

For more information about archive retrieval options and provisioned capacity\n for Expedited data access, see Restoring Archived\n Objects in the Amazon S3 User Guide.

\n

You can use Amazon S3 restore speed upgrade to change the restore speed to a faster\n speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the\n Amazon S3 User Guide.

\n

To get the status of object restoration, you can send a HEAD\n request. Operations return the x-amz-restore header, which provides\n information about the restoration status, in the response. You can use Amazon S3 event\n notifications to notify you when a restore is initiated or completed. For more\n information, see Configuring Amazon S3 Event\n Notifications in the Amazon S3 User Guide.

\n

After restoring an archived object, you can update the restoration period by\n reissuing the request with a new period. Amazon S3 updates the restoration period\n relative to the current time and charges only for the request-there are no\n data transfer charges. You cannot update the restoration period when Amazon S3 is\n actively processing your current restore request for the object.

\n

If your bucket has a lifecycle configuration with a rule that includes an\n expiration action, the object expiration overrides the life span that you specify\n in a restore request. For example, if you restore an object copy for 10 days, but\n the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days.\n For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle\n Management in Amazon S3 User Guide.

\n
\n
Responses
\n
\n

A successful action returns either the 200 OK or 202\n Accepted status code.

\n \n \n
\n
\n

The following operations are related to RestoreObject:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Restores an archived copy of an object back into Amazon S3

\n

This functionality is not supported for Amazon S3 on Outposts.

\n

This action performs the following types of requests:

\n \n

For more information about the S3 structure in the request body, see the\n following:

\n \n
\n
Permissions
\n
\n

To use this operation, you must have permissions to perform the\n s3:RestoreObject action. The bucket owner has this permission by\n default and can grant this permission to others. For more information about\n permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.

\n
\n
Restoring objects
\n
\n

Objects that you archive to the S3 Glacier Flexible Retrieval\n or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or\n S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the\n S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive\n storage classes, you must first initiate a restore request, and then wait until a\n temporary copy of the object is available. If you want a permanent copy of the\n object, create a copy of it in the Amazon S3 Standard storage class in your S3 bucket.\n To access an archived object, you must restore the object for the duration (number\n of days) that you specify. For objects in the Archive Access or Deep Archive\n Access tiers of S3 Intelligent-Tiering, you must first initiate a restore request,\n and then wait until the object is moved into the Frequent Access tier.

\n

To restore a specific object version, you can provide a version ID. If you\n don't provide a version ID, Amazon S3 restores the current version.

\n

When restoring an archived object, you can specify one of the following data\n access tier options in the Tier element of the request body:

\n \n

For more information about archive retrieval options and provisioned capacity\n for Expedited data access, see Restoring Archived\n Objects in the Amazon S3 User Guide.

\n

You can use Amazon S3 restore speed upgrade to change the restore speed to a faster\n speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the\n Amazon S3 User Guide.

\n

To get the status of object restoration, you can send a HEAD\n request. Operations return the x-amz-restore header, which provides\n information about the restoration status, in the response. You can use Amazon S3 event\n notifications to notify you when a restore is initiated or completed. For more\n information, see Configuring Amazon S3 Event\n Notifications in the Amazon S3 User Guide.

\n

After restoring an archived object, you can update the restoration period by\n reissuing the request with a new period. Amazon S3 updates the restoration period\n relative to the current time and charges only for the request; there are no\n data transfer charges. You cannot update the restoration period when Amazon S3 is\n actively processing your current restore request for the object.

\n

If your bucket has a lifecycle configuration with a rule that includes an\n expiration action, the object expiration overrides the life span that you specify\n in a restore request. For example, if you restore an object copy for 10 days, but\n the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days.\n For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle\n Management in Amazon S3 User Guide.

\n
\n
Responses
\n
\n

A successful action returns either the 200 OK or 202\n Accepted status code.

\n \n \n
\n
\n

The following operations are related to RestoreObject:

\n ", "smithy.api#examples": [ { "title": "To restore an archived object", @@ -32682,7 +32714,7 @@ "Type": { "target": "com.amazonaws.s3#RestoreRequestType", "traits": { - "smithy.api#documentation": "\n

Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more\n

\n
\n

Type of restore request.

" + "smithy.api#documentation": "

Type of restore request.

" } }, "Tier": { @@ -32700,7 +32732,7 @@ "SelectParameters": { "target": "com.amazonaws.s3#SelectParameters", "traits": { - "smithy.api#documentation": "\n

Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more\n

\n
\n

Describes the parameters for Select job types.

" + "smithy.api#documentation": "

Describes the parameters for Select job types.

" } }, "OutputLocation": { @@ -32925,7 +32957,7 @@ "target": "com.amazonaws.s3#SelectObjectContentOutput" }, "traits": { - "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n \n

The SelectObjectContent operation is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the operation as usual. Learn more\n

\n
\n

This action filters the contents of an Amazon S3 object based on a simple structured query\n language (SQL) statement. In the request, along with the SQL expression, you must also\n specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses\n this format to parse object data into records, and returns only records that match the\n specified SQL expression. You must also specify the data serialization format for the\n response.

\n

This functionality is not supported for Amazon S3 on Outposts.

\n

For more information about Amazon S3 Select, see Selecting Content from\n Objects and SELECT\n Command in the Amazon S3 User Guide.

\n

\n
\n
Permissions
\n
\n

You must have the s3:GetObject permission for this operation. Amazon S3\n Select does not support anonymous access. For more information about permissions,\n see Specifying Permissions in\n a Policy in the Amazon S3 User Guide.

\n
\n
Object Data Formats
\n
\n

You can use Amazon S3 Select to query objects that have the following format\n properties:

\n \n
\n
Working with the Response Body
\n
\n

Given the response size is unknown, Amazon S3 Select streams the response as a\n series of messages and includes a Transfer-Encoding header with\n chunked as its value in the response. For more information, see\n Appendix:\n SelectObjectContent\n Response.

\n
\n
GetObject Support
\n
\n

The SelectObjectContent action does not support the following\n GetObject functionality. For more information, see GetObject.

\n \n
\n
Special Errors
\n
\n

For a list of special errors for this operation, see List of SELECT Object Content Error Codes\n

\n
\n
\n

The following operations are related to SelectObjectContent:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

This action filters the contents of an Amazon S3 object based on a simple structured query\n language (SQL) statement. In the request, along with the SQL expression, you must also\n specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses\n this format to parse object data into records, and returns only records that match the\n specified SQL expression. You must also specify the data serialization format for the\n response.

\n

This functionality is not supported for Amazon S3 on Outposts.

\n

For more information about Amazon S3 Select, see Selecting Content from\n Objects and SELECT\n Command in the Amazon S3 User Guide.

\n

\n
\n
Permissions
\n
\n

You must have the s3:GetObject permission for this operation. Amazon S3\n Select does not support anonymous access. For more information about permissions,\n see Specifying Permissions in\n a Policy in the Amazon S3 User Guide.

\n
\n
Object Data Formats
\n
\n

You can use Amazon S3 Select to query objects that have the following format\n properties:

\n \n
\n
Working with the Response Body
\n
\n

Given the response size is unknown, Amazon S3 Select streams the response as a\n series of messages and includes a Transfer-Encoding header with\n chunked as its value in the response. For more information, see\n Appendix:\n SelectObjectContent\n Response.

\n
\n
GetObject Support
\n
\n

The SelectObjectContent action does not support the following\n GetObject functionality. For more information, see GetObject.

\n \n
\n
Special Errors
\n
\n

For a list of special errors for this operation, see List of SELECT Object Content Error Codes\n

\n
\n
\n

The following operations are related to SelectObjectContent:

\n ", "smithy.api#http": { "method": "POST", "uri": "/{Bucket}/{Key+}?select&select-type=2", @@ -33079,7 +33111,7 @@ } }, "traits": { - "smithy.api#documentation": "\n

Learn Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more\n

\n
\n

Request to filter the contents of an Amazon S3 object based on a simple Structured Query\n Language (SQL) statement. In the request, along with the SQL expression, you must specify a\n data serialization format (JSON or CSV) of the object. Amazon S3 uses this to parse object data\n into records. It returns only records that match the specified SQL expression. You must\n also specify the data serialization format for the response. For more information, see\n S3Select API Documentation.

", + "smithy.api#documentation": "

Request to filter the contents of an Amazon S3 object based on a simple Structured Query\n Language (SQL) statement. In the request, along with the SQL expression, you must specify a\n data serialization format (JSON or CSV) of the object. Amazon S3 uses this to parse object data\n into records. It returns only records that match the specified SQL expression. You must\n also specify the data serialization format for the response. For more information, see\n S3Select API Documentation.

", "smithy.api#input": {} } }, @@ -33103,7 +33135,7 @@ "Expression": { "target": "com.amazonaws.s3#Expression", "traits": { - "smithy.api#documentation": "\n

Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more\n

\n
\n

The expression that is used to query the object.

", + "smithy.api#documentation": "

The expression that is used to query the object.

", "smithy.api#required": {} } }, @@ -33116,7 +33148,7 @@ } }, "traits": { - "smithy.api#documentation": "\n

Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more\n

\n
\n

Describes the parameters for Select job types.

\n

Learn How to optimize querying your data in Amazon S3 using\n Amazon Athena, S3 Object Lambda, or client-side filtering.

" + "smithy.api#documentation": "

Describes the parameters for Select job types.

" } }, "com.amazonaws.s3#ServerSideEncryption": { @@ -33160,7 +33192,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes the default server-side encryption to apply to new objects in the bucket. If a\n PUT Object request doesn't specify any server-side encryption, this default encryption will\n be applied. For more\n information, see PutBucketEncryption.

\n \n \n " + "smithy.api#documentation": "

Describes the default server-side encryption to apply to new objects in the bucket. If a\n PUT Object request doesn't specify any server-side encryption, this default encryption will\n be applied. For more\n information, see PutBucketEncryption.

\n \n \n " } }, "com.amazonaws.s3#ServerSideEncryptionConfiguration": { diff --git a/examples/cross_service/detect_faces/Cargo.toml b/examples/cross_service/detect_faces/Cargo.toml index 1e7290515416..b66924799ce4 100644 --- a/examples/cross_service/detect_faces/Cargo.toml +++ b/examples/cross_service/detect_faces/Cargo.toml @@ -8,7 +8,7 @@ publish = false [dependencies] aws-config= { version = "1.5.8", path = "../../../sdk/aws-config" } aws-sdk-rekognition= { version = "1.49.0", path = "../../../sdk/rekognition" } -aws-sdk-s3= { version = "1.56.0", path = "../../../sdk/s3" } +aws-sdk-s3= { version = "1.57.0", path = "../../../sdk/s3" } aws-smithy-types= { version = "1.2.7", path = "../../../sdk/aws-smithy-types", features = ["rt-tokio"] } [dependencies.tokio] diff --git a/examples/cross_service/detect_labels/Cargo.toml b/examples/cross_service/detect_labels/Cargo.toml index 67bdf0ded2ce..415192e73b12 100644 --- a/examples/cross_service/detect_labels/Cargo.toml +++ b/examples/cross_service/detect_labels/Cargo.toml @@ -10,7 +10,7 @@ kamadak-exif = "0.5.4" aws-config= { version = "1.5.8", path = "../../../sdk/aws-config" } aws-sdk-dynamodb= { version = "1.50.0", path = "../../../sdk/dynamodb" } aws-sdk-rekognition= { version = "1.49.0", path = "../../../sdk/rekognition" } -aws-sdk-s3= { version = "1.56.0", path = "../../../sdk/s3" } +aws-sdk-s3= { version = "1.57.0", path = "../../../sdk/s3" } aws-smithy-types= { version = "1.2.7", path = "../../../sdk/aws-smithy-types", features = ["rt-tokio"] } aws-types= { version = "1.3.3", path = "../../../sdk/aws-types" } diff --git a/examples/cross_service/photo_asset_management/Cargo.toml b/examples/cross_service/photo_asset_management/Cargo.toml index 0aea01eab78d..67488aa210fe 100644 --- a/examples/cross_service/photo_asset_management/Cargo.toml +++ b/examples/cross_service/photo_asset_management/Cargo.toml @@ -33,7 +33,7 @@ tracing = "0.1.37" aws-config= { version = "1.5.8", path = "../../../sdk/aws-config" } aws-sdk-dynamodb= { version = "1.50.0", path = "../../../sdk/dynamodb" } aws-sdk-rekognition= { version = "1.49.0", path = "../../../sdk/rekognition" } -aws-sdk-s3= { version = "1.56.0", path = "../../../sdk/s3" } +aws-sdk-s3= { version = "1.57.0", path = "../../../sdk/s3" } aws-sdk-sns= { version = "1.47.0", path = "../../../sdk/sns" } aws-smithy-runtime= { version = "1.7.2", path = "../../../sdk/aws-smithy-runtime" } aws-smithy-types-convert= { version = "0.60.8", path = "../../../sdk/aws-smithy-types-convert", features = ["convert-chrono"] } diff --git a/examples/cross_service/telephone/Cargo.toml b/examples/cross_service/telephone/Cargo.toml index b037ee81b86a..40573bf8f214 100644 --- a/examples/cross_service/telephone/Cargo.toml +++ b/examples/cross_service/telephone/Cargo.toml @@ -13,7 +13,7 @@ reqwest = "0.11.4" serde_json = "1.0" aws-config= { version = "1.5.8", path = "../../../sdk/aws-config" } aws-sdk-polly= { version = "1.48.0", path = "../../../sdk/polly" } -aws-sdk-s3= { version = "1.56.0", path = "../../../sdk/s3" } +aws-sdk-s3= { version = "1.57.0", path = "../../../sdk/s3" } aws-sdk-transcribe= { version = "1.47.0", path = "../../../sdk/transcribe" } aws-smithy-types= { version = "1.2.7", path = "../../../sdk/aws-smithy-types", features = ["rt-tokio"] } diff --git a/examples/examples/concurrency/Cargo.toml b/examples/examples/concurrency/Cargo.toml index 86bf575b9451..6c97688a61ae 100644 --- a/examples/examples/concurrency/Cargo.toml +++ b/examples/examples/concurrency/Cargo.toml @@ -26,5 +26,5 @@ features = ["env-filter"] 
[dev-dependencies] fastrand = "1.8.0" aws-config= { version = "1.5.8", path = "../../../sdk/aws-config" } -aws-sdk-s3= { version = "1.56.0", path = "../../../sdk/s3" } +aws-sdk-s3= { version = "1.57.0", path = "../../../sdk/s3" } aws-sdk-sqs= { version = "1.46.0", path = "../../../sdk/sqs" } diff --git a/examples/examples/custom-root-certificates/Cargo.toml b/examples/examples/custom-root-certificates/Cargo.toml index 624b8f45f95c..4c5f0f410e0a 100644 --- a/examples/examples/custom-root-certificates/Cargo.toml +++ b/examples/examples/custom-root-certificates/Cargo.toml @@ -10,7 +10,7 @@ publish = false [dependencies] rustls = "0.21.9" aws-config= { version = "1.5.8", path = "../../../sdk/aws-config", features = ["behavior-version-latest"] } -aws-sdk-s3= { version = "1.56.0", path = "../../../sdk/s3", default-features = false } +aws-sdk-s3= { version = "1.57.0", path = "../../../sdk/s3", default-features = false } aws-smithy-runtime= { version = "1.7.2", path = "../../../sdk/aws-smithy-runtime", features = ["tls-rustls"] } [dependencies.tokio] diff --git a/examples/examples/glue/Cargo.toml b/examples/examples/glue/Cargo.toml index 5e8fb7740391..7a9173bfabb4 100644 --- a/examples/examples/glue/Cargo.toml +++ b/examples/examples/glue/Cargo.toml @@ -20,7 +20,7 @@ futures = "0.3.25" tracing-bunyan-formatter = "0.3.4" aws-config= { version = "1.5.8", path = "../../../sdk/aws-config", features = ["behavior-version-latest"] } aws-sdk-glue= { version = "1.66.0", path = "../../../sdk/glue" } -aws-sdk-s3= { version = "1.56.0", path = "../../../sdk/s3" } +aws-sdk-s3= { version = "1.57.0", path = "../../../sdk/s3" } aws-http= { version = "0.60.6", path = "../../../sdk/aws-http" } aws-smithy-types= { version = "1.2.7", path = "../../../sdk/aws-smithy-types" } aws-types= { version = "1.3.3", path = "../../../sdk/aws-types" } diff --git a/examples/examples/iam/Cargo.toml b/examples/examples/iam/Cargo.toml index 709e8c6d51fa..9323a07ccbcb 100644 --- a/examples/examples/iam/Cargo.toml +++ b/examples/examples/iam/Cargo.toml @@ -24,7 +24,7 @@ lazy_static = "1.4.0" aws-config= { version = "1.5.8", path = "../../../sdk/aws-config", features = ["behavior-version-latest"] } aws-credential-types= { version = "1.2.1", path = "../../../sdk/aws-credential-types", features = ["hardcoded-credentials"] } aws-sdk-iam= { version = "1.48.0", path = "../../../sdk/iam" } -aws-sdk-s3= { version = "1.56.0", path = "../../../sdk/s3" } +aws-sdk-s3= { version = "1.57.0", path = "../../../sdk/s3" } aws-sdk-sts= { version = "1.46.0", path = "../../../sdk/sts" } [dependencies.sdk-examples-test-utils] diff --git a/examples/examples/lambda/Cargo.toml b/examples/examples/lambda/Cargo.toml index e61c838c2169..1046a46f515b 100644 --- a/examples/examples/lambda/Cargo.toml +++ b/examples/examples/lambda/Cargo.toml @@ -15,7 +15,7 @@ aws-config= { version = "1.5.8", path = "../../../sdk/aws-config", features = [" aws-sdk-ec2= { version = "1.78.0", path = "../../../sdk/ec2" } aws-sdk-iam= { version = "1.48.0", path = "../../../sdk/iam" } aws-sdk-lambda= { version = "1.51.0", path = "../../../sdk/lambda" } -aws-sdk-s3= { version = "1.56.0", path = "../../../sdk/s3" } +aws-sdk-s3= { version = "1.57.0", path = "../../../sdk/s3" } aws-smithy-types= { version = "1.2.7", path = "../../../sdk/aws-smithy-types" } aws-types= { version = "1.3.3", path = "../../../sdk/aws-types" } diff --git a/examples/examples/localstack/Cargo.toml b/examples/examples/localstack/Cargo.toml index 85a483a8a48a..7c3437372ce5 100644 --- 
a/examples/examples/localstack/Cargo.toml +++ b/examples/examples/localstack/Cargo.toml @@ -7,7 +7,7 @@ publish = false [dependencies] aws-config= { version = "1.5.8", path = "../../../sdk/aws-config", features = ["behavior-version-latest"] } -aws-sdk-s3= { version = "1.56.0", path = "../../../sdk/s3" } +aws-sdk-s3= { version = "1.57.0", path = "../../../sdk/s3" } [dependencies.tokio] version = "1" diff --git a/examples/examples/s3/Cargo.toml b/examples/examples/s3/Cargo.toml index f566d326d087..baac01077bcd 100644 --- a/examples/examples/s3/Cargo.toml +++ b/examples/examples/s3/Cargo.toml @@ -24,7 +24,7 @@ tracing = "0.1.37" serde_json = "1" chrono = "0.4.38" aws-config= { version = "1.5.8", path = "../../../sdk/aws-config", features = ["behavior-version-latest"] } -aws-sdk-s3= { version = "1.56.0", path = "../../../sdk/s3", features = ["rt-tokio"] } +aws-sdk-s3= { version = "1.57.0", path = "../../../sdk/s3", features = ["rt-tokio"] } aws-smithy-runtime= { version = "1.7.2", path = "../../../sdk/aws-smithy-runtime" } aws-smithy-runtime-api= { version = "1.7.2", path = "../../../sdk/aws-smithy-runtime-api", features = ["client"] } aws-smithy-types= { version = "1.2.7", path = "../../../sdk/aws-smithy-types", features = ["http-body-0-4-x"] } diff --git a/examples/examples/sdk-config/Cargo.toml b/examples/examples/sdk-config/Cargo.toml index 5b711d4b7123..79048dc2748e 100644 --- a/examples/examples/sdk-config/Cargo.toml +++ b/examples/examples/sdk-config/Cargo.toml @@ -12,7 +12,7 @@ async_once = "0.2.6" tracing = "0.1.37" aws-config= { version = "1.5.8", path = "../../../sdk/aws-config", features = ["behavior-version-latest"] } aws-credential-types= { version = "1.2.1", path = "../../../sdk/aws-credential-types" } -aws-sdk-s3= { version = "1.56.0", path = "../../../sdk/s3" } +aws-sdk-s3= { version = "1.57.0", path = "../../../sdk/s3" } [dependencies.tokio] version = "1.20.1" diff --git a/examples/examples/sending-presigned-requests/Cargo.toml b/examples/examples/sending-presigned-requests/Cargo.toml index 9904788b59e0..c26862f18264 100644 --- a/examples/examples/sending-presigned-requests/Cargo.toml +++ b/examples/examples/sending-presigned-requests/Cargo.toml @@ -12,7 +12,7 @@ hyper = "0.14" reqwest = "0.11" hyper-tls = "0.5.0" aws-config= { version = "1.5.8", path = "../../../sdk/aws-config", features = ["behavior-version-latest"] } -aws-sdk-s3= { version = "1.56.0", path = "../../../sdk/s3" } +aws-sdk-s3= { version = "1.57.0", path = "../../../sdk/s3" } [dependencies.clap] version = "~4.4" diff --git a/examples/examples/testing/Cargo.toml b/examples/examples/testing/Cargo.toml index cb0b928212e4..359c49af72cb 100644 --- a/examples/examples/testing/Cargo.toml +++ b/examples/examples/testing/Cargo.toml @@ -17,7 +17,7 @@ mockall = "0.11.4" serde_json = "1" aws-config= { version = "1.5.8", path = "../../../sdk/aws-config", features = ["behavior-version-latest"] } aws-credential-types= { version = "1.2.1", path = "../../../sdk/aws-credential-types", features = ["hardcoded-credentials"] } -aws-sdk-s3= { version = "1.56.0", path = "../../../sdk/s3" } +aws-sdk-s3= { version = "1.57.0", path = "../../../sdk/s3" } aws-smithy-types= { version = "1.2.7", path = "../../../sdk/aws-smithy-types" } aws-smithy-runtime= { version = "1.7.2", path = "../../../sdk/aws-smithy-runtime", features = ["test-util"] } aws-smithy-runtime-api= { version = "1.7.2", path = "../../../sdk/aws-smithy-runtime-api", features = ["test-util"] } diff --git a/examples/lambda/calculator/Cargo.toml 
b/examples/lambda/calculator/Cargo.toml index c714b045911a..6dfce514db9f 100644 --- a/examples/lambda/calculator/Cargo.toml +++ b/examples/lambda/calculator/Cargo.toml @@ -16,7 +16,7 @@ serde = "1.0.164" aws-config= { version = "1.5.8", path = "../../../sdk/aws-config" } aws-sdk-ec2= { version = "1.78.0", path = "../../../sdk/ec2" } aws-sdk-lambda= { version = "1.51.0", path = "../../../sdk/lambda" } -aws-sdk-s3= { version = "1.56.0", path = "../../../sdk/s3" } +aws-sdk-s3= { version = "1.57.0", path = "../../../sdk/s3" } [dependencies.clap] version = "~4.4" diff --git a/sdk/s3/Cargo.toml b/sdk/s3/Cargo.toml index 03f224fb9106..440b9b014036 100644 --- a/sdk/s3/Cargo.toml +++ b/sdk/s3/Cargo.toml @@ -1,7 +1,7 @@ # Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. [package] name = "aws-sdk-s3" -version = "1.56.0" +version = "1.57.0" authors = ["AWS Rust SDK Team ", "Russell Cohen "] description = "AWS SDK for Amazon Simple Storage Service" edition = "2021" diff --git a/sdk/s3/README.md b/sdk/s3/README.md index 1eeec32c3783..2525530d0c3a 100644 --- a/sdk/s3/README.md +++ b/sdk/s3/README.md @@ -12,7 +12,7 @@ your project, add the following to your **Cargo.toml** file: ```toml [dependencies] aws-config = { version = "1.1.7", features = ["behavior-version-latest"] } -aws-sdk-s3 = "1.56.0" +aws-sdk-s3 = "1.57.0" tokio = { version = "1", features = ["full"] } ``` diff --git a/sdk/s3/src/client/copy_object.rs b/sdk/s3/src/client/copy_object.rs index 7aa3787a8819..d28b29027b20 100644 --- a/sdk/s3/src/client/copy_object.rs +++ b/sdk/s3/src/client/copy_object.rs @@ -25,13 +25,13 @@ impl super::Client { /// - [`metadata(impl Into, impl Into)`](crate::operation::copy_object::builders::CopyObjectFluentBuilder::metadata) / [`set_metadata(Option>)`](crate::operation::copy_object::builders::CopyObjectFluentBuilder::set_metadata):
required: **false**

A map of metadata to store with the object in S3.


/// - [`metadata_directive(MetadataDirective)`](crate::operation::copy_object::builders::CopyObjectFluentBuilder::metadata_directive) / [`set_metadata_directive(Option)`](crate::operation::copy_object::builders::CopyObjectFluentBuilder::set_metadata_directive):
required: **false**

Specifies whether the metadata is copied from the source object or replaced with metadata that's provided in the request. When copying an object, you can preserve all metadata (the default) or specify new metadata. If this header isn’t specified, COPY is the default behavior.

General purpose bucket - For general purpose buckets, when you grant permissions, you can use the s3:x-amz-metadata-directive condition key to enforce certain metadata behavior when objects are uploaded. For more information, see Amazon S3 condition key examples in the Amazon S3 User Guide.

x-amz-website-redirect-location is unique to each object and is not copied when using the x-amz-metadata-directive header. To copy the value, you must specify x-amz-website-redirect-location in the request header.


/// - [`tagging_directive(TaggingDirective)`](crate::operation::copy_object::builders::CopyObjectFluentBuilder::tagging_directive) / [`set_tagging_directive(Option)`](crate::operation::copy_object::builders::CopyObjectFluentBuilder::set_tagging_directive):
required: **false**

Specifies whether the object tag-set is copied from the source object or replaced with the tag-set that's provided in the request.

The default value is COPY.

Directory buckets - For directory buckets in a CopyObject operation, only the empty tag-set is supported. Any requests that attempt to write non-empty tags into directory buckets will receive a 501 Not Implemented status code. When the destination bucket is a directory bucket, you will receive a 501 Not Implemented response in any of the following situations:

  • When you attempt to COPY the tag-set from an S3 source object that has non-empty tags.

  • When you attempt to REPLACE the tag-set of a source object and set a non-empty value to x-amz-tagging.

  • When you don't set the x-amz-tagging-directive header and the source object has non-empty tags. This is because the default value of x-amz-tagging-directive is COPY.

Because only the empty tag-set is supported for directory buckets in a CopyObject operation, the following situations are allowed:

  • When you attempt to COPY the tag-set from a directory bucket source object that has no tags to a general purpose bucket. It copies an empty tag-set to the destination object.

  • When you attempt to REPLACE the tag-set of a directory bucket source object and set the x-amz-tagging value of the directory bucket destination object to empty.

  • When you attempt to REPLACE the tag-set of a general purpose bucket source object that has non-empty tags and set the x-amz-tagging value of the directory bucket destination object to empty.

  • When you attempt to REPLACE the tag-set of a directory bucket source object and don't set the x-amz-tagging value of the directory bucket destination object. This is because the default value of x-amz-tagging is the empty value.


- /// - [`server_side_encryption(ServerSideEncryption)`](crate::operation::copy_object::builders::CopyObjectFluentBuilder::server_side_encryption) / [`set_server_side_encryption(Option)`](crate::operation::copy_object::builders::CopyObjectFluentBuilder::set_server_side_encryption):
required: **false**

The server-side encryption algorithm used when storing this object in Amazon S3. Unrecognized or unsupported values won’t write a destination object and will receive a 400 Bad Request response.

Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket. When copying an object, if you don't specify encryption information in your copy request, the encryption setting of the target object is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a different default encryption configuration, Amazon S3 uses the corresponding encryption key to encrypt the target object copy.

With server-side encryption, Amazon S3 encrypts your data as it writes your data to disks in its data centers and decrypts the data when you access it. For more information about server-side encryption, see Using Server-Side Encryption in the Amazon S3 User Guide.

General purpose buckets

  • For general purpose buckets, there are the following supported options for server-side encryption: server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), and server-side encryption with customer-provided encryption keys (SSE-C). Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the target object copy.

  • When you perform a CopyObject operation, if you want to use a different type of encryption setting for the target object, you can specify appropriate encryption-related headers to encrypt the target object with an Amazon S3 managed key, a KMS key, or a customer-provided key. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence.

Directory buckets

  • For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

  • To encrypt new object copies to a directory bucket with SSE-KMS, we recommend you specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). Amazon Web Services managed key (aws/s3) isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. After you specify a customer managed key for SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS configuration. Then, when you perform a CopyObject operation and want to specify server-side encryption settings for new object copies with SSE-KMS in the encryption-related request headers, you must ensure the encryption key is the same customer managed key that you specified for the directory bucket's default encryption configuration.


+ /// - [`server_side_encryption(ServerSideEncryption)`](crate::operation::copy_object::builders::CopyObjectFluentBuilder::server_side_encryption) / [`set_server_side_encryption(Option)`](crate::operation::copy_object::builders::CopyObjectFluentBuilder::set_server_side_encryption):
required: **false**

The server-side encryption algorithm used when storing this object in Amazon S3. Unrecognized or unsupported values won’t write a destination object and will receive a 400 Bad Request response.

Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket. When copying an object, if you don't specify encryption information in your copy request, the encryption setting of the target object is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a different default encryption configuration, Amazon S3 uses the corresponding encryption key to encrypt the target object copy.

With server-side encryption, Amazon S3 encrypts your data as it writes your data to disks in its data centers and decrypts the data when you access it. For more information about server-side encryption, see Using Server-Side Encryption in the Amazon S3 User Guide.

General purpose buckets

  • For general purpose buckets, there are the following supported options for server-side encryption: server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), and server-side encryption with customer-provided encryption keys (SSE-C). Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the target object copy.

  • When you perform a CopyObject operation, if you want to use a different type of encryption setting for the target object, you can specify appropriate encryption-related headers to encrypt the target object with an Amazon S3 managed key, a KMS key, or a customer-provided key. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence.

Directory buckets

  • For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

  • To encrypt new object copies to a directory bucket with SSE-KMS, we recommend you specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). The Amazon Web Services managed key (aws/s3) isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. After you specify a customer managed key for SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS configuration. Then, when you perform a CopyObject operation and want to specify server-side encryption settings for new object copies with SSE-KMS in the encryption-related request headers, you must ensure the encryption key is the same customer managed key that you specified for the directory bucket's default encryption configuration.


/// - [`storage_class(StorageClass)`](crate::operation::copy_object::builders::CopyObjectFluentBuilder::storage_class) / [`set_storage_class(Option)`](crate::operation::copy_object::builders::CopyObjectFluentBuilder::set_storage_class):
required: **false**

If the x-amz-storage-class header is not used, the copied object will be stored in the STANDARD Storage Class by default. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class.

  • Directory buckets - For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. Unsupported storage class values won't write a destination object and will respond with the HTTP status code 400 Bad Request.

  • Amazon S3 on Outposts - S3 on Outposts only uses the OUTPOSTS Storage Class.

You can use the CopyObject action to change the storage class of an object that is already stored in Amazon S3 by using the x-amz-storage-class header. For more information, see Storage Classes in the Amazon S3 User Guide.

Before using an object as a source object for the copy operation, you must restore a copy of it if it meets any of the following conditions:

  • The storage class of the source object is GLACIER or DEEP_ARCHIVE.

  • The storage class of the source object is INTELLIGENT_TIERING and its S3 Intelligent-Tiering access tier is Archive Access or Deep Archive Access.&#xD;

For more information, see RestoreObject and Copying Objects in the Amazon S3 User Guide.


/// - [`website_redirect_location(impl Into)`](crate::operation::copy_object::builders::CopyObjectFluentBuilder::website_redirect_location) / [`set_website_redirect_location(Option)`](crate::operation::copy_object::builders::CopyObjectFluentBuilder::set_website_redirect_location):
required: **false**

If the destination bucket is configured as a website, redirects requests for this object copy to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata. This value is unique to each object and is not copied when using the x-amz-metadata-directive header. Instead, you may opt to provide this header in combination with the x-amz-metadata-directive header.

This functionality is not supported for directory buckets.


/// - [`sse_customer_algorithm(impl Into)`](crate::operation::copy_object::builders::CopyObjectFluentBuilder::sse_customer_algorithm) / [`set_sse_customer_algorithm(Option)`](crate::operation::copy_object::builders::CopyObjectFluentBuilder::set_sse_customer_algorithm):
required: **false**

Specifies the algorithm to use when encrypting the object (for example, AES256).

When you perform a CopyObject operation, if you want to use a different type of encryption setting for the target object, you can specify appropriate encryption-related headers to encrypt the target object with an Amazon S3 managed key, a KMS key, or a customer-provided key. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence.

This functionality is not supported when the destination bucket is a directory bucket.


/// - [`sse_customer_key(impl Into)`](crate::operation::copy_object::builders::CopyObjectFluentBuilder::sse_customer_key) / [`set_sse_customer_key(Option)`](crate::operation::copy_object::builders::CopyObjectFluentBuilder::set_sse_customer_key):
required: **false**

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded. Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.

This functionality is not supported when the destination bucket is a directory bucket.


/// - [`sse_customer_key_md5(impl Into)`](crate::operation::copy_object::builders::CopyObjectFluentBuilder::sse_customer_key_md5) / [`set_sse_customer_key_md5(Option)`](crate::operation::copy_object::builders::CopyObjectFluentBuilder::set_sse_customer_key_md5):
required: **false**

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.

This functionality is not supported when the destination bucket is a directory bucket.


- /// - [`ssekms_key_id(impl Into)`](crate::operation::copy_object::builders::CopyObjectFluentBuilder::ssekms_key_id) / [`set_ssekms_key_id(Option)`](crate::operation::copy_object::builders::CopyObjectFluentBuilder::set_ssekms_key_id):
required: **false**

Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. All GET and PUT requests for an object protected by KMS will fail if they're not made via SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.

Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.


+ /// - [`ssekms_key_id(impl Into)`](crate::operation::copy_object::builders::CopyObjectFluentBuilder::ssekms_key_id) / [`set_ssekms_key_id(Option)`](crate::operation::copy_object::builders::CopyObjectFluentBuilder::set_ssekms_key_id):
required: **false**

Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. All GET and PUT requests for an object protected by KMS will fail if they're not made via SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.

Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.


/// - [`ssekms_encryption_context(impl Into)`](crate::operation::copy_object::builders::CopyObjectFluentBuilder::ssekms_encryption_context) / [`set_ssekms_encryption_context(Option)`](crate::operation::copy_object::builders::CopyObjectFluentBuilder::set_ssekms_encryption_context):
required: **false**

Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for the destination object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs.

General purpose buckets - This value must be explicitly added to specify encryption context for CopyObject requests if you want an additional encryption context for your destination object. The additional encryption context of the source object won't be copied to the destination object. For more information, see Encryption context in the Amazon S3 User Guide.

Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.


/// - [`bucket_key_enabled(bool)`](crate::operation::copy_object::builders::CopyObjectFluentBuilder::bucket_key_enabled) / [`set_bucket_key_enabled(Option)`](crate::operation::copy_object::builders::CopyObjectFluentBuilder::set_bucket_key_enabled):
required: **false**

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object.

Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. Specifying this header with a COPY action doesn’t affect bucket-level settings for S3 Bucket Key.

For more information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide.

Directory buckets - S3 Bucket Keys aren't supported when you copy SSE-KMS encrypted objects from general purpose buckets to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.&#xD;


/// - [`copy_source_sse_customer_algorithm(impl Into)`](crate::operation::copy_object::builders::CopyObjectFluentBuilder::copy_source_sse_customer_algorithm) / [`set_copy_source_sse_customer_algorithm(Option)`](crate::operation::copy_object::builders::CopyObjectFluentBuilder::set_copy_source_sse_customer_algorithm):
required: **false**

Specifies the algorithm to use when decrypting the source object (for example, AES256).

If the source object for the copy is stored in Amazon S3 using SSE-C, you must provide the necessary encryption information in your request so that Amazon S3 can decrypt the object for copying.

This functionality is not supported when the source object is in a directory bucket.


diff --git a/sdk/s3/src/client/create_multipart_upload.rs b/sdk/s3/src/client/create_multipart_upload.rs index 499dadf1a15b..5474d69131a4 100644 --- a/sdk/s3/src/client/create_multipart_upload.rs +++ b/sdk/s3/src/client/create_multipart_upload.rs @@ -23,7 +23,7 @@ impl super::Client { /// - [`sse_customer_algorithm(impl Into)`](crate::operation::create_multipart_upload::builders::CreateMultipartUploadFluentBuilder::sse_customer_algorithm) / [`set_sse_customer_algorithm(Option)`](crate::operation::create_multipart_upload::builders::CreateMultipartUploadFluentBuilder::set_sse_customer_algorithm):
required: **false**

Specifies the algorithm to use when encrypting the object (for example, AES256).

This functionality is not supported for directory buckets.


/// - [`sse_customer_key(impl Into)`](crate::operation::create_multipart_upload::builders::CreateMultipartUploadFluentBuilder::sse_customer_key) / [`set_sse_customer_key(Option)`](crate::operation::create_multipart_upload::builders::CreateMultipartUploadFluentBuilder::set_sse_customer_key):
required: **false**

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.

This functionality is not supported for directory buckets.


/// - [`sse_customer_key_md5(impl Into)`](crate::operation::create_multipart_upload::builders::CreateMultipartUploadFluentBuilder::sse_customer_key_md5) / [`set_sse_customer_key_md5(Option)`](crate::operation::create_multipart_upload::builders::CreateMultipartUploadFluentBuilder::set_sse_customer_key_md5):
required: **false**

Specifies the 128-bit MD5 digest of the customer-provided encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.

This functionality is not supported for directory buckets.


- /// - [`ssekms_key_id(impl Into)`](crate::operation::create_multipart_upload::builders::CreateMultipartUploadFluentBuilder::ssekms_key_id) / [`set_ssekms_key_id(Option)`](crate::operation::create_multipart_upload::builders::CreateMultipartUploadFluentBuilder::set_ssekms_key_id):
required: **false**

Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID.

General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data.

Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.


+ /// - [`ssekms_key_id(impl Into)`](crate::operation::create_multipart_upload::builders::CreateMultipartUploadFluentBuilder::ssekms_key_id) / [`set_ssekms_key_id(Option)`](crate::operation::create_multipart_upload::builders::CreateMultipartUploadFluentBuilder::set_ssekms_key_id):
required: **false**

Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID.

General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data.

Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.


/// - [`ssekms_encryption_context(impl Into)`](crate::operation::create_multipart_upload::builders::CreateMultipartUploadFluentBuilder::ssekms_encryption_context) / [`set_ssekms_encryption_context(Option)`](crate::operation::create_multipart_upload::builders::CreateMultipartUploadFluentBuilder::set_ssekms_encryption_context):
required: **false**

Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs.

Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.


/// - [`bucket_key_enabled(bool)`](crate::operation::create_multipart_upload::builders::CreateMultipartUploadFluentBuilder::bucket_key_enabled) / [`set_bucket_key_enabled(Option)`](crate::operation::create_multipart_upload::builders::CreateMultipartUploadFluentBuilder::set_bucket_key_enabled):
required: **false**

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS).

General purpose buckets - Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. Also, specifying this header with a PUT action doesn't affect bucket-level settings for S3 Bucket Key.

Directory buckets - S3 Bucket Keys are always enabled for GET and PUT operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't supported when you copy SSE-KMS encrypted objects from general purpose buckets to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject, UploadPartCopy, the Copy operation in Batch Operations, or the import jobs. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.&#xD;


/// - [`request_payer(RequestPayer)`](crate::operation::create_multipart_upload::builders::CreateMultipartUploadFluentBuilder::request_payer) / [`set_request_payer(Option)`](crate::operation::create_multipart_upload::builders::CreateMultipartUploadFluentBuilder::set_request_payer):
required: **false**

Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. If either the source or destination S3 bucket has Requester Pays enabled, the requester will pay for corresponding charges to copy the object. For information about downloading objects from Requester Pays buckets, see Downloading Objects in Requester Pays Buckets in the Amazon S3 User Guide.

This functionality is not supported for directory buckets.


diff --git a/sdk/s3/src/client/create_session.rs b/sdk/s3/src/client/create_session.rs index 4981312ab455..0b6a37b6bccf 100644 --- a/sdk/s3/src/client/create_session.rs +++ b/sdk/s3/src/client/create_session.rs @@ -6,7 +6,7 @@ impl super::Client { /// - [`session_mode(SessionMode)`](crate::operation::create_session::builders::CreateSessionFluentBuilder::session_mode) / [`set_session_mode(Option)`](crate::operation::create_session::builders::CreateSessionFluentBuilder::set_session_mode):
required: **false**

Specifies the mode of the session that will be created, either ReadWrite or ReadOnly. By default, a ReadWrite session is created. A ReadWrite session is capable of executing all the Zonal endpoint API operations on a directory bucket. A ReadOnly session is constrained to execute the following Zonal endpoint API operations: GetObject, HeadObject, ListObjectsV2, GetObjectAttributes, ListParts, and ListMultipartUploads.


/// - [`bucket(impl Into)`](crate::operation::create_session::builders::CreateSessionFluentBuilder::bucket) / [`set_bucket(Option)`](crate::operation::create_session::builders::CreateSessionFluentBuilder::set_bucket):
required: **true**

The name of the bucket that you create a session for.


/// - [`server_side_encryption(ServerSideEncryption)`](crate::operation::create_session::builders::CreateSessionFluentBuilder::server_side_encryption) / [`set_server_side_encryption(Option)`](crate::operation::create_session::builders::CreateSessionFluentBuilder::set_server_side_encryption):
required: **false**

The server-side encryption algorithm to use when you store objects in the directory bucket.

For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). By default, Amazon S3 encrypts data with SSE-S3. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide.


- /// - [`ssekms_key_id(impl Into)`](crate::operation::create_session::builders::CreateSessionFluentBuilder::ssekms_key_id) / [`set_ssekms_key_id(Option)`](crate::operation::create_session::builders::CreateSessionFluentBuilder::set_ssekms_key_id):
required: **false**

If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Also, if the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID.&#xD;

Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.


+ /// - [`ssekms_key_id(impl Into)`](crate::operation::create_session::builders::CreateSessionFluentBuilder::ssekms_key_id) / [`set_ssekms_key_id(Option)`](crate::operation::create_session::builders::CreateSessionFluentBuilder::set_ssekms_key_id):
required: **false**

If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Also, if the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID.&#xD;

Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.


/// - [`ssekms_encryption_context(impl Into)`](crate::operation::create_session::builders::CreateSessionFluentBuilder::ssekms_encryption_context) / [`set_ssekms_encryption_context(Option)`](crate::operation::create_session::builders::CreateSessionFluentBuilder::set_ssekms_encryption_context):
required: **false**

Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for object encryption. The value of this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject operations on this object.

General purpose buckets - This value must be explicitly added during CopyObject operations if you want an additional encryption context for your object. For more information, see Encryption context in the Amazon S3 User Guide.

Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.


/// - [`bucket_key_enabled(bool)`](crate::operation::create_session::builders::CreateSessionFluentBuilder::bucket_key_enabled) / [`set_bucket_key_enabled(Option)`](crate::operation::create_session::builders::CreateSessionFluentBuilder::set_bucket_key_enabled):
required: **false**

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using KMS keys (SSE-KMS).

S3 Bucket Keys are always enabled for GET and PUT operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't supported when you copy SSE-KMS encrypted objects from general purpose buckets to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject, UploadPartCopy, the Copy operation in Batch Operations, or the import jobs. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.&#xD;


/// - On success, responds with [`CreateSessionOutput`](crate::operation::create_session::CreateSessionOutput) with field(s): diff --git a/sdk/s3/src/client/list_buckets.rs b/sdk/s3/src/client/list_buckets.rs index ac720d31ef37..01239aaec337 100644 --- a/sdk/s3/src/client/list_buckets.rs +++ b/sdk/s3/src/client/list_buckets.rs @@ -6,10 +6,13 @@ impl super::Client { /// - The fluent builder is configurable: /// - [`max_buckets(i32)`](crate::operation::list_buckets::builders::ListBucketsFluentBuilder::max_buckets) / [`set_max_buckets(Option)`](crate::operation::list_buckets::builders::ListBucketsFluentBuilder::set_max_buckets):
required: **false**

The maximum number of buckets to be returned in the response. When this number is greater than the count of buckets owned by the Amazon Web Services account, all of the account's buckets are returned in the response.&#xD;


/// - [`continuation_token(impl Into)`](crate::operation::list_buckets::builders::ListBucketsFluentBuilder::continuation_token) / [`set_continuation_token(Option)`](crate::operation::list_buckets::builders::ListBucketsFluentBuilder::set_continuation_token):
required: **false**

ContinuationToken indicates to Amazon S3 that the list is being continued on this bucket with a token. ContinuationToken is obfuscated and is not a real key. You can use this ContinuationToken for pagination of the list results.

Length Constraints: Minimum length of 0. Maximum length of 1024.

Required: No.


+ /// - [`prefix(impl Into)`](crate::operation::list_buckets::builders::ListBucketsFluentBuilder::prefix) / [`set_prefix(Option)`](crate::operation::list_buckets::builders::ListBucketsFluentBuilder::set_prefix):
required: **false**

Limits the response to bucket names that begin with the specified bucket name prefix.


+ /// - [`bucket_region(impl Into)`](crate::operation::list_buckets::builders::ListBucketsFluentBuilder::bucket_region) / [`set_bucket_region(Option)`](crate::operation::list_buckets::builders::ListBucketsFluentBuilder::set_bucket_region):
required: **false**

Limits the response to buckets that are located in the specified Amazon Web Services Region. The Amazon Web Services Region must be expressed according to the Amazon Web Services Region code, such as us-west-2 for the US West (Oregon) Region. For a list of the valid values for all of the Amazon Web Services Regions, see Regions and Endpoints.

Requests made to a Regional endpoint that is different from the bucket-region parameter are not supported. For example, if you want to limit the response to your buckets in Region us-west-2, the request must be made to an endpoint in Region us-west-2.


/// - On success, responds with [`ListBucketsOutput`](crate::operation::list_buckets::ListBucketsOutput) with field(s): /// - [`buckets(Option>)`](crate::operation::list_buckets::ListBucketsOutput::buckets):

The list of buckets owned by the requester.

/// - [`owner(Option)`](crate::operation::list_buckets::ListBucketsOutput::owner):

The owner of the buckets listed.

/// - [`continuation_token(Option)`](crate::operation::list_buckets::ListBucketsOutput::continuation_token):

ContinuationToken is included in the response when there are more buckets that can be listed with pagination. The next ListBuckets request to Amazon S3 can be continued with this ContinuationToken. ContinuationToken is obfuscated and is not a real bucket.

+ /// - [`prefix(Option)`](crate::operation::list_buckets::ListBucketsOutput::prefix):

If Prefix was sent with the request, it is included in the response.

All bucket names in the response begin with the specified bucket name prefix.

/// - On failure, responds with [`SdkError`](crate::operation::list_buckets::ListBucketsError) pub fn list_buckets(&self) -> crate::operation::list_buckets::builders::ListBucketsFluentBuilder { crate::operation::list_buckets::builders::ListBucketsFluentBuilder::new(self.handle.clone()) diff --git a/sdk/s3/src/client/put_object.rs b/sdk/s3/src/client/put_object.rs index 36e29dbf3eaa..d384ceb671ae 100644 --- a/sdk/s3/src/client/put_object.rs +++ b/sdk/s3/src/client/put_object.rs @@ -11,9 +11,9 @@ impl super::Client { /// - [`content_encoding(impl Into)`](crate::operation::put_object::builders::PutObjectFluentBuilder::content_encoding) / [`set_content_encoding(Option)`](crate::operation::put_object::builders::PutObjectFluentBuilder::set_content_encoding):
required: **false**

Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding.


/// - [`content_language(impl Into)`](crate::operation::put_object::builders::PutObjectFluentBuilder::content_language) / [`set_content_language(Option)`](crate::operation::put_object::builders::PutObjectFluentBuilder::set_content_language):
required: **false**

The language the content is in.


/// - [`content_length(i64)`](crate::operation::put_object::builders::PutObjectFluentBuilder::content_length) / [`set_content_length(Option)`](crate::operation::put_object::builders::PutObjectFluentBuilder::set_content_length):
required: **false**

Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length.


- /// - [`content_md5(impl Into)`](crate::operation::put_object::builders::PutObjectFluentBuilder::content_md5) / [`set_content_md5(Option)`](crate::operation::put_object::builders::PutObjectFluentBuilder::set_content_md5):
required: **false**

The base64-encoded 128-bit MD5 digest of the message (without the headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see REST Authentication.

The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon S3 User Guide.

This functionality is not supported for directory buckets.


+ /// - [`content_md5(impl Into)`](crate::operation::put_object::builders::PutObjectFluentBuilder::content_md5) / [`set_content_md5(Option)`](crate::operation::put_object::builders::PutObjectFluentBuilder::set_content_md5):
required: **false**

The base64-encoded 128-bit MD5 digest of the message (without the headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see REST Authentication.

The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide.

This functionality is not supported for directory buckets.


/// - [`content_type(impl Into)`](crate::operation::put_object::builders::PutObjectFluentBuilder::content_type) / [`set_content_type(Option)`](crate::operation::put_object::builders::PutObjectFluentBuilder::set_content_type):
required: **false**

A standard MIME type describing the format of the contents. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type.


- /// - [`checksum_algorithm(ChecksumAlgorithm)`](crate::operation::put_object::builders::PutObjectFluentBuilder::checksum_algorithm) / [`set_checksum_algorithm(Option)`](crate::operation::put_object::builders::PutObjectFluentBuilder::set_checksum_algorithm):
required: **false**

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request.

For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list:

  • CRC32

  • CRC32C

  • SHA1

  • SHA256

For more information, see Checking object integrity in the Amazon S3 User Guide.

If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm.&#xD;

For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.


+ /// - [`checksum_algorithm(ChecksumAlgorithm)`](crate::operation::put_object::builders::PutObjectFluentBuilder::checksum_algorithm) / [`set_checksum_algorithm(Option)`](crate::operation::put_object::builders::PutObjectFluentBuilder::set_checksum_algorithm):
required: **false**

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request.

For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list:

  • CRC32

  • CRC32C

  • SHA1

  • SHA256

For more information, see Checking object integrity in the Amazon S3 User Guide.

If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm.&#xD;

The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide.

For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.


/// - [`checksum_crc32(impl Into)`](crate::operation::put_object::builders::PutObjectFluentBuilder::checksum_crc32) / [`set_checksum_crc32(Option)`](crate::operation::put_object::builders::PutObjectFluentBuilder::set_checksum_crc32):
required: **false**

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.


/// - [`checksum_crc32_c(impl Into)`](crate::operation::put_object::builders::PutObjectFluentBuilder::checksum_crc32_c) / [`set_checksum_crc32_c(Option)`](crate::operation::put_object::builders::PutObjectFluentBuilder::set_checksum_crc32_c):
required: **false**

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.


/// - [`checksum_sha1(impl Into)`](crate::operation::put_object::builders::PutObjectFluentBuilder::checksum_sha1) / [`set_checksum_sha1(Option)`](crate::operation::put_object::builders::PutObjectFluentBuilder::set_checksum_sha1):
required: **false**

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 160-bit SHA-1 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.


@@ -32,7 +32,7 @@ impl super::Client { /// - [`sse_customer_algorithm(impl Into)`](crate::operation::put_object::builders::PutObjectFluentBuilder::sse_customer_algorithm) / [`set_sse_customer_algorithm(Option)`](crate::operation::put_object::builders::PutObjectFluentBuilder::set_sse_customer_algorithm):
required: **false**

Specifies the algorithm to use when encrypting the object (for example, AES256).

This functionality is not supported for directory buckets.


/// - [`sse_customer_key(impl Into)`](crate::operation::put_object::builders::PutObjectFluentBuilder::sse_customer_key) / [`set_sse_customer_key(Option)`](crate::operation::put_object::builders::PutObjectFluentBuilder::set_sse_customer_key):
required: **false**

Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header.

This functionality is not supported for directory buckets.


/// - [`sse_customer_key_md5(impl Into)`](crate::operation::put_object::builders::PutObjectFluentBuilder::sse_customer_key_md5) / [`set_sse_customer_key_md5(Option)`](crate::operation::put_object::builders::PutObjectFluentBuilder::set_sse_customer_key_md5):
required: **false**

Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.

This functionality is not supported for directory buckets.


- /// - [`ssekms_key_id(impl Into)`](crate::operation::put_object::builders::PutObjectFluentBuilder::ssekms_key_id) / [`set_ssekms_key_id(Option)`](crate::operation::put_object::builders::PutObjectFluentBuilder::set_ssekms_key_id):
required: **false**

Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID.

General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data.

Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.


+ /// - [`ssekms_key_id(impl Into)`](crate::operation::put_object::builders::PutObjectFluentBuilder::ssekms_key_id) / [`set_ssekms_key_id(Option)`](crate::operation::put_object::builders::PutObjectFluentBuilder::set_ssekms_key_id):
required: **false**

Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID.

General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data.

Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.


/// - [`ssekms_encryption_context(impl Into)`](crate::operation::put_object::builders::PutObjectFluentBuilder::ssekms_encryption_context) / [`set_ssekms_encryption_context(Option)`](crate::operation::put_object::builders::PutObjectFluentBuilder::set_ssekms_encryption_context):
required: **false**

Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for object encryption. The value of this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject operations on this object.

General purpose buckets - This value must be explicitly added during CopyObject operations if you want an additional encryption context for your object. For more information, see Encryption context in the Amazon S3 User Guide.

Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.


/// - [`bucket_key_enabled(bool)`](crate::operation::put_object::builders::PutObjectFluentBuilder::bucket_key_enabled) / [`set_bucket_key_enabled(Option)`](crate::operation::put_object::builders::PutObjectFluentBuilder::set_bucket_key_enabled):
required: **false**

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS).

General purpose buckets - Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. Also, specifying this header with a PUT action doesn't affect bucket-level settings for S3 Bucket Key.

Directory buckets - S3 Bucket Keys are always enabled for GET and PUT operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't supported when you copy SSE-KMS encrypted objects from general purpose buckets to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject, UploadPartCopy, the Copy operation in Batch Operations, or the import jobs. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.


/// - [`request_payer(RequestPayer)`](crate::operation::put_object::builders::PutObjectFluentBuilder::request_payer) / [`set_request_payer(Option)`](crate::operation::put_object::builders::PutObjectFluentBuilder::set_request_payer):
required: **false**

Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. If either the source or destination S3 bucket has Requester Pays enabled, the requester will pay for corresponding charges to copy the object. For information about downloading objects from Requester Pays buckets, see Downloading Objects in Requester Pays Buckets in the Amazon S3 User Guide.

This functionality is not supported for directory buckets.

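The parameter list above covers the checksum and encryption options on the PutObject fluent builder. As a minimal, hypothetical usage sketch (not part of the generated code), the snippet below uploads a small object with an explicit CRC32 checksum and SSE-KMS; the bucket name, object key, and KMS key ARN are placeholder values, and errors are simply propagated.

```rust
use aws_sdk_s3::primitives::ByteStream;
use aws_sdk_s3::types::{ChecksumAlgorithm, ServerSideEncryption};

async fn put_with_checksum_and_sse_kms(client: &aws_sdk_s3::Client) -> Result<(), aws_sdk_s3::Error> {
    client
        .put_object()
        .bucket("amzn-s3-demo-bucket") // placeholder bucket name
        .key("example-object")         // placeholder object key
        .body(ByteStream::from_static(b"hello world"))
        // Ask the SDK to compute and send a CRC32 checksum (x-amz-sdk-checksum-algorithm).
        .checksum_algorithm(ChecksumAlgorithm::Crc32)
        // Request SSE-KMS with a customer managed key; the ARN below is a placeholder.
        .server_side_encryption(ServerSideEncryption::AwsKms)
        .ssekms_key_id("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID")
        .bucket_key_enabled(true)
        .send()
        .await?;
    Ok(())
}
```

If no checksum setter is used at all, the documentation above notes that the SDK defaults to CRC32 for directory buckets.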

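A second sketch, under the same placeholder assumptions, shows the SSE-C alternative documented above (`sse_customer_algorithm`, `sse_customer_key`, `sse_customer_key_md5`). The caller is assumed to supply the base64-encoded 256-bit key and its base64-encoded MD5 digest; as noted above, this functionality is not supported for directory buckets.

```rust
use aws_sdk_s3::primitives::ByteStream;

async fn put_with_sse_c(
    client: &aws_sdk_s3::Client,
    key_b64: &str,     // base64-encoded 256-bit customer key (caller-supplied)
    key_md5_b64: &str, // base64-encoded MD5 digest of the raw key (caller-supplied)
) -> Result<(), aws_sdk_s3::Error> {
    client
        .put_object()
        .bucket("amzn-s3-demo-bucket") // placeholder bucket name
        .key("example-object")         // placeholder object key
        .body(ByteStream::from_static(b"hello world"))
        // SSE-C: Amazon S3 encrypts with the provided key and does not store the key.
        .sse_customer_algorithm("AES256")
        .sse_customer_key(key_b64)
        .sse_customer_key_md5(key_md5_b64)
        .send()
        .await?;
    Ok(())
}
```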
diff --git a/sdk/s3/src/lib.rs b/sdk/s3/src/lib.rs
index 208eece6086f..9bb5ff2b195b 100644
--- a/sdk/s3/src/lib.rs
+++ b/sdk/s3/src/lib.rs
@@ -29,7 +29,7 @@
 //! ```toml
 //! [dependencies]
 //! aws-config = { version = "1.1.7", features = ["behavior-version-latest"] }
-//! aws-sdk-s3 = "1.56.0"
+//! aws-sdk-s3 = "1.57.0"
 //! tokio = { version = "1", features = ["full"] }
 //! ```
 //!
diff --git a/sdk/s3/src/operation/copy_object/_copy_object_input.rs b/sdk/s3/src/operation/copy_object/_copy_object_input.rs
index 650b920e0987..103fe693ed0f 100644
--- a/sdk/s3/src/operation/copy_object/_copy_object_input.rs
+++ b/sdk/s3/src/operation/copy_object/_copy_object_input.rs
@@ -202,7 +202,7 @@ pub struct CopyObjectInput {
  • ///

    For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

  • ///
  • - ///

    To encrypt new object copies to a directory bucket with SSE-KMS, we recommend you specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). Amazon Web Services managed key (aws/s3) isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. After you specify a customer managed key for SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS configuration. Then, when you perform a CopyObject operation and want to specify server-side encryption settings for new object copies with SSE-KMS in the encryption-related request headers, you must ensure the encryption key is the same customer managed key that you specified for the directory bucket's default encryption configuration.

  • + ///

    To encrypt new object copies to a directory bucket with SSE-KMS, we recommend you specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). The Amazon Web Services managed key (aws/s3) isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. After you specify a customer managed key for SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS configuration. Then, when you perform a CopyObject operation and want to specify server-side encryption settings for new object copies with SSE-KMS in the encryption-related request headers, you must ensure the encryption key is the same customer managed key that you specified for the directory bucket's default encryption configuration.

    pub server_side_encryption: ::std::option::Option<crate::types::ServerSideEncryption>,

    If the x-amz-storage-class header is not used, the copied object will be stored in the STANDARD Storage Class by default. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class.

@@ -241,7 +241,7 @@ pub struct CopyObjectInput {
    pub sse_customer_key_md5: ::std::option::Option<::std::string::String>,

    Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. All GET and PUT requests for an object protected by KMS will fail if they're not made via SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.

    - ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.

    + ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.

    pub ssekms_key_id: ::std::option::Option<::std::string::String>,

    Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for the destination object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs.

    ///

    General purpose buckets - This value must be explicitly added to specify encryption context for CopyObject requests if you want an additional encryption context for your destination object. The additional encryption context of the source object won't be copied to the destination object. For more information, see Encryption context in the Amazon S3 User Guide.

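To illustrate the CopyObjectInput fields documented above, here is a minimal, hypothetical sketch of a copy that requests SSE-KMS on the destination object; for a directory bucket destination, the key supplied here must be the same customer managed key configured as the bucket's default SSE-KMS key, per the updated text. All bucket names, object keys, and the key ARN are placeholders.

```rust
use aws_sdk_s3::types::ServerSideEncryption;

async fn copy_with_sse_kms(client: &aws_sdk_s3::Client) -> Result<(), aws_sdk_s3::Error> {
    client
        .copy_object()
        // Source given as "bucket/key", URL-encoded where necessary; placeholder values.
        .copy_source("amzn-s3-demo-source-bucket/source-object")
        .bucket("amzn-s3-demo-dest-bucket") // placeholder destination bucket
        .key("dest-object")                 // placeholder destination key
        .server_side_encryption(ServerSideEncryption::AwsKms)
        // Key ID or key ARN only (key aliases are rejected for directory buckets); placeholder ARN.
        .ssekms_key_id("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID")
        .send()
        .await?;
    Ok(())
}
```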
@@ -557,7 +557,7 @@ impl CopyObjectInput {
  • ///

    For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

  • ///
  • - ///

    To encrypt new object copies to a directory bucket with SSE-KMS, we recommend you specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). Amazon Web Services managed key (aws/s3) isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. After you specify a customer managed key for SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS configuration. Then, when you perform a CopyObject operation and want to specify server-side encryption settings for new object copies with SSE-KMS in the encryption-related request headers, you must ensure the encryption key is the same customer managed key that you specified for the directory bucket's default encryption configuration.

  • + ///

    To encrypt new object copies to a directory bucket with SSE-KMS, we recommend you specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). The Amazon Web Services managed key (aws/s3) isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. After you specify a customer managed key for SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS configuration. Then, when you perform a CopyObject operation and want to specify server-side encryption settings for new object copies with SSE-KMS in the encryption-related request headers, you must ensure the encryption key is the same customer managed key that you specified for the directory bucket's default encryption configuration.

    pub fn server_side_encryption(&self) -> ::std::option::Option<&crate::types::ServerSideEncryption> {
        self.server_side_encryption.as_ref()
@@ -608,7 +608,7 @@ impl CopyObjectInput {
        self.sse_customer_key_md5.as_deref()
    }

    Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. All GET and PUT requests for an object protected by KMS will fail if they're not made via SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.

    - ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.

    + ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.

    pub fn ssekms_key_id(&self) -> ::std::option::Option<&str> {
        self.ssekms_key_id.as_deref()
    }
@@ -1559,7 +1559,7 @@ impl CopyObjectInputBuilder {
  • ///

    For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

  • ///
  • - ///

    To encrypt new object copies to a directory bucket with SSE-KMS, we recommend you specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). Amazon Web Services managed key (aws/s3) isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. After you specify a customer managed key for SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS configuration. Then, when you perform a CopyObject operation and want to specify server-side encryption settings for new object copies with SSE-KMS in the encryption-related request headers, you must ensure the encryption key is the same customer managed key that you specified for the directory bucket's default encryption configuration.

  • + ///

    To encrypt new object copies to a directory bucket with SSE-KMS, we recommend you specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). The Amazon Web Services managed key (aws/s3) isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. After you specify a customer managed key for SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS configuration. Then, when you perform a CopyObject operation and want to specify server-side encryption settings for new object copies with SSE-KMS in the encryption-related request headers, you must ensure the encryption key is the same customer managed key that you specified for the directory bucket's default encryption configuration.

    pub fn server_side_encryption(mut self, input: crate::types::ServerSideEncryption) -> Self {
        self.server_side_encryption = ::std::option::Option::Some(input);
@@ -1580,7 +1580,7 @@ impl CopyObjectInputBuilder {
  • ///

    For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

  • ///
  • - ///

    To encrypt new object copies to a directory bucket with SSE-KMS, we recommend you specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). Amazon Web Services managed key (aws/s3) isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. After you specify a customer managed key for SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS configuration. Then, when you perform a CopyObject operation and want to specify server-side encryption settings for new object copies with SSE-KMS in the encryption-related request headers, you must ensure the encryption key is the same customer managed key that you specified for the directory bucket's default encryption configuration.

  • + ///

    To encrypt new object copies to a directory bucket with SSE-KMS, we recommend you specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). The Amazon Web Services managed key (aws/s3) isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. After you specify a customer managed key for SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS configuration. Then, when you perform a CopyObject operation and want to specify server-side encryption settings for new object copies with SSE-KMS in the encryption-related request headers, you must ensure the encryption key is the same customer managed key that you specified for the directory bucket's default encryption configuration.

    pub fn set_server_side_encryption(mut self, input: ::std::option::Option<crate::types::ServerSideEncryption>) -> Self {
        self.server_side_encryption = input;
@@ -1601,7 +1601,7 @@ impl CopyObjectInputBuilder {
  • ///

    For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

  • ///
  • - ///

    To encrypt new object copies to a directory bucket with SSE-KMS, we recommend you specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). Amazon Web Services managed key (aws/s3) isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. After you specify a customer managed key for SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS configuration. Then, when you perform a CopyObject operation and want to specify server-side encryption settings for new object copies with SSE-KMS in the encryption-related request headers, you must ensure the encryption key is the same customer managed key that you specified for the directory bucket's default encryption configuration.

  • + ///

    To encrypt new object copies to a directory bucket with SSE-KMS, we recommend you specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). The Amazon Web Services managed key (aws/s3) isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. After you specify a customer managed key for SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS configuration. Then, when you perform a CopyObject operation and want to specify server-side encryption settings for new object copies with SSE-KMS in the encryption-related request headers, you must ensure the encryption key is the same customer managed key that you specified for the directory bucket's default encryption configuration.

    pub fn get_server_side_encryption(&self) -> &::std::option::Option<crate::types::ServerSideEncryption> {
        &self.server_side_encryption
@@ -1752,19 +1752,19 @@ impl CopyObjectInputBuilder {
        &self.sse_customer_key_md5
    }

    Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. All GET and PUT requests for an object protected by KMS will fail if they're not made via SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.

    - ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.

    + ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.

    pub fn ssekms_key_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.ssekms_key_id = ::std::option::Option::Some(input.into());
        self
    }

    Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. All GET and PUT requests for an object protected by KMS will fail if they're not made via SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.

    - ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.

    + ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.

    pub fn set_ssekms_key_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.ssekms_key_id = input;
        self
    }

    Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. All GET and PUT requests for an object protected by KMS will fail if they're not made via SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.

    - ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.

    + ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.

    pub fn get_ssekms_key_id(&self) -> &::std::option::Option<::std::string::String> {
        &self.ssekms_key_id
    }
diff --git a/sdk/s3/src/operation/copy_object/builders.rs b/sdk/s3/src/operation/copy_object/builders.rs
index 7d98044c1503..a623caa0b926 100644
--- a/sdk/s3/src/operation/copy_object/builders.rs
+++ b/sdk/s3/src/operation/copy_object/builders.rs
@@ -945,7 +945,7 @@ impl CopyObjectFluentBuilder {
  • ///

    For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

  • ///
  • - ///

    To encrypt new object copies to a directory bucket with SSE-KMS, we recommend you specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). Amazon Web Services managed key (aws/s3) isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. After you specify a customer managed key for SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS configuration. Then, when you perform a CopyObject operation and want to specify server-side encryption settings for new object copies with SSE-KMS in the encryption-related request headers, you must ensure the encryption key is the same customer managed key that you specified for the directory bucket's default encryption configuration.

  • + ///

    To encrypt new object copies to a directory bucket with SSE-KMS, we recommend you specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). The Amazon Web Services managed key (aws/s3) isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. After you specify a customer managed key for SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS configuration. Then, when you perform a CopyObject operation and want to specify server-side encryption settings for new object copies with SSE-KMS in the encryption-related request headers, you must ensure the encryption key is the same customer managed key that you specified for the directory bucket's default encryption configuration.

    pub fn server_side_encryption(mut self, input: crate::types::ServerSideEncryption) -> Self {
        self.inner = self.inner.server_side_encryption(input);
@@ -966,7 +966,7 @@ impl CopyObjectFluentBuilder {
  • ///

    For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

  • ///
  • - ///

    To encrypt new object copies to a directory bucket with SSE-KMS, we recommend you specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). Amazon Web Services managed key (aws/s3) isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. After you specify a customer managed key for SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS configuration. Then, when you perform a CopyObject operation and want to specify server-side encryption settings for new object copies with SSE-KMS in the encryption-related request headers, you must ensure the encryption key is the same customer managed key that you specified for the directory bucket's default encryption configuration.

  • + ///

    To encrypt new object copies to a directory bucket with SSE-KMS, we recommend you specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). The Amazon Web Services managed key (aws/s3) isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. After you specify a customer managed key for SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS configuration. Then, when you perform a CopyObject operation and want to specify server-side encryption settings for new object copies with SSE-KMS in the encryption-related request headers, you must ensure the encryption key is the same customer managed key that you specified for the directory bucket's default encryption configuration.

    pub fn set_server_side_encryption(mut self, input: ::std::option::Option<crate::types::ServerSideEncryption>) -> Self {
        self.inner = self.inner.set_server_side_encryption(input);
@@ -987,7 +987,7 @@ impl CopyObjectFluentBuilder {
  • ///

    For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

  • ///
  • - ///

    To encrypt new object copies to a directory bucket with SSE-KMS, we recommend you specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). Amazon Web Services managed key (aws/s3) isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. After you specify a customer managed key for SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS configuration. Then, when you perform a CopyObject operation and want to specify server-side encryption settings for new object copies with SSE-KMS in the encryption-related request headers, you must ensure the encryption key is the same customer managed key that you specified for the directory bucket's default encryption configuration.

  • + ///

    To encrypt new object copies to a directory bucket with SSE-KMS, we recommend you specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). The Amazon Web Services managed key (aws/s3) isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. After you specify a customer managed key for SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS configuration. Then, when you perform a CopyObject operation and want to specify server-side encryption settings for new object copies with SSE-KMS in the encryption-related request headers, you must ensure the encryption key is the same customer managed key that you specified for the directory bucket's default encryption configuration.

    pub fn get_server_side_encryption(&self) -> &::std::option::Option<crate::types::ServerSideEncryption> {
        self.inner.get_server_side_encryption()
@@ -1138,19 +1138,19 @@ impl CopyObjectFluentBuilder {
        self.inner.get_sse_customer_key_md5()
    }

    Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. All GET and PUT requests for an object protected by KMS will fail if they're not made via SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.

    - ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.

    + ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.

    pub fn ssekms_key_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.inner = self.inner.ssekms_key_id(input.into());
        self
    }

    Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. All GET and PUT requests for an object protected by KMS will fail if they're not made via SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.

    - ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.

    + ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.

    pub fn set_ssekms_key_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
        self.inner = self.inner.set_ssekms_key_id(input);
        self
    }

    Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. All GET and PUT requests for an object protected by KMS will fail if they're not made via SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.

    - ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.

    + ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.

    pub fn get_ssekms_key_id(&self) -> &::std::option::Option<::std::string::String> {
        self.inner.get_ssekms_key_id()
    }
diff --git a/sdk/s3/src/operation/create_multipart_upload/_create_multipart_upload_input.rs b/sdk/s3/src/operation/create_multipart_upload/_create_multipart_upload_input.rs
index 38453825999e..455ff85710a3 100644
--- a/sdk/s3/src/operation/create_multipart_upload/_create_multipart_upload_input.rs
+++ b/sdk/s3/src/operation/create_multipart_upload/_create_multipart_upload_input.rs
@@ -242,7 +242,7 @@ pub struct CreateMultipartUploadInput {
    pub sse_customer_key_md5: ::std::option::Option<::std::string::String>,

    Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID.

    ///

    General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data.

    - ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.

    + ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.

    pub ssekms_key_id: ::std::option::Option<::std::string::String>,

    Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs.

    ///

    Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.

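Ahead of the accessor and builder hunks that follow, here is a minimal, hypothetical sketch of starting a multipart upload with the SSE-KMS settings described above; the returned upload ID is what later UploadPart and CompleteMultipartUpload calls reference. The bucket name, object key, and KMS key ARN are placeholders.

```rust
use aws_sdk_s3::types::ServerSideEncryption;

async fn start_sse_kms_multipart_upload(client: &aws_sdk_s3::Client) -> Result<String, aws_sdk_s3::Error> {
    let resp = client
        .create_multipart_upload()
        .bucket("amzn-s3-demo-bucket") // placeholder bucket name
        .key("large-object")           // placeholder object key
        .server_side_encryption(ServerSideEncryption::AwsKms)
        // Key ID or key ARN of the customer managed key; placeholder ARN.
        .ssekms_key_id("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID")
        .send()
        .await?;
    // The upload ID ties the later UploadPart and CompleteMultipartUpload calls together.
    Ok(resp.upload_id().unwrap_or_default().to_string())
}
```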
@@ -556,7 +556,7 @@ impl CreateMultipartUploadInput {
    }

    Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID.

    ///

    General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data.

    - ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.

    + ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.

    pub fn ssekms_key_id(&self) -> ::std::option::Option<&str> {
        self.ssekms_key_id.as_deref()
    }
@@ -1570,21 +1570,21 @@ impl CreateMultipartUploadInputBuilder {
    }

    Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID.

    ///

    General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data.

    - ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.

    + ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.

    pub fn ssekms_key_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
        self.ssekms_key_id = ::std::option::Option::Some(input.into());
        self
    }

    Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID.

    ///

    General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data.

    - ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.

    + ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.

    pub fn set_ssekms_key_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self { self.ssekms_key_id = input; self } ///

    Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID.

    ///

    General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data.

    - ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.

    + ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.

    pub fn get_ssekms_key_id(&self) -> &::std::option::Option<::std::string::String> { &self.ssekms_key_id } diff --git a/sdk/s3/src/operation/create_multipart_upload/builders.rs b/sdk/s3/src/operation/create_multipart_upload/builders.rs index 5b5e3552e11c..2d806c4c1d51 100644 --- a/sdk/s3/src/operation/create_multipart_upload/builders.rs +++ b/sdk/s3/src/operation/create_multipart_upload/builders.rs @@ -1085,21 +1085,21 @@ impl CreateMultipartUploadFluentBuilder { } ///

    Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID.

    ///

    General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data.

    - ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.

    + ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.

    pub fn ssekms_key_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self { self.inner = self.inner.ssekms_key_id(input.into()); self } ///

    Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID.

    ///

    General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data.

    - ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.

    + ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.

    pub fn set_ssekms_key_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self { self.inner = self.inner.set_ssekms_key_id(input); self } ///

    Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID.

    ///

    General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data.

    - ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.

    + ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.

    pub fn get_ssekms_key_id(&self) -> &::std::option::Option<::std::string::String> { self.inner.get_ssekms_key_id() } diff --git a/sdk/s3/src/operation/create_session/_create_session_input.rs b/sdk/s3/src/operation/create_session/_create_session_input.rs index e539151f794c..d71fe3a61643 100644 --- a/sdk/s3/src/operation/create_session/_create_session_input.rs +++ b/sdk/s3/src/operation/create_session/_create_session_input.rs @@ -11,7 +11,7 @@ pub struct CreateSessionInput { ///
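
A minimal usage sketch (not part of the diff) for the ssekms_key_id setter shown above, assuming a recent aws-sdk-s3 client inside a Tokio runtime; the bucket name, object key, and KMS key ARN are placeholders, and for a directory bucket the key must be the customer managed key already configured as the bucket's default encryption:

use aws_sdk_s3::types::ServerSideEncryption;

// Start a multipart upload whose parts will be stored with SSE-KMS.
async fn start_kms_multipart_upload(client: &aws_sdk_s3::Client) -> Result<String, aws_sdk_s3::Error> {
    let resp = client
        .create_multipart_upload()
        .bucket("amzn-s3-demo-bucket")                                          // placeholder bucket
        .key("logs/2024/archive.bin")                                           // placeholder object key
        .server_side_encryption(ServerSideEncryption::AwsKms)                   // x-amz-server-side-encryption: aws:kms
        .ssekms_key_id("arn:aws:kms:us-west-2:111122223333:key/EXAMPLE-KEY-ID") // key ID or key ARN, never an alias
        .send()
        .await?;
    // The upload ID is required by the follow-up UploadPart and CompleteMultipartUpload calls.
    Ok(resp.upload_id().unwrap_or_default().to_string())
}
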

    For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). By default, Amazon S3 encrypts data with SSE-S3. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide.

pub server_side_encryption: ::std::option::Option<crate::types::ServerSideEncryption>, ///

If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Also, if the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID.

    - ///

    Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.

    + ///

    Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.

    pub ssekms_key_id: ::std::option::Option<::std::string::String>, ///

    Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for object encryption. The value of this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject operations on this object.

    ///

    General purpose buckets - This value must be explicitly added during CopyObject operations if you want an additional encryption context for your object. For more information, see Encryption context in the Amazon S3 User Guide.

    @@ -36,7 +36,7 @@ impl CreateSessionInput { self.server_side_encryption.as_ref() } ///

If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Also, if the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID.

    - ///

    Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.

    + ///

    Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.

    pub fn ssekms_key_id(&self) -> ::std::option::Option<&str> { self.ssekms_key_id.as_deref() } @@ -130,19 +130,19 @@ impl CreateSessionInputBuilder { &self.server_side_encryption } ///

If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Also, if the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID.

    - ///

    Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.

    + ///

    Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.

    pub fn ssekms_key_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self { self.ssekms_key_id = ::std::option::Option::Some(input.into()); self } ///

If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Also, if the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID.

    - ///

    Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.

    + ///

    Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.

    pub fn set_ssekms_key_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self { self.ssekms_key_id = input; self } ///

If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Also, if the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID.

    - ///

    Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.

    + ///

    Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.

    pub fn get_ssekms_key_id(&self) -> &::std::option::Option<::std::string::String> { &self.ssekms_key_id } diff --git a/sdk/s3/src/operation/create_session/builders.rs b/sdk/s3/src/operation/create_session/builders.rs index 02f0271828cd..97261dc3d661 100644 --- a/sdk/s3/src/operation/create_session/builders.rs +++ b/sdk/s3/src/operation/create_session/builders.rs @@ -49,7 +49,7 @@ impl crate::operation::create_session::builders::CreateSessionInputBuilder { ///
    ///

    For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

    ///

    For Zonal endpoint (object-level) API operations except CopyObject and UploadPartCopy, you authenticate and authorize requests through CreateSession for low latency. To encrypt new objects in a directory bucket with SSE-KMS, you must specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). Then, when a session is created for Zonal endpoint API operations, new objects are automatically encrypted and decrypted with SSE-KMS and S3 Bucket Keys during the session.

    -///

    Only 1 customer managed key is supported per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported. After you specify SSE-KMS as your bucket's default encryption configuration with a customer managed key, you can't change the customer managed key for the bucket's SSE-KMS configuration.

    +///

    Only 1 customer managed key is supported per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported. After you specify SSE-KMS as your bucket's default encryption configuration with a customer managed key, you can't change the customer managed key for the bucket's SSE-KMS configuration.

    ///
    ///

    In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, you can't override the values of the encryption settings (x-amz-server-side-encryption, x-amz-server-side-encryption-aws-kms-key-id, x-amz-server-side-encryption-context, and x-amz-server-side-encryption-bucket-key-enabled) from the CreateSession request. You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and Amazon S3 will use the encryption settings values from the CreateSession request to protect new objects in the directory bucket.

    ///

    When you use the CLI or the Amazon Web Services SDKs, for CreateSession, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the CreateSession request. It's not supported to override the encryption settings values in the CreateSession request. Also, in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), it's not supported to override the values of the encryption settings from the CreateSession request.

    @@ -193,19 +193,19 @@ impl CreateSessionFluentBuilder { self.inner.get_server_side_encryption() } ///

If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Also, if the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID.

    - ///

    Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.

    + ///

    Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.

    pub fn ssekms_key_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self { self.inner = self.inner.ssekms_key_id(input.into()); self } ///

If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Also, if the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID.

    - ///

    Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.

    + ///

    Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.

    pub fn set_ssekms_key_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self { self.inner = self.inner.set_ssekms_key_id(input); self } ///

If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Also, if the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID.

    - ///

    Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.

    + ///

    Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.

    pub fn get_ssekms_key_id(&self) -> &::std::option::Option<::std::string::String> { self.inner.get_ssekms_key_id() } diff --git a/sdk/s3/src/operation/delete_object/builders.rs b/sdk/s3/src/operation/delete_object/builders.rs index b703761a369b..b5ca65326ccb 100644 --- a/sdk/s3/src/operation/delete_object/builders.rs +++ b/sdk/s3/src/operation/delete_object/builders.rs @@ -22,29 +22,17 @@ impl crate::operation::delete_object::builders::DeleteObjectInputBuilder { } /// Fluent builder constructing a request to `DeleteObject`. /// -///
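
An illustrative sketch (not part of the diff) of calling CreateSession directly; in practice the SDK creates and refreshes directory-bucket sessions for you, and, per the documentation above, the encryption settings are deliberately left unset so the bucket's default SSE-KMS configuration applies. The bucket name is a placeholder:

// Rarely needed directly: the SDK normally manages the session token itself.
async fn create_directory_bucket_session(client: &aws_sdk_s3::Client) -> Result<(), aws_sdk_s3::Error> {
    let resp = client
        .create_session()
        .bucket("amzn-s3-demo-bucket--usw2-az1--x-s3") // placeholder directory bucket name
        .send()
        .await?;
    // Temporary credentials scoped to the directory bucket; the SDK refreshes these automatically.
    let _credentials = resp.credentials();
    Ok(())
}
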

    Removes an object from a bucket. The behavior depends on the bucket's versioning state:

    +///

    Removes an object from a bucket. The behavior depends on the bucket's versioning state. For more information, see Best practices to consider before deleting an object.

    +///

    To remove a specific version, you must use the versionId query parameter. Using this query parameter permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header x-amz-delete-marker to true. If the object you want to delete is in a bucket where the bucket versioning configuration is MFA delete enabled, you must include the x-amz-mfa request header in the DELETE versionId request. Requests that include x-amz-mfa must use HTTPS. For more information about MFA delete and to see example requests, see Using MFA delete and Sample request in the Amazon S3 User Guide.

    ///
      ///
    • -///

      If bucket versioning is not enabled, the operation permanently deletes the object.

    • +///

      S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null to the versionId query parameter in the request.

      ///
    • -///

      If bucket versioning is enabled, the operation inserts a delete marker, which becomes the current version of the object. To permanently delete an object in a versioned bucket, you must include the object’s versionId in the request. For more information about versioning-enabled buckets, see Deleting object versions from a versioning-enabled bucket.

    • +///

      For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

      ///
    • -///

      If bucket versioning is suspended, the operation removes the object that has a null versionId, if there is one, and inserts a delete marker that becomes the current version of the object. If there isn't an object with a null versionId, and all versions of the object have a versionId, Amazon S3 does not remove the object and only inserts a delete marker. To permanently delete an object that has a versionId, you must include the object’s versionId in the request. For more information about versioning-suspended buckets, see Deleting objects from versioning-suspended buckets.

    • -///
    -///
      -///
    • -///

      Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null to the versionId query parameter in the request.

    • -///
    • -///

      Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

    • +///

      MFA delete is not supported by directory buckets.

      ///
    ///
    -///

    To remove a specific version, you must use the versionId query parameter. Using this query parameter permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header x-amz-delete-marker to true.

    -///

    If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled, you must include the x-amz-mfa request header in the DELETE versionId request. Requests that include x-amz-mfa must use HTTPS. For more information about MFA Delete, see Using MFA Delete in the Amazon S3 User Guide. To see sample requests that use versioning, see Sample Request.

    -///

    Directory buckets - MFA delete is not supported by directory buckets.

    -///
    -///

    You can delete objects by explicitly calling DELETE Object or calling (PutBucketLifecycle) to enable Amazon S3 to remove them for you. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration actions.

    -///

    Directory buckets - S3 Lifecycle is not supported by directory buckets.

    -///
    ///
    ///
    /// Permissions @@ -55,12 +43,16 @@ impl crate::operation::delete_object::builders::DeleteObjectInputBuilder { ///

    General purpose bucket permissions - The following permissions are required in your policies when your DeleteObjects request includes specific headers.

    ///
      ///
    • -///

      s3:DeleteObject - To delete an object from a bucket, you must always have the s3:DeleteObject permission.

    • +///

      s3:DeleteObject - To delete an object from a bucket, you must always have the s3:DeleteObject permission.

      +///

      You can also use PutBucketLifecycle to delete objects in Amazon S3.

      +///
      ///
    • ///

      s3:DeleteObjectVersion - To delete a specific version of an object from a versioning-enabled bucket, you must have the s3:DeleteObjectVersion permission.

    • +///
    • +///

      If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration permissions.

    • ///
    ///
  • -///

    Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

  • +///

    Directory buckets permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization.

    /// ///
    ///
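
A hedged sketch (not part of the diff) of the versioned delete described above; bucket, key, and version ID are placeholders, and for directory buckets only the null version ID is accepted:

// Permanently delete one specific object version.
async fn delete_one_version(client: &aws_sdk_s3::Client) -> Result<(), aws_sdk_s3::Error> {
    let resp = client
        .delete_object()
        .bucket("amzn-s3-demo-bucket")     // placeholder bucket
        .key("reports/2023-summary.csv")   // placeholder key
        .version_id("EXAMPLE-VERSION-ID")  // placeholder versionId; this deletes the version permanently
        .send()
        .await?;
    // x-amz-delete-marker is surfaced here when the deleted version was a delete marker.
    let _deleted_a_marker = resp.delete_marker().unwrap_or_default();
    Ok(())
}
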
    diff --git a/sdk/s3/src/operation/list_buckets.rs b/sdk/s3/src/operation/list_buckets.rs index 6f079a56c90b..ef10ad39f6e3 100644 --- a/sdk/s3/src/operation/list_buckets.rs +++ b/sdk/s3/src/operation/list_buckets.rs @@ -205,6 +205,16 @@ impl ::aws_smithy_runtime_api::client::ser_de::SerializeRequest for ListBucketsR query.push_kv("continuation-token", &::aws_smithy_http::query::fmt_string(inner_2)); } } + if let ::std::option::Option::Some(inner_3) = &_input.prefix { + { + query.push_kv("prefix", &::aws_smithy_http::query::fmt_string(inner_3)); + } + } + if let ::std::option::Option::Some(inner_4) = &_input.bucket_region { + { + query.push_kv("bucket-region", &::aws_smithy_http::query::fmt_string(inner_4)); + } + } ::std::result::Result::Ok(()) } #[allow(clippy::unnecessary_wraps)] diff --git a/sdk/s3/src/operation/list_buckets/_list_buckets_input.rs b/sdk/s3/src/operation/list_buckets/_list_buckets_input.rs index d509595ec416..456e6b4fa97d 100644 --- a/sdk/s3/src/operation/list_buckets/_list_buckets_input.rs +++ b/sdk/s3/src/operation/list_buckets/_list_buckets_input.rs @@ -9,6 +9,12 @@ pub struct ListBucketsInput { ///

    Length Constraints: Minimum length of 0. Maximum length of 1024.

    ///

    Required: No.

    pub continuation_token: ::std::option::Option<::std::string::String>, + ///

    Limits the response to bucket names that begin with the specified bucket name prefix.

    + pub prefix: ::std::option::Option<::std::string::String>, + ///

    Limits the response to buckets that are located in the specified Amazon Web Services Region. The Amazon Web Services Region must be expressed according to the Amazon Web Services Region code, such as us-west-2 for the US West (Oregon) Region. For a list of the valid values for all of the Amazon Web Services Regions, see Regions and Endpoints.

    + ///

    Requests made to a Regional endpoint that is different from the bucket-region parameter are not supported. For example, if you want to limit the response to your buckets in Region us-west-2, the request must be made to an endpoint in Region us-west-2.

    + ///
    + pub bucket_region: ::std::option::Option<::std::string::String>, } impl ListBucketsInput { ///

    Maximum number of buckets to be returned in response. When the number is more than the count of buckets that are owned by an Amazon Web Services account, return all the buckets in response.

    @@ -21,6 +27,16 @@ impl ListBucketsInput { pub fn continuation_token(&self) -> ::std::option::Option<&str> { self.continuation_token.as_deref() } + ///

    Limits the response to bucket names that begin with the specified bucket name prefix.

    + pub fn prefix(&self) -> ::std::option::Option<&str> { + self.prefix.as_deref() + } + ///

    Limits the response to buckets that are located in the specified Amazon Web Services Region. The Amazon Web Services Region must be expressed according to the Amazon Web Services Region code, such as us-west-2 for the US West (Oregon) Region. For a list of the valid values for all of the Amazon Web Services Regions, see Regions and Endpoints.

    + ///

    Requests made to a Regional endpoint that is different from the bucket-region parameter are not supported. For example, if you want to limit the response to your buckets in Region us-west-2, the request must be made to an endpoint in Region us-west-2.

    + ///
    + pub fn bucket_region(&self) -> ::std::option::Option<&str> { + self.bucket_region.as_deref() + } } impl ListBucketsInput { /// Creates a new builder-style object to manufacture [`ListBucketsInput`](crate::operation::list_buckets::ListBucketsInput). @@ -35,6 +51,8 @@ impl ListBucketsInput { pub struct ListBucketsInputBuilder { pub(crate) max_buckets: ::std::option::Option, pub(crate) continuation_token: ::std::option::Option<::std::string::String>, + pub(crate) prefix: ::std::option::Option<::std::string::String>, + pub(crate) bucket_region: ::std::option::Option<::std::string::String>, } impl ListBucketsInputBuilder { ///

    Maximum number of buckets to be returned in response. When the number is more than the count of buckets that are owned by an Amazon Web Services account, return all the buckets in response.

    @@ -71,11 +89,47 @@ impl ListBucketsInputBuilder { pub fn get_continuation_token(&self) -> &::std::option::Option<::std::string::String> { &self.continuation_token } + ///

    Limits the response to bucket names that begin with the specified bucket name prefix.

    + pub fn prefix(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self { + self.prefix = ::std::option::Option::Some(input.into()); + self + } + ///

    Limits the response to bucket names that begin with the specified bucket name prefix.

    + pub fn set_prefix(mut self, input: ::std::option::Option<::std::string::String>) -> Self { + self.prefix = input; + self + } + ///

    Limits the response to bucket names that begin with the specified bucket name prefix.

    + pub fn get_prefix(&self) -> &::std::option::Option<::std::string::String> { + &self.prefix + } + ///

    Limits the response to buckets that are located in the specified Amazon Web Services Region. The Amazon Web Services Region must be expressed according to the Amazon Web Services Region code, such as us-west-2 for the US West (Oregon) Region. For a list of the valid values for all of the Amazon Web Services Regions, see Regions and Endpoints.

    + ///

    Requests made to a Regional endpoint that is different from the bucket-region parameter are not supported. For example, if you want to limit the response to your buckets in Region us-west-2, the request must be made to an endpoint in Region us-west-2.

    + ///
    + pub fn bucket_region(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self { + self.bucket_region = ::std::option::Option::Some(input.into()); + self + } + ///

    Limits the response to buckets that are located in the specified Amazon Web Services Region. The Amazon Web Services Region must be expressed according to the Amazon Web Services Region code, such as us-west-2 for the US West (Oregon) Region. For a list of the valid values for all of the Amazon Web Services Regions, see Regions and Endpoints.

    + ///

    Requests made to a Regional endpoint that is different from the bucket-region parameter are not supported. For example, if you want to limit the response to your buckets in Region us-west-2, the request must be made to an endpoint in Region us-west-2.

    + ///
    + pub fn set_bucket_region(mut self, input: ::std::option::Option<::std::string::String>) -> Self { + self.bucket_region = input; + self + } + ///

    Limits the response to buckets that are located in the specified Amazon Web Services Region. The Amazon Web Services Region must be expressed according to the Amazon Web Services Region code, such as us-west-2 for the US West (Oregon) Region. For a list of the valid values for all of the Amazon Web Services Regions, see Regions and Endpoints.

    + ///

    Requests made to a Regional endpoint that is different from the bucket-region parameter are not supported. For example, if you want to limit the response to your buckets in Region us-west-2, the request must be made to an endpoint in Region us-west-2.

    + ///
    + pub fn get_bucket_region(&self) -> &::std::option::Option<::std::string::String> { + &self.bucket_region + } /// Consumes the builder and constructs a [`ListBucketsInput`](crate::operation::list_buckets::ListBucketsInput). pub fn build(self) -> ::std::result::Result { ::std::result::Result::Ok(crate::operation::list_buckets::ListBucketsInput { max_buckets: self.max_buckets, continuation_token: self.continuation_token, + prefix: self.prefix, + bucket_region: self.bucket_region, }) } } diff --git a/sdk/s3/src/operation/list_buckets/_list_buckets_output.rs b/sdk/s3/src/operation/list_buckets/_list_buckets_output.rs index d3df0e5f250b..768885f442b3 100644 --- a/sdk/s3/src/operation/list_buckets/_list_buckets_output.rs +++ b/sdk/s3/src/operation/list_buckets/_list_buckets_output.rs @@ -9,6 +9,9 @@ pub struct ListBucketsOutput { pub owner: ::std::option::Option, ///

    ContinuationToken is included in the response when there are more buckets that can be listed with pagination. The next ListBuckets request to Amazon S3 can be continued with this ContinuationToken. ContinuationToken is obfuscated and is not a real bucket.

    pub continuation_token: ::std::option::Option<::std::string::String>, + ///

    If Prefix was sent with the request, it is included in the response.

    + ///

    All bucket names in the response begin with the specified bucket name prefix.

    + pub prefix: ::std::option::Option<::std::string::String>, _extended_request_id: Option, _request_id: Option, } @@ -27,6 +30,11 @@ impl ListBucketsOutput { pub fn continuation_token(&self) -> ::std::option::Option<&str> { self.continuation_token.as_deref() } + ///

    If Prefix was sent with the request, it is included in the response.

    + ///

    All bucket names in the response begin with the specified bucket name prefix.

    + pub fn prefix(&self) -> ::std::option::Option<&str> { + self.prefix.as_deref() + } } impl crate::s3_request_id::RequestIdExt for ListBucketsOutput { fn extended_request_id(&self) -> Option<&str> { @@ -52,6 +60,7 @@ pub struct ListBucketsOutputBuilder { pub(crate) buckets: ::std::option::Option<::std::vec::Vec>, pub(crate) owner: ::std::option::Option, pub(crate) continuation_token: ::std::option::Option<::std::string::String>, + pub(crate) prefix: ::std::option::Option<::std::string::String>, _extended_request_id: Option, _request_id: Option, } @@ -104,6 +113,23 @@ impl ListBucketsOutputBuilder { pub fn get_continuation_token(&self) -> &::std::option::Option<::std::string::String> { &self.continuation_token } + ///

    If Prefix was sent with the request, it is included in the response.

    + ///

    All bucket names in the response begin with the specified bucket name prefix.

    + pub fn prefix(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self { + self.prefix = ::std::option::Option::Some(input.into()); + self + } + ///

    If Prefix was sent with the request, it is included in the response.

    + ///

    All bucket names in the response begin with the specified bucket name prefix.

    + pub fn set_prefix(mut self, input: ::std::option::Option<::std::string::String>) -> Self { + self.prefix = input; + self + } + ///

    If Prefix was sent with the request, it is included in the response.

    + ///

    All bucket names in the response begin with the specified bucket name prefix.

    + pub fn get_prefix(&self) -> &::std::option::Option<::std::string::String> { + &self.prefix + } pub(crate) fn _extended_request_id(mut self, extended_request_id: impl Into) -> Self { self._extended_request_id = Some(extended_request_id.into()); self @@ -128,6 +154,7 @@ impl ListBucketsOutputBuilder { buckets: self.buckets, owner: self.owner, continuation_token: self.continuation_token, + prefix: self.prefix, _extended_request_id: self._extended_request_id, _request_id: self._request_id, } diff --git a/sdk/s3/src/operation/list_buckets/builders.rs b/sdk/s3/src/operation/list_buckets/builders.rs index 5116dc1c1397..20aa77a352e2 100644 --- a/sdk/s3/src/operation/list_buckets/builders.rs +++ b/sdk/s3/src/operation/list_buckets/builders.rs @@ -152,4 +152,38 @@ impl ListBucketsFluentBuilder { pub fn get_continuation_token(&self) -> &::std::option::Option<::std::string::String> { self.inner.get_continuation_token() } + ///

    Limits the response to bucket names that begin with the specified bucket name prefix.

    + pub fn prefix(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self { + self.inner = self.inner.prefix(input.into()); + self + } + ///

    Limits the response to bucket names that begin with the specified bucket name prefix.

    + pub fn set_prefix(mut self, input: ::std::option::Option<::std::string::String>) -> Self { + self.inner = self.inner.set_prefix(input); + self + } + ///

    Limits the response to bucket names that begin with the specified bucket name prefix.

    + pub fn get_prefix(&self) -> &::std::option::Option<::std::string::String> { + self.inner.get_prefix() + } + ///

    Limits the response to buckets that are located in the specified Amazon Web Services Region. The Amazon Web Services Region must be expressed according to the Amazon Web Services Region code, such as us-west-2 for the US West (Oregon) Region. For a list of the valid values for all of the Amazon Web Services Regions, see Regions and Endpoints.

    + ///

    Requests made to a Regional endpoint that is different from the bucket-region parameter are not supported. For example, if you want to limit the response to your buckets in Region us-west-2, the request must be made to an endpoint in Region us-west-2.

    + ///
    + pub fn bucket_region(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self { + self.inner = self.inner.bucket_region(input.into()); + self + } + ///

    Limits the response to buckets that are located in the specified Amazon Web Services Region. The Amazon Web Services Region must be expressed according to the Amazon Web Services Region code, such as us-west-2 for the US West (Oregon) Region. For a list of the valid values for all of the Amazon Web Services Regions, see Regions and Endpoints.

    + ///

    Requests made to a Regional endpoint that is different from the bucket-region parameter are not supported. For example, if you want to limit the response to your buckets in Region us-west-2, the request must be made to an endpoint in Region us-west-2.

    + ///
    + pub fn set_bucket_region(mut self, input: ::std::option::Option<::std::string::String>) -> Self { + self.inner = self.inner.set_bucket_region(input); + self + } + ///

    Limits the response to buckets that are located in the specified Amazon Web Services Region. The Amazon Web Services Region must be expressed according to the Amazon Web Services Region code, such as us-west-2 for the US West (Oregon) Region. For a list of the valid values for all of the Amazon Web Services Regions, see Regions and Endpoints.

    + ///

    Requests made to a Regional endpoint that is different from the bucket-region parameter are not supported. For example, if you want to limit the response to your buckets in Region us-west-2, the request must be made to an endpoint in Region us-west-2.

    + ///
    + pub fn get_bucket_region(&self) -> &::std::option::Option<::std::string::String> { + self.inner.get_bucket_region() + } } diff --git a/sdk/s3/src/operation/put_bucket_encryption/builders.rs b/sdk/s3/src/operation/put_bucket_encryption/builders.rs index 0f50eaecd12e..725338295709 100644 --- a/sdk/s3/src/operation/put_bucket_encryption/builders.rs +++ b/sdk/s3/src/operation/put_bucket_encryption/builders.rs @@ -41,7 +41,7 @@ impl crate::operation::put_bucket_encryption::builders::PutBucketEncryptionInput ///
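
A small sketch (not part of the diff) that exercises the two new ListBuckets filters added here; the prefix value is a placeholder, and the request must be sent to an endpoint in the same Region that bucket_region names:

// List buckets in us-west-2 whose names start with "prod-".
async fn list_filtered_buckets(client: &aws_sdk_s3::Client) -> Result<(), aws_sdk_s3::Error> {
    let resp = client
        .list_buckets()
        .prefix("prod-")             // placeholder bucket name prefix
        .bucket_region("us-west-2")  // must match the Region of the endpoint the request is sent to
        .max_buckets(100)
        .send()
        .await?;
    println!("prefix echoed back: {:?}", resp.prefix());
    for bucket in resp.buckets() {
        println!("{:?}", bucket.name());
    }
    // resp.continuation_token() feeds the next page, exactly as for the existing pagination.
    Ok(())
}
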
  • ///

    We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

  • ///
  • -///

    Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.

  • +///

    Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.

    ///
  • ///

S3 Bucket Keys are always enabled for GET and PUT operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't supported when you copy SSE-KMS encrypted objects from general purpose buckets to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject, UploadPartCopy, the Copy operation in Batch Operations, or the import jobs. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.

  • ///
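
A rough sketch (not part of the diff) of configuring a directory bucket's default encryption with one customer managed key, per the constraints above. The bucket name and KMS key ARN are placeholders, the types come from aws_sdk_s3::types, and the exact build() signatures (plain value vs. Result) can vary between SDK versions:

use aws_sdk_s3::types::{
    ServerSideEncryption, ServerSideEncryptionByDefault, ServerSideEncryptionConfiguration,
    ServerSideEncryptionRule,
};

async fn set_default_sse_kms(client: &aws_sdk_s3::Client) -> Result<(), Box<dyn std::error::Error>> {
    // One customer managed key per directory bucket, for the lifetime of the bucket.
    let by_default = ServerSideEncryptionByDefault::builder()
        .sse_algorithm(ServerSideEncryption::AwsKms)
        .kms_master_key_id("arn:aws:kms:us-west-2:111122223333:key/EXAMPLE-KEY-ID") // placeholder CMK
        .build()?;
    let rule = ServerSideEncryptionRule::builder()
        .apply_server_side_encryption_by_default(by_default)
        .bucket_key_enabled(true) // S3 Bucket Keys are always on for directory buckets
        .build();
    let config = ServerSideEncryptionConfiguration::builder().rules(rule).build()?;
    client
        .put_bucket_encryption()
        .bucket("amzn-s3-demo-bucket--usw2-az1--x-s3") // placeholder directory bucket
        .server_side_encryption_configuration(config)
        .send()
        .await?;
    Ok(())
}
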
  • diff --git a/sdk/s3/src/operation/put_object/_put_object_input.rs b/sdk/s3/src/operation/put_object/_put_object_input.rs index d7dac578d3ae..fea7f64c4883 100644 --- a/sdk/s3/src/operation/put_object/_put_object_input.rs +++ b/sdk/s3/src/operation/put_object/_put_object_input.rs @@ -33,7 +33,7 @@ pub struct PutObjectInput { ///

    Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length.

pub content_length: ::std::option::Option<i64>, ///

    The base64-encoded 128-bit MD5 digest of the message (without the headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see REST Authentication.

    - ///

    The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon S3 User Guide.

    + ///

    The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide.

    ///
    ///

    This functionality is not supported for directory buckets.

    ///
    @@ -54,8 +54,9 @@ pub struct PutObjectInput { /// ///

    For more information, see Checking object integrity in the Amazon S3 User Guide.

    ///

    If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm .

    - ///

    For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.

    + ///

    The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide.

    ///
    + ///

    For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.

pub checksum_algorithm: ::std::option::Option<crate::types::ChecksumAlgorithm>, ///

    This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.

    pub checksum_crc32: ::std::option::Option<::std::string::String>, @@ -155,7 +156,7 @@ pub struct PutObjectInput { pub sse_customer_key_md5: ::std::option::Option<::std::string::String>, ///

    Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID.

    ///

    General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data.

    - ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.

    + ///

    Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.

    pub ssekms_key_id: ::std::option::Option<::std::string::String>, ///

    Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for object encryption. The value of this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject operations on this object.

    ///

    General purpose buckets - This value must be explicitly added during CopyObject operations if you want an additional encryption context for your object. For more information, see Encryption context in the Amazon S3 User Guide.

    @@ -236,7 +237,7 @@ impl PutObjectInput { self.content_length } ///

    The base64-encoded 128-bit MD5 digest of the message (without the headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see REST Authentication.

    - ///

    The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon S3 User Guide.

    + ///

    The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide.

    ///
    ///

    This functionality is not supported for directory buckets.

    ///
    @@ -261,8 +262,9 @@ impl PutObjectInput { /// ///

    For more information, see Checking object integrity in the Amazon S3 User Guide.

    ///

    If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm .

    - ///

    For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.

    + ///

    The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide.

    ///
    + ///

    For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.

    pub fn checksum_algorithm(&self) -> ::std::option::Option<&crate::types::ChecksumAlgorithm> { self.checksum_algorithm.as_ref() } @@ -400,7 +402,7 @@ impl PutObjectInput { } ///

@@ -400,7 +402,7 @@ impl PutObjectInput {
     }
     /// Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID.
     ///
     /// General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data.
-    /// Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.
+    /// Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.
     pub fn ssekms_key_id(&self) -> ::std::option::Option<&str> {
         self.ssekms_key_id.as_deref()
     }
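The revised SSE-KMS wording above describes how the key ID interacts with a bucket's default encryption. Below is a hedged sketch of a general purpose bucket upload that pins a specific customer managed key; the key ARN and bucket are placeholders, and for a directory bucket you would normally omit ssekms_key_id and rely on the bucket's default key.

```rust
use aws_sdk_s3::primitives::ByteStream;
use aws_sdk_s3::types::ServerSideEncryption;

// Sketch: SSE-KMS upload against a general purpose bucket with an explicit
// customer managed key. Omitting `ssekms_key_id` would fall back to the
// aws/s3 managed key, as described in the documentation above.
async fn put_with_sse_kms(client: &aws_sdk_s3::Client) -> Result<(), aws_sdk_s3::Error> {
    client
        .put_object()
        .bucket("amzn-s3-demo-bucket") // placeholder bucket
        .key("encrypted-object.txt")
        .body(ByteStream::from_static(b"example payload"))
        .server_side_encryption(ServerSideEncryption::AwsKms)
        .ssekms_key_id("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID") // placeholder ARN
        .send()
        .await?;
    Ok(())
}
```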

@@ -702,7 +704,7 @@ impl PutObjectInputBuilder {
         &self.content_length
     }
     /// The base64-encoded 128-bit MD5 digest of the message (without the headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see REST Authentication.
-    /// The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon S3 User Guide.
+    /// The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide.
     ///
     /// This functionality is not supported for directory buckets.
     ///
@@ -711,7 +713,7 @@ impl PutObjectInputBuilder {
         self
     }
     /// The base64-encoded 128-bit MD5 digest of the message (without the headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see REST Authentication.
-    /// The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon S3 User Guide.
+    /// The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide.
     ///
     /// This functionality is not supported for directory buckets.
     ///
@@ -720,7 +722,7 @@ impl PutObjectInputBuilder {
         self
     }
     /// The base64-encoded 128-bit MD5 digest of the message (without the headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see REST Authentication.
-    /// The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon S3 User Guide.
+    /// The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide.
     ///
     /// This functionality is not supported for directory buckets.
     ///
@@ -755,8 +757,9 @@ impl PutObjectInputBuilder {
     ///
     /// For more information, see Checking object integrity in the Amazon S3 User Guide.
     ///
     /// If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm.
-    /// For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.
+    /// The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide.
     ///
+    /// For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.
     pub fn checksum_algorithm(mut self, input: crate::types::ChecksumAlgorithm) -> Self {
         self.checksum_algorithm = ::std::option::Option::Some(input);
         self
@@ -775,8 +778,9 @@ impl PutObjectInputBuilder {
     ///
     /// For more information, see Checking object integrity in the Amazon S3 User Guide.
     ///
     /// If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm.
-    /// For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.
+    /// The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide.
     ///
+    /// For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.
     pub fn set_checksum_algorithm(mut self, input: ::std::option::Option<crate::types::ChecksumAlgorithm>) -> Self {
         self.checksum_algorithm = input;
         self
@@ -795,8 +799,9 @@ impl PutObjectInputBuilder {
     ///
     /// For more information, see Checking object integrity in the Amazon S3 User Guide.
     ///
     /// If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm.
-    /// For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.
+    /// The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide.
     ///
+    /// For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.
     pub fn get_checksum_algorithm(&self) -> &::std::option::Option<crate::types::ChecksumAlgorithm> {
         &self.checksum_algorithm
     }
@@ -1241,21 +1246,21 @@ impl PutObjectInputBuilder {
     }
     /// Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID.
     ///
     /// General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data.
-    /// Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.
+    /// Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.
     pub fn ssekms_key_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
         self.ssekms_key_id = ::std::option::Option::Some(input.into());
         self
     }
     /// Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID.
     ///
     /// General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data.
-    /// Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.
+    /// Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.
     pub fn set_ssekms_key_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
         self.ssekms_key_id = input;
         self
     }
     /// Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID.
     ///
     /// General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data.
-    /// Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.
+    /// Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.
     pub fn get_ssekms_key_id(&self) -> &::std::option::Option<::std::string::String> {
         &self.ssekms_key_id
     }
diff --git a/sdk/s3/src/operation/put_object/builders.rs b/sdk/s3/src/operation/put_object/builders.rs
index 6d0cba445316..c0792bba51c2 100644
--- a/sdk/s3/src/operation/put_object/builders.rs
+++ b/sdk/s3/src/operation/put_object/builders.rs
@@ -375,7 +375,7 @@ impl PutObjectFluentBuilder {
         self.inner.get_content_length()
     }
     /// The base64-encoded 128-bit MD5 digest of the message (without the headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see REST Authentication.
-    /// The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon S3 User Guide.
+    /// The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide.
     ///
     /// This functionality is not supported for directory buckets.
     ///
@@ -384,7 +384,7 @@ impl PutObjectFluentBuilder {
         self
     }
     /// The base64-encoded 128-bit MD5 digest of the message (without the headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see REST Authentication.
-    /// The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon S3 User Guide.
+    /// The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide.
     ///
     /// This functionality is not supported for directory buckets.
     ///
@@ -393,7 +393,7 @@ impl PutObjectFluentBuilder {
         self
     }
     /// The base64-encoded 128-bit MD5 digest of the message (without the headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see REST Authentication.
-    /// The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon S3 User Guide.
+    /// The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide.
     ///
     /// This functionality is not supported for directory buckets.
     ///
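Where a caller still prefers the Content-MD5 route described above, for example to satisfy Object Lock on a general purpose bucket without sending a checksum header, the digest has to be computed and base64-encoded client-side. The sketch below assumes the third-party md5 and base64 crates purely for illustration; any equivalent digest and encoding implementation works, and the bucket and key are placeholders.

```rust
use aws_sdk_s3::primitives::ByteStream;
use base64::{engine::general_purpose::STANDARD, Engine as _}; // assumed helper crate
// `md5` below is also an assumed helper crate, not part of the AWS SDK.

async fn put_with_content_md5(client: &aws_sdk_s3::Client) -> Result<(), aws_sdk_s3::Error> {
    let body = b"example payload".to_vec();
    // RFC 1864: base64 of the raw 128-bit MD5 digest of the request body.
    let content_md5 = STANDARD.encode(md5::compute(&body).0);
    client
        .put_object()
        .bucket("amzn-s3-demo-bucket") // placeholder
        .key("locked-object.txt")
        .content_md5(content_md5)
        .body(ByteStream::from(body))
        .send()
        .await?;
    Ok(())
}
```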

@@ -428,8 +428,9 @@ impl PutObjectFluentBuilder {
     ///
     /// For more information, see Checking object integrity in the Amazon S3 User Guide.
     ///
     /// If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm.
-    /// For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.
+    /// The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide.
     ///
+    /// For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.
     pub fn checksum_algorithm(mut self, input: crate::types::ChecksumAlgorithm) -> Self {
         self.inner = self.inner.checksum_algorithm(input);
         self
@@ -448,8 +449,9 @@ impl PutObjectFluentBuilder {
     ///
     /// For more information, see Checking object integrity in the Amazon S3 User Guide.
     ///
     /// If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm.
-    /// For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.
+    /// The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide.
     ///
+    /// For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.
     pub fn set_checksum_algorithm(mut self, input: ::std::option::Option<crate::types::ChecksumAlgorithm>) -> Self {
         self.inner = self.inner.set_checksum_algorithm(input);
         self
@@ -468,8 +470,9 @@ impl PutObjectFluentBuilder {
     ///
     /// For more information, see Checking object integrity in the Amazon S3 User Guide.
     ///
     /// If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm.
-    /// For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.
+    /// The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide.
     ///
+    /// For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.
     pub fn get_checksum_algorithm(&self) -> &::std::option::Option<crate::types::ChecksumAlgorithm> {
         self.inner.get_checksum_algorithm()
     }
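The set_* variant documented here takes an Option, which is convenient when the algorithm is decided at runtime. A small hedged sketch with placeholder names throughout:

```rust
use aws_sdk_s3::primitives::ByteStream;
use aws_sdk_s3::types::ChecksumAlgorithm;

// Sketch: forward an optional, caller-chosen checksum algorithm. Passing
// `None` leaves the SDK's default behaviour in place (CRC32 for directory
// buckets, per the documentation above).
async fn put_with_optional_checksum(
    client: &aws_sdk_s3::Client,
    algorithm: Option<ChecksumAlgorithm>,
) -> Result<(), aws_sdk_s3::Error> {
    client
        .put_object()
        .bucket("amzn-s3-demo-bucket") // placeholder
        .key("report.csv")
        .body(ByteStream::from_static(b"a,b,c\n1,2,3\n"))
        .set_checksum_algorithm(algorithm)
        .send()
        .await?;
    Ok(())
}
```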
@@ -912,21 +915,21 @@ impl PutObjectFluentBuilder {
     }
     /// Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID.
     ///
     /// General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data.
-    /// Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.
+    /// Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.
     pub fn ssekms_key_id(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
         self.inner = self.inner.ssekms_key_id(input.into());
         self
     }
     /// Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID.
     ///
     /// General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data.
-    /// Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.
+    /// Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.
     pub fn set_ssekms_key_id(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
         self.inner = self.inner.set_ssekms_key_id(input);
         self
     }
     /// Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID.
     ///
     /// General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data.
-    /// Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.
+    /// Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.
     pub fn get_ssekms_key_id(&self) -> &::std::option::Option<::std::string::String> {
         self.inner.get_ssekms_key_id()
     }
diff --git a/sdk/s3/src/operation/restore_object/builders.rs b/sdk/s3/src/operation/restore_object/builders.rs
index 8e06f59a3fb3..0cbdb01b448e 100644
--- a/sdk/s3/src/operation/restore_object/builders.rs
+++ b/sdk/s3/src/operation/restore_object/builders.rs
@@ -24,9 +24,7 @@ impl crate::operation::restore_object::builders::RestoreObjectInputBuilder {
 ///
 /// This operation is not supported by directory buckets.
-///
-/// The SELECT job type for the RestoreObject operation is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more
-///
+///
 /// Restores an archived copy of an object back into Amazon S3
 ///
 /// This functionality is not supported for Amazon S3 on Outposts.
 ///
 /// This action performs the following types of requests:
diff --git a/sdk/s3/src/operation/select_object_content/_select_object_content_input.rs b/sdk/s3/src/operation/select_object_content/_select_object_content_input.rs
index cec017cf24fc..f883c4c29836 100644
--- a/sdk/s3/src/operation/select_object_content/_select_object_content_input.rs
+++ b/sdk/s3/src/operation/select_object_content/_select_object_content_input.rs
@@ -1,8 +1,5 @@
 // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
-///
-/// Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more
-///
 /// Request to filter the contents of an Amazon S3 object based on a simple Structured Query Language (SQL) statement. In the request, along with the SQL expression, you must specify a data serialization format (JSON or CSV) of the object. Amazon S3 uses this to parse object data into records. It returns only records that match the specified SQL expression. You must also specify the data serialization format for the response. For more information, see S3Select API Documentation.
 #[non_exhaustive]
 #[derive(::std::clone::Clone, ::std::cmp::PartialEq)]
diff --git a/sdk/s3/src/operation/select_object_content/builders.rs b/sdk/s3/src/operation/select_object_content/builders.rs
index 0084ce42100d..d428b2638b31 100644
--- a/sdk/s3/src/operation/select_object_content/builders.rs
+++ b/sdk/s3/src/operation/select_object_content/builders.rs
@@ -24,9 +24,7 @@ impl crate::operation::select_object_content::builders::SelectObjectContentInputBuilder {
 ///
 /// This operation is not supported by directory buckets.
-///
-/// The SelectObjectContent operation is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the operation as usual. Learn more
-///
+///
 /// This action filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.
 ///
 /// This functionality is not supported for Amazon S3 on Outposts.
 ///
 /// For more information about Amazon S3 Select, see Selecting Content from Objects and SELECT Command in the Amazon S3 User Guide.
diff --git a/sdk/s3/src/protocol_serde/shape_bucket.rs b/sdk/s3/src/protocol_serde/shape_bucket.rs
index 764a158f48a5..50cd3c1f13ea 100644
--- a/sdk/s3/src/protocol_serde/shape_bucket.rs
+++ b/sdk/s3/src/protocol_serde/shape_bucket.rs
@@ -32,6 +32,19 @@ pub fn de_bucket(decoder: &mut ::aws_smithy_xml::decode::ScopedDecoder) -> Result<crate::types::Bucket, ::aws_smithy_xml::decode::XmlDecodeError> {
                 builder = builder.set_creation_date(var_2);
             }
             ,
+            s if s.matches("BucketRegion") /* BucketRegion com.amazonaws.s3#Bucket$BucketRegion */ =>  {
+                let var_3 =
+                    Some(
+                        Result::<::std::string::String, ::aws_smithy_xml::decode::XmlDecodeError>::Ok(
+                            ::aws_smithy_xml::decode::try_data(&mut tag)?.as_ref()
+                            .into()
+                        )
+                        ?
+                    )
+                ;
+                builder = builder.set_bucket_region(var_3);
+            }
+            ,
             _ => {}
         }
     }
diff --git a/sdk/s3/src/protocol_serde/shape_list_buckets.rs b/sdk/s3/src/protocol_serde/shape_list_buckets.rs
index a1466f4f3f5b..7c753a7dd18d 100644
--- a/sdk/s3/src/protocol_serde/shape_list_buckets.rs
+++ b/sdk/s3/src/protocol_serde/shape_list_buckets.rs
@@ -73,14 +73,27 @@ pub fn de_list_buckets(
                 builder = builder.set_continuation_token(var_2);
             }
             ,
-            s if s.matches("Buckets") /* Buckets com.amazonaws.s3.synthetic#ListBucketsOutput$Buckets */ =>  {
+            s if s.matches("Prefix") /* Prefix com.amazonaws.s3.synthetic#ListBucketsOutput$Prefix */ =>  {
                 let var_3 =
+                    Some(
+                        Result::<::std::string::String, ::aws_smithy_xml::decode::XmlDecodeError>::Ok(
+                            ::aws_smithy_xml::decode::try_data(&mut tag)?.as_ref()
+                            .into()
+                        )
+                        ?
+                    )
+                ;
+                builder = builder.set_prefix(var_3);
+            }
+            ,
+            s if s.matches("Buckets") /* Buckets com.amazonaws.s3.synthetic#ListBucketsOutput$Buckets */ =>  {
+                let var_4 =
                     Some(
                         crate::protocol_serde::shape_buckets::de_buckets(&mut tag)
                         ?
                     )
                 ;
-                builder = builder.set_buckets(var_3);
+                builder = builder.set_buckets(var_4);
             }
             ,
             _ => {}
diff --git a/sdk/s3/src/types/_bucket.rs b/sdk/s3/src/types/_bucket.rs
index 1657979e1746..aefb01415e6d 100644
--- a/sdk/s3/src/types/_bucket.rs
+++ b/sdk/s3/src/types/_bucket.rs
@@ -8,6 +8,8 @@ pub struct Bucket {
     pub name: ::std::option::Option<::std::string::String>,

     /// Date the bucket was created. This date can change when making changes to your bucket, such as editing its bucket policy.
     pub creation_date: ::std::option::Option<::aws_smithy_types::DateTime>,
+    /// BucketRegion indicates the Amazon Web Services region where the bucket is located. If the request contains at least one valid parameter, it is included in the response.
+    pub bucket_region: ::std::option::Option<::std::string::String>,
 }
 impl Bucket {
     /// The name of the bucket.
@@ -18,6 +20,10 @@ impl Bucket {
     pub fn creation_date(&self) -> ::std::option::Option<&::aws_smithy_types::DateTime> {
         self.creation_date.as_ref()
     }
+    /// BucketRegion indicates the Amazon Web Services region where the bucket is located. If the request contains at least one valid parameter, it is included in the response.
+    pub fn bucket_region(&self) -> ::std::option::Option<&str> {
+        self.bucket_region.as_deref()
+    }
 }
 impl Bucket {
     /// Creates a new builder-style object to manufacture [`Bucket`](crate::types::Bucket).
@@ -32,6 +38,7 @@ impl Bucket {
 pub struct BucketBuilder {
     pub(crate) name: ::std::option::Option<::std::string::String>,
     pub(crate) creation_date: ::std::option::Option<::aws_smithy_types::DateTime>,
+    pub(crate) bucket_region: ::std::option::Option<::std::string::String>,
 }
 impl BucketBuilder {
     /// The name of the bucket.
@@ -62,11 +69,26 @@ impl BucketBuilder {
     pub fn get_creation_date(&self) -> &::std::option::Option<::aws_smithy_types::DateTime> {
         &self.creation_date
     }
+    /// BucketRegion indicates the Amazon Web Services region where the bucket is located. If the request contains at least one valid parameter, it is included in the response.
+    pub fn bucket_region(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
+        self.bucket_region = ::std::option::Option::Some(input.into());
+        self
+    }
+    /// BucketRegion indicates the Amazon Web Services region where the bucket is located. If the request contains at least one valid parameter, it is included in the response.
+    pub fn set_bucket_region(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
+        self.bucket_region = input;
+        self
+    }
+    /// BucketRegion indicates the Amazon Web Services region where the bucket is located. If the request contains at least one valid parameter, it is included in the response.
+    pub fn get_bucket_region(&self) -> &::std::option::Option<::std::string::String> {
+        &self.bucket_region
+    }
     /// Consumes the builder and constructs a [`Bucket`](crate::types::Bucket).
     pub fn build(self) -> crate::types::Bucket {
         crate::types::Bucket {
             name: self.name,
             creation_date: self.creation_date,
+            bucket_region: self.bucket_region,
         }
     }
 }
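With the new bucket_region accessor in place, callers can read the region straight off ListBuckets results. The sketch below is a minimal illustration; the accessor names follow the usual generated shape of this SDK, but treat them as assumptions if you are on a much older release.

```rust
// Sketch: list buckets and print where each one lives, using the new
// `bucket_region` accessor added in this release.
async fn print_bucket_regions(client: &aws_sdk_s3::Client) -> Result<(), aws_sdk_s3::Error> {
    let output = client.list_buckets().send().await?;
    for bucket in output.buckets() {
        println!(
            "{} -> {}",
            bucket.name().unwrap_or("<unnamed>"),
            bucket.bucket_region().unwrap_or("<unknown region>"),
        );
    }
    Ok(())
}
```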

diff --git a/sdk/s3/src/types/_public_access_block_configuration.rs b/sdk/s3/src/types/_public_access_block_configuration.rs
index 4e893870e40a..5569212c2fb9 100644
--- a/sdk/s3/src/types/_public_access_block_configuration.rs
+++ b/sdk/s3/src/types/_public_access_block_configuration.rs
@@ -21,7 +21,7 @@ pub struct PublicAccessBlockConfiguration {
     /// Specifies whether Amazon S3 should block public bucket policies for this bucket. Setting this element to TRUE causes Amazon S3 to reject calls to PUT Bucket policy if the specified bucket policy allows public access.
     ///
     /// Enabling this setting doesn't affect existing bucket policies.
     pub block_public_policy: ::std::option::Option<bool>,
-    /// Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to TRUE restricts access to this bucket to only Amazon Web Servicesservice principals and authorized users within this account if the bucket has a public policy.
+    /// Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to TRUE restricts access to this bucket to only Amazon Web Services service principals and authorized users within this account if the bucket has a public policy.
     ///
     /// Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.
     pub restrict_public_buckets: ::std::option::Option<bool>,
 }
@@ -49,7 +49,7 @@ impl PublicAccessBlockConfiguration {
     pub fn block_public_policy(&self) -> ::std::option::Option<bool> {
         self.block_public_policy
     }
-    /// Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to TRUE restricts access to this bucket to only Amazon Web Servicesservice principals and authorized users within this account if the bucket has a public policy.
+    /// Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to TRUE restricts access to this bucket to only Amazon Web Services service principals and authorized users within this account if the bucket has a public policy.
     ///
     /// Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.
     pub fn restrict_public_buckets(&self) -> ::std::option::Option<bool> {
         self.restrict_public_buckets
@@ -147,19 +147,19 @@ impl PublicAccessBlockConfigurationBuilder {
     pub fn get_block_public_policy(&self) -> &::std::option::Option<bool> {
         &self.block_public_policy
     }
-    /// Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to TRUE restricts access to this bucket to only Amazon Web Servicesservice principals and authorized users within this account if the bucket has a public policy.
+    /// Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to TRUE restricts access to this bucket to only Amazon Web Services service principals and authorized users within this account if the bucket has a public policy.
     ///
     /// Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.
     pub fn restrict_public_buckets(mut self, input: bool) -> Self {
         self.restrict_public_buckets = ::std::option::Option::Some(input);
         self
     }
-    /// Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to TRUE restricts access to this bucket to only Amazon Web Servicesservice principals and authorized users within this account if the bucket has a public policy.
+    /// Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to TRUE restricts access to this bucket to only Amazon Web Services service principals and authorized users within this account if the bucket has a public policy.
     ///
     /// Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.
     pub fn set_restrict_public_buckets(mut self, input: ::std::option::Option<bool>) -> Self {
         self.restrict_public_buckets = input;
         self
     }
-    /// Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to TRUE restricts access to this bucket to only Amazon Web Servicesservice principals and authorized users within this account if the bucket has a public policy.
+    /// Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to TRUE restricts access to this bucket to only Amazon Web Services service principals and authorized users within this account if the bucket has a public policy.
     ///
     /// Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.
     pub fn get_restrict_public_buckets(&self) -> &::std::option::Option<bool> {
         &self.restrict_public_buckets
     }
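The corrected wording above only touches documentation, but it describes the restrict_public_buckets switch that callers set through PutPublicAccessBlock. A hedged sketch with a placeholder bucket name:

```rust
use aws_sdk_s3::types::PublicAccessBlockConfiguration;

// Sketch: block all forms of public access on a bucket, including the
// `restrict_public_buckets` behaviour described in the documentation above.
async fn lock_down_bucket(client: &aws_sdk_s3::Client) -> Result<(), aws_sdk_s3::Error> {
    let config = PublicAccessBlockConfiguration::builder()
        .block_public_acls(true)
        .ignore_public_acls(true)
        .block_public_policy(true)
        .restrict_public_buckets(true)
        .build();
    client
        .put_public_access_block()
        .bucket("amzn-s3-demo-bucket") // placeholder
        .public_access_block_configuration(config)
        .send()
        .await?;
    Ok(())
}
```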

diff --git a/sdk/s3/src/types/_restore_request.rs b/sdk/s3/src/types/_restore_request.rs
index be2decfafccd..fd098ec6d23e 100644
--- a/sdk/s3/src/types/_restore_request.rs
+++ b/sdk/s3/src/types/_restore_request.rs
@@ -9,18 +9,12 @@ pub struct RestoreRequest {
     pub days: ::std::option::Option<i32>,
     /// S3 Glacier related parameters pertaining to this job. Do not use with restores that specify OutputLocation.
     pub glacier_job_parameters: ::std::option::Option<crate::types::GlacierJobParameters>,
-    ///
-    /// Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more
-    ///
     /// Type of restore request.
     pub r#type: ::std::option::Option<crate::types::RestoreRequestType>,
     /// Retrieval tier at which the restore will be processed.
     pub tier: ::std::option::Option<crate::types::Tier>,
     /// The optional description for the job.
     pub description: ::std::option::Option<::std::string::String>,
-    ///
-    /// Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more
-    ///
     /// Describes the parameters for Select job types.
     pub select_parameters: ::std::option::Option<crate::types::SelectParameters>,
     /// Describes the location where the restore job's output is stored.
@@ -36,9 +30,6 @@ impl RestoreRequest {
     pub fn glacier_job_parameters(&self) -> ::std::option::Option<&crate::types::GlacierJobParameters> {
         self.glacier_job_parameters.as_ref()
     }
-    ///
-    /// Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more
-    ///
     /// Type of restore request.
     pub fn r#type(&self) -> ::std::option::Option<&crate::types::RestoreRequestType> {
         self.r#type.as_ref()
@@ -51,9 +42,6 @@ impl RestoreRequest {
     pub fn description(&self) -> ::std::option::Option<&str> {
         self.description.as_deref()
     }
-    ///
-    /// Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more
-    ///
     /// Describes the parameters for Select job types.
     pub fn select_parameters(&self) -> ::std::option::Option<&crate::types::SelectParameters> {
         self.select_parameters.as_ref()
@@ -114,25 +102,16 @@ impl RestoreRequestBuilder {
     pub fn get_glacier_job_parameters(&self) -> &::std::option::Option<crate::types::GlacierJobParameters> {
         &self.glacier_job_parameters
     }
-    ///
-    /// Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more
-    ///
     /// Type of restore request.
     pub fn r#type(mut self, input: crate::types::RestoreRequestType) -> Self {
         self.r#type = ::std::option::Option::Some(input);
         self
     }
-    ///
-    /// Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more
-    ///
     /// Type of restore request.
     pub fn set_type(mut self, input: ::std::option::Option<crate::types::RestoreRequestType>) -> Self {
         self.r#type = input;
         self
     }
-    ///
-    /// Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more
-    ///
     /// Type of restore request.
     pub fn get_type(&self) -> &::std::option::Option<crate::types::RestoreRequestType> {
         &self.r#type
@@ -165,25 +144,16 @@ impl RestoreRequestBuilder {
     pub fn get_description(&self) -> &::std::option::Option<::std::string::String> {
         &self.description
     }
-    ///
-    /// Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more
-    ///
     /// Describes the parameters for Select job types.
     pub fn select_parameters(mut self, input: crate::types::SelectParameters) -> Self {
         self.select_parameters = ::std::option::Option::Some(input);
         self
     }
-    ///
-    /// Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more
-    ///
     /// Describes the parameters for Select job types.
     pub fn set_select_parameters(mut self, input: ::std::option::Option<crate::types::SelectParameters>) -> Self {
         self.select_parameters = input;
         self
     }
-    ///
-    /// Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more
-    ///
     /// Describes the parameters for Select job types.
     pub fn get_select_parameters(&self) -> &::std::option::Option<crate::types::SelectParameters> {
         &self.select_parameters
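Although the Select-specific notes are dropped from these docs, the non-Select restore path is unchanged. A hedged sketch of initiating a standard restore of an archived object follows; the bucket, key, and retention window are placeholders.

```rust
use aws_sdk_s3::types::{RestoreRequest, Tier};

// Sketch: ask S3 to make an archived object temporarily available for 7 days
// using the Standard retrieval tier.
async fn restore_archived_object(client: &aws_sdk_s3::Client) -> Result<(), aws_sdk_s3::Error> {
    let request = RestoreRequest::builder()
        .days(7)
        .tier(Tier::Standard)
        .build();
    client
        .restore_object()
        .bucket("amzn-s3-demo-bucket") // placeholder
        .key("archived/report-2023.parquet")
        .restore_request(request)
        .send()
        .await?;
    Ok(())
}
```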

diff --git a/sdk/s3/src/types/_select_parameters.rs b/sdk/s3/src/types/_select_parameters.rs
index 14be51fe6dfd..029acbde671a 100644
--- a/sdk/s3/src/types/_select_parameters.rs
+++ b/sdk/s3/src/types/_select_parameters.rs
@@ -1,10 +1,6 @@
 // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
-///
-/// Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more
-///
 /// Describes the parameters for Select job types.
-/// Learn How to optimize querying your data in Amazon S3 using Amazon Athena, S3 Object Lambda, or client-side filtering.
 #[non_exhaustive]
 #[derive(::std::clone::Clone, ::std::cmp::PartialEq, ::std::fmt::Debug)]
 pub struct SelectParameters {
@@ -12,9 +8,6 @@ pub struct SelectParameters {
     pub input_serialization: ::std::option::Option<crate::types::InputSerialization>,
     /// The type of the provided expression (for example, SQL).
     pub expression_type: crate::types::ExpressionType,
-    ///
-    /// Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more
-    ///
     /// The expression that is used to query the object.
     pub expression: ::std::string::String,
     /// Describes how the results of the Select job are serialized.
@@ -29,9 +22,6 @@ impl SelectParameters {
     pub fn expression_type(&self) -> &crate::types::ExpressionType {
         &self.expression_type
     }
-    ///
-    /// Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more
-    ///
     /// The expression that is used to query the object.
     pub fn expression(&self) -> &str {
         use std::ops::Deref;
@@ -89,26 +79,17 @@ impl SelectParametersBuilder {
     pub fn get_expression_type(&self) -> &::std::option::Option<crate::types::ExpressionType> {
         &self.expression_type
     }
-    ///
-    /// Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more
-    ///
     /// The expression that is used to query the object.
     /// This field is required.
     pub fn expression(mut self, input: impl ::std::convert::Into<::std::string::String>) -> Self {
         self.expression = ::std::option::Option::Some(input.into());
         self
     }
-    ///
-    /// Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more
-    ///
     /// The expression that is used to query the object.
     pub fn set_expression(mut self, input: ::std::option::Option<::std::string::String>) -> Self {
         self.expression = input;
         self
     }
-    ///
-    /// Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more
-    ///
     /// The expression that is used to query the object.
     pub fn get_expression(&self) -> &::std::option::Option<::std::string::String> {
         &self.expression
@@ -5,7 +5,7 @@
 ///
 /// General purpose buckets - If you don't specify a customer managed key at configuration, Amazon S3 automatically creates an Amazon Web Services KMS key (aws/s3) in your Amazon Web Services account the first time that you add an object encrypted with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for SSE-KMS.
 ///
-/// Directory buckets - Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. Amazon Web Services managed key (aws/s3) isn't supported.
+/// Directory buckets - Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported.
 ///
 /// Directory buckets - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS.
 ///
diff --git a/tests/no-default-features/Cargo.toml b/tests/no-default-features/Cargo.toml
index 298faf77166e..c64d4559d1f6 100644
--- a/tests/no-default-features/Cargo.toml
+++ b/tests/no-default-features/Cargo.toml
@@ -21,7 +21,7 @@ version = "1.5.8"
 [dev-dependencies.aws-sdk-s3]
 path = "../../sdk/s3"
 default-features = false
-version = "1.56.0"
+version = "1.57.0"
 
 [dev-dependencies.aws-smithy-async]
 path = "../../sdk/aws-smithy-async"
diff --git a/versions.toml b/versions.toml
index 67d960efa9e8..46fee4715e1a 100644
--- a/versions.toml
+++ b/versions.toml
@@ -1918,9 +1918,9 @@ model_hash = '34648839402316ab9da4bef0fe11beb9bc3ec1d05b3ff7d09664ab74c0cb3d2b'
 
 [crates.aws-sdk-s3]
 category = 'AwsSdk'
-version = '1.56.0'
-source_hash = '36aea4a7770c1b9382cdb6f8fedb7f84f9188a768c69c91d63c680040aabd295'
-model_hash = '588b323ec168ed4350d55639ea5096efe44132b312410c3a08d893a17f8a4388'
+version = '1.57.0'
+source_hash = '20843d1c84b247902e5f3eb4e97d810299063fac3338400d0e599940294b1a70'
+model_hash = 'f3dd5b9e103d035ffa51cc6666602b2e486df8d192431008e266211f3ee1ad7d'
 
 [crates.aws-sdk-s3control]
 category = 'AwsSdk'
@@ -2499,17 +2499,5 @@ source_hash = '80dac965e6bba4c1f69c0329fa21203c182f3eb034792d8f4c4fa94ba15ca72f'
 category = 'AwsRuntime'
 version = '0.0.0'
 source_hash = '2fe25f627f326867fe30559e73c464e12361c13b66e7ffc0bc95aa91a482fafd'
-
-[release]
-tag = 'release-2024-10-15'
-
 [release.crates]
-aws-sdk-amplify = '1.50.0'
-aws-sdk-cloudformation = '1.51.1'
-aws-sdk-codebuild = '1.58.0'
-aws-sdk-ivs = '1.51.0'
-aws-sdk-qbusiness = '1.49.0'
-aws-sdk-redshift = '1.49.0'
-aws-sdk-resiliencehub = '1.48.0'
-aws-sdk-sesv2 = '1.51.0'
-aws-sdk-transcribestreaming = '1.47.0'
+aws-sdk-s3 = '1.57.0'