From 573996578969776bda1c2981c935600319c073a6 Mon Sep 17 00:00:00 2001
From: awssdkgo
Date: Fri, 24 Jul 2020 18:15:32 +0000
Subject: [PATCH] Release v1.33.12 (2020-07-24)

===

### Service Client Updates
* `service/frauddetector`: Updates service API and documentation
* `service/fsx`: Updates service documentation
* `service/kendra`: Updates service API and documentation
  * Amazon Kendra now supports sorting query results based on document attributes. Amazon Kendra also introduced an option to enclose table and column names with double quotes for database data sources.
* `service/macie2`: Updates service API and documentation
* `service/mediaconnect`: Updates service API and documentation
* `service/mediapackage`: Updates service API and documentation
  * The release adds daterange as a new ad marker option. This option enables MediaPackage to insert EXT-X-DATERANGE tags in HLS and CMAF manifests. The EXT-X-DATERANGE tag is used to signal ad and program transition events.
* `service/monitoring`: Updates service API and documentation
  * AWS CloudWatch ListMetrics now supports an optional parameter (RecentlyActive) to filter results to only metrics that have received new data points in the past 3 hours. This enables more targeted metric data retrieval through the Get APIs.
* `service/mq`: Updates service API, documentation, and paginators
  * Amazon MQ now supports LDAP (Lightweight Directory Access Protocol), providing authentication and authorization of Amazon MQ users via a customer-designated LDAP server.
* `service/sagemaker`: Updates service API, documentation, and paginators
  * SageMaker Ground Truth: Added support for OIDC (OpenID Connect) to authenticate workers via their own identity provider instead of through Amazon Cognito. This release adds new APIs (CreateWorkforce, DeleteWorkforce, and ListWorkforces) to the SageMaker Ground Truth service. SageMaker Neo: Added support for detailed target device description by using TargetPlatform fields: OS, architecture, and accelerator. Added support for additional compilation parameters by using the JSON field CompilerOptions. SageMaker Search: SageMaker Search supports transform job details in trial components.

### SDK Bugs
* `service/s3/s3crypto`: Fix client's temporary file buffer error on retry ([#3344](https://github.com/aws/aws-sdk-go/pull/3344))
  * Fixes the Crypto client's temporary file buffer cleanup returning an error when the request is retried.
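A minimal aws-sdk-go sketch of the new ListMetrics RecentlyActive parameter described above (illustrative only: the AWS/EC2 namespace and default-credential session are assumed example values, and PT3H is the only value the parameter accepts):

```go
// Sketch: list only metrics that received new data points in the past
// three hours, using the RecentlyActive parameter added in v1.33.12.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatch"
)

func main() {
	// Assumes credentials and region come from the default provider chain.
	sess := session.Must(session.NewSession())
	svc := cloudwatch.New(sess)

	out, err := svc.ListMetrics(&cloudwatch.ListMetricsInput{
		Namespace:      aws.String("AWS/EC2"), // example namespace
		RecentlyActive: aws.String("PT3H"),    // only supported value
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range out.Metrics {
		fmt.Println(aws.StringValue(m.MetricName))
	}
}
```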
--- CHANGELOG.md | 23 + CHANGELOG_PENDING.md | 2 - aws/version.go | 2 +- .../apis/frauddetector/2019-11-15/api-2.json | 87 +- .../apis/frauddetector/2019-11-15/docs-2.json | 63 +- models/apis/fsx/2018-03-01/docs-2.json | 20 +- models/apis/kendra/2019-02-03/api-2.json | 42 +- models/apis/kendra/2019-02-03/docs-2.json | 32 +- models/apis/macie2/2020-01-01/api-2.json | 25 +- models/apis/macie2/2020-01-01/docs-2.json | 30 +- .../apis/mediaconnect/2018-11-14/api-2.json | 19 + .../apis/mediaconnect/2018-11-14/docs-2.json | 12 +- .../apis/mediapackage/2017-10-12/api-2.json | 9 +- .../apis/mediapackage/2017-10-12/docs-2.json | 8 +- models/apis/monitoring/2010-08-01/api-2.json | 7 +- models/apis/monitoring/2010-08-01/docs-2.json | 68 +- models/apis/mq/2017-11-27/api-2.json | 202 ++ models/apis/mq/2017-11-27/docs-2.json | 53 +- models/apis/mq/2017-11-27/paginators-1.json | 9 +- models/apis/sagemaker/2017-07-24/api-2.json | 320 ++- models/apis/sagemaker/2017-07-24/docs-2.json | 265 ++- .../sagemaker/2017-07-24/paginators-1.json | 6 + service/cloudwatch/api.go | 120 +- service/frauddetector/api.go | 432 +--- .../frauddetectoriface/interface.go | 4 - service/fsx/api.go | 143 +- service/kendra/api.go | 170 +- service/macie2/api.go | 72 +- service/mediaconnect/api.go | 40 + service/mediapackage/api.go | 18 +- service/mq/api.go | 431 +++- service/mq/mqiface/interface.go | 3 + service/sagemaker/api.go | 1934 ++++++++++++++++- service/sagemaker/sagemakeriface/interface.go | 15 + 34 files changed, 3882 insertions(+), 804 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 155b13b9349..6d264ac0b8b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,26 @@ +Release v1.33.12 (2020-07-24) +=== + +### Service Client Updates +* `service/frauddetector`: Updates service API and documentation +* `service/fsx`: Updates service documentation +* `service/kendra`: Updates service API and documentation + * Amazon Kendra now supports sorting query results based on document attributes. Amazon Kendra also introduced an option to enclose table and column names with double quotes for database data sources. +* `service/macie2`: Updates service API and documentation +* `service/mediaconnect`: Updates service API and documentation +* `service/mediapackage`: Updates service API and documentation + * The release adds daterange as a new ad marker option. This option enables MediaPackage to insert EXT-X-DATERANGE tags in HLS and CMAF manifests. The EXT-X-DATERANGE tag is used to signal ad and program transition events. +* `service/monitoring`: Updates service API and documentation + * AWS CloudWatch ListMetrics now supports an optional parameter (RecentlyActive) to filter results by only metrics that have received new datapoints in the past 3 hours. This enables more targeted metric data retrieval through the Get APIs +* `service/mq`: Updates service API, documentation, and paginators + * Amazon MQ now supports LDAP (Lightweight Directory Access Protocol), providing authentication and authorization of Amazon MQ users via a customer designated LDAP server. +* `service/sagemaker`: Updates service API, documentation, and paginators + * Sagemaker Ground Truth:Added support for OIDC (OpenID Connect) to authenticate workers via their own identity provider instead of through Amazon Cognito. This release adds new APIs (CreateWorkforce, DeleteWorkforce, and ListWorkforces) to SageMaker Ground Truth service. 
Sagemaker Neo: Added support for detailed target device description by using TargetPlatform fields - OS, architecture, and accelerator. Added support for additional compilation parameters by using JSON field CompilerOptions. Sagemaker Search: SageMaker Search supports transform job details in trial components. + +### SDK Bugs +* `service/s3/s3crypto`: Fix client's temporary file buffer error on retry ([#3344](https://github.com/aws/aws-sdk-go/pull/3344)) + * Fixes the Crypto client's temporary file buffer cleanup returning an error when the request is retried. + Release v1.33.11 (2020-07-23) === diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 94057b39473..8a1927a39ca 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -3,5 +3,3 @@ ### SDK Enhancements ### SDK Bugs -* `service/s3/s3crypto`: Fix client's temporary file buffer error on retry ([#3344](https://github.com/aws/aws-sdk-go/pull/3344)) - * Fixes the Crypto client's temporary file buffer cleanup returning an error when the request is retried. diff --git a/aws/version.go b/aws/version.go index 22d832a7a86..1a859e6ba96 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.33.11" +const SDKVersion = "1.33.12" diff --git a/models/apis/frauddetector/2019-11-15/api-2.json b/models/apis/frauddetector/2019-11-15/api-2.json index 0966d831e83..8bdaa1dc0ab 100644 --- a/models/apis/frauddetector/2019-11-15/api-2.json +++ b/models/apis/frauddetector/2019-11-15/api-2.json @@ -379,22 +379,6 @@ {"shape":"AccessDeniedException"} ] }, - "GetPrediction":{ - "name":"GetPrediction", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"GetPredictionRequest"}, - "output":{"shape":"GetPredictionResult"}, - "errors":[ - {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServerException"}, - {"shape":"ThrottlingException"}, - {"shape":"AccessDeniedException"} - ] - }, "GetRules":{ "name":"GetRules", "http":{ @@ -1084,11 +1068,6 @@ "arn":{"shape":"fraudDetectorArn"} } }, - "EventAttributeMap":{ - "type":"map", - "key":{"shape":"attributeKey"}, - "value":{"shape":"attributeValue"} - }, "EventType":{ "type":"structure", "members":{ @@ -1125,7 +1104,7 @@ "modelEndpoint":{"shape":"string"}, "eventTypeName":{"shape":"identifier"}, "modelSource":{"shape":"ModelSource"}, - "role":{"shape":"Role"}, + "invokeModelEndpointRoleArn":{"shape":"string"}, "inputConfiguration":{"shape":"ModelInputConfiguration"}, "outputConfiguration":{"shape":"ModelOutputConfiguration"}, "modelEndpointStatus":{"shape":"ModelEndpointStatus"}, @@ -1237,7 +1216,7 @@ ], "members":{ "detectorId":{"shape":"string"}, - "detectorVersionId":{"shape":"string"}, + "detectorVersionId":{"shape":"wholeNumberVersionString"}, "eventId":{"shape":"string"}, "eventTypeName":{"shape":"string"}, "entities":{"shape":"listOfEntities"}, @@ -1361,28 +1340,6 @@ "nextToken":{"shape":"string"} } }, - "GetPredictionRequest":{ - "type":"structure", - "required":[ - "detectorId", - "eventId" - ], - "members":{ - "detectorId":{"shape":"string"}, - "detectorVersionId":{"shape":"string"}, - "eventId":{"shape":"string"}, - "eventAttributes":{"shape":"EventAttributeMap"}, - "externalModelEndpointDataBlobs":{"shape":"ExternalModelEndpointDataBlobMap"} - } - }, - "GetPredictionResult":{ - "type":"structure", - "members":{ - "outcomes":{"shape":"ListOfStrings"}, - "modelScores":{"shape":"ListOfModelScores"}, - 
"ruleResults":{"shape":"ListOfRuleResults"} - } - }, "GetRulesRequest":{ "type":"structure", "required":["detectorId"], @@ -1616,16 +1573,8 @@ "ModelVersionStatus":{ "type":"string", "enum":[ - "TRAINING_IN_PROGRESS", - "TRAINING_COMPLETE", - "ACTIVATE_REQUESTED", - "ACTIVATE_IN_PROGRESS", "ACTIVE", - "INACTIVATE_IN_PROGRESS", - "INACTIVE", - "DELETE_REQUESTED", - "DELETE_IN_PROGRESS", - "ERROR" + "INACTIVE" ] }, "NameList":{ @@ -1717,7 +1666,7 @@ "required":[ "modelEndpoint", "modelSource", - "role", + "invokeModelEndpointRoleArn", "inputConfiguration", "outputConfiguration", "modelEndpointStatus" @@ -1726,7 +1675,7 @@ "modelEndpoint":{"shape":"sageMakerEndpointIdentifier"}, "eventTypeName":{"shape":"identifier"}, "modelSource":{"shape":"ModelSource"}, - "role":{"shape":"Role"}, + "invokeModelEndpointRoleArn":{"shape":"string"}, "inputConfiguration":{"shape":"ModelInputConfiguration"}, "outputConfiguration":{"shape":"ModelOutputConfiguration"}, "modelEndpointStatus":{"shape":"ModelEndpointStatus"}, @@ -1786,17 +1735,6 @@ }, "exception":true }, - "Role":{ - "type":"structure", - "required":[ - "arn", - "name" - ], - "members":{ - "arn":{"shape":"string"}, - "name":{"shape":"string"} - } - }, "Rule":{ "type":"structure", "required":[ @@ -2163,17 +2101,6 @@ "max":100, "min":50 }, - "attributeKey":{ - "type":"string", - "max":64, - "min":1 - }, - "attributeValue":{ - "type":"string", - "max":256, - "min":1, - "sensitive":true - }, "blob":{"type":"blob"}, "contentType":{ "type":"string", @@ -2216,6 +2143,8 @@ "float":{"type":"float"}, "floatVersionString":{ "type":"string", + "max":7, + "min":3, "pattern":"^[1-9][0-9]{0,3}\\.[0-9]{1,2}$" }, "fraudDetectorArn":{ @@ -2334,6 +2263,8 @@ }, "wholeNumberVersionString":{ "type":"string", + "max":5, + "min":1, "pattern":"^([1-9][0-9]*)$" } } diff --git a/models/apis/frauddetector/2019-11-15/docs-2.json b/models/apis/frauddetector/2019-11-15/docs-2.json index 33d71a0fc6a..bc7197673be 100644 --- a/models/apis/frauddetector/2019-11-15/docs-2.json +++ b/models/apis/frauddetector/2019-11-15/docs-2.json @@ -26,7 +26,6 @@ "GetModelVersion": "

Gets the details of the specified model version.

", "GetModels": "

Gets one or more models. Gets all models for the AWS account if no model type and no model id are provided. Gets all models for the AWS account and model type, if the model type is specified but model id is not provided. Gets a specific model if the (model type, model id) tuple is specified.

This is a paginated API. If you provide a null maxResults, this action retrieves a maximum of 10 records per page. If you provide a maxResults, the value must be between 1 and 10. To get the next page results, provide the pagination token from the response as part of your request. A null pagination token fetches the records from the beginning.

", "GetOutcomes": "

Gets one or more outcomes. This is a paginated API. If you provide a null maxResults, this action retrieves a maximum of 100 records per page. If you provide a maxResults, the value must be between 50 and 100. To get the next page results, provide the pagination token from the GetOutcomesResult as part of your request. A null pagination token fetches the records from the beginning.

", - "GetPrediction": "

Evaluates an event against a detector version. If a version ID is not provided, the detector’s (ACTIVE) version is used.

", "GetRules": "

Gets all rules for a detector (paginated) if ruleId and ruleVersion are not specified. Gets all rules for the detector and the ruleId if present (paginated). Gets a specific rule if both the ruleId and the ruleVersion are specified.

This is a paginated API. Providing null maxResults results in retrieving a maximum of 100 records per page. If you provide maxResults, the value must be between 50 and 100. To get the next page result, provide a pagination token from GetRulesResult as part of your request. A null pagination token fetches the records from the beginning.

", "GetVariables": "

Gets all of the variables or the specific variable. This is a paginated API. Providing null maxSizePerPage results in retrieving a maximum of 100 records per page. If you provide maxSizePerPage, the value must be between 50 and 100. To get the next page result, provide a pagination token from GetVariablesResult as part of your request. A null pagination token fetches the records from the beginning.

", "ListTagsForResource": "

Lists all tags associated with the resource. This is a paginated API. To get the next page results, provide the pagination token from the response as part of your request. A null pagination token fetches the records from the beginning.

", @@ -171,7 +170,7 @@ "base": null, "refs": { "CreateVariableRequest$dataType": "

The data type.

", - "Variable$dataType": "

The data type of the variable.

" + "Variable$dataType": "

The data type of the variable. For more information see Variable types.

" } }, "DataValidationMetrics": { @@ -297,12 +296,6 @@ "entityTypeList$member": null } }, - "EventAttributeMap": { - "base": null, - "refs": { - "GetPredictionRequest$eventAttributes": "

Names of variables you defined in Amazon Fraud Detector to represent event data elements and their corresponding values for the event you are sending for evaluation.

" - } - }, "EventType": { "base": "

The event type details.

", "refs": { @@ -333,8 +326,7 @@ "ExternalModelEndpointDataBlobMap": { "base": null, "refs": { - "GetEventPredictionRequest$externalModelEndpointDataBlobs": "

The Amazon SageMaker model endpoint input data blobs.

", - "GetPredictionRequest$externalModelEndpointDataBlobs": "

The Amazon SageMaker model endpoint input data blobs.

" + "GetEventPredictionRequest$externalModelEndpointDataBlobs": "

The Amazon SageMaker model endpoint input data blobs.

" } }, "ExternalModelList": { @@ -466,16 +458,6 @@ "refs": { } }, - "GetPredictionRequest": { - "base": null, - "refs": { - } - }, - "GetPredictionResult": { - "base": null, - "refs": { - } - }, "GetRulesRequest": { "base": null, "refs": { @@ -543,8 +525,7 @@ "ListOfModelScores": { "base": null, "refs": { - "GetEventPredictionResult$modelScores": "

The model scores. Amazon Fraud Detector generates model scores between 0 and 1000, where 0 is low fraud risk and 1000 is high fraud risk. Model scores are directly related to the false positive rate (FPR). For example, a score of 600 corresponds to an estimated 10% false positive rate whereas a score of 900 corresponds to an estimated 2% false positive rate.

", - "GetPredictionResult$modelScores": "

The model scores for models used in the detector version.

" + "GetEventPredictionResult$modelScores": "

The model scores. Amazon Fraud Detector generates model scores between 0 and 1000, where 0 is low fraud risk and 1000 is high fraud risk. Model scores are directly related to the false positive rate (FPR). For example, a score of 600 corresponds to an estimated 10% false positive rate whereas a score of 900 corresponds to an estimated 2% false positive rate.

" } }, "ListOfModelVersions": { @@ -558,8 +539,7 @@ "ListOfRuleResults": { "base": null, "refs": { - "GetEventPredictionResult$ruleResults": "

The results.

", - "GetPredictionResult$ruleResults": "

The rule results in the prediction.

" + "GetEventPredictionResult$ruleResults": "

The results.

" } }, "ListOfStrings": { @@ -569,7 +549,6 @@ "EventType$eventVariables": "

The event type event variables.

", "EventType$labels": "

The event type labels.

", "GetDetectorVersionResult$externalModelEndpoints": "

The Amazon SageMaker model endpoints included in the detector version.

", - "GetPredictionResult$outcomes": "

The prediction outcomes.

", "PutEventTypeRequest$labels": "

The event type labels.

", "RuleResult$outcomes": "

The outcomes of the matched rule, based on the rule execution mode.

", "TrainingDataSchema$modelVariables": "

The training data schema variables.

", @@ -805,13 +784,6 @@ "refs": { } }, - "Role": { - "base": "

The role used to invoke external model endpoints.

", - "refs": { - "ExternalModel$role": "

The role used to invoke the model.

", - "PutExternalModelRequest$role": "

The IAM role used to invoke the model endpoint.

" - } - }, "Rule": { "base": "

A rule.

", "refs": { @@ -1060,18 +1032,6 @@ "GetVariablesRequest$maxResults": "

The max size per page determined for the get variable request.

" } }, - "attributeKey": { - "base": null, - "refs": { - "EventAttributeMap$key": null - } - }, - "attributeValue": { - "base": null, - "refs": { - "EventAttributeMap$value": null - } - }, "blob": { "base": null, "refs": { @@ -1351,7 +1311,7 @@ "CreateVariableRequest$name": "

The name of the variable.

", "CreateVariableRequest$defaultValue": "

The default value for the variable when no value is received.

", "CreateVariableRequest$description": "

The description.

", - "CreateVariableRequest$variableType": "

The variable type.

Valid Values: AUTH_CODE | AVS | BILLING_ADDRESS_L1 | BILLING_ADDRESS_L2 | BILLING_CITY | BILLING_COUNTRY | BILLING_NAME | BILLING_PHONE | BILLING_STATE | BILLING_ZIP | CARD_BIN | CATEGORICAL | CURRENCY_CODE | EMAIL_ADDRESS | FINGERPRINT | FRAUD_LABEL | FREE_FORM_TEXT | IP_ADDRESS | NUMERIC | ORDER_ID | PAYMENT_TYPE | PHONE_NUMBER | PRICE | PRODUCT_CATEGORY | SHIPPING_ADDRESS_L1 | SHIPPING_ADDRESS_L2 | SHIPPING_CITY | SHIPPING_COUNTRY | SHIPPING_NAME | SHIPPING_PHONE | SHIPPING_STATE | SHIPPING_ZIP | USERAGENT | SHIPPING_ZIP | USERAGENT

", + "CreateVariableRequest$variableType": "

The variable type. For more information see Variable types.

Valid Values: AUTH_CODE | AVS | BILLING_ADDRESS_L1 | BILLING_ADDRESS_L2 | BILLING_CITY | BILLING_COUNTRY | BILLING_NAME | BILLING_PHONE | BILLING_STATE | BILLING_ZIP | CARD_BIN | CATEGORICAL | CURRENCY_CODE | EMAIL_ADDRESS | FINGERPRINT | FRAUD_LABEL | FREE_FORM_TEXT | IP_ADDRESS | NUMERIC | ORDER_ID | PAYMENT_TYPE | PHONE_NUMBER | PRICE | PRODUCT_CATEGORY | SHIPPING_ADDRESS_L1 | SHIPPING_ADDRESS_L2 | SHIPPING_CITY | SHIPPING_COUNTRY | SHIPPING_NAME | SHIPPING_PHONE | SHIPPING_STATE | SHIPPING_ZIP | USERAGENT | SHIPPING_ZIP | USERAGENT

", "CsvIndexToVariableMap$key": null, "CsvIndexToVariableMap$value": null, "DeleteEventRequest$eventId": "

The ID of the event to delete.

", @@ -1364,6 +1324,7 @@ "EntityType$name": "

The entity type name.

", "EventType$name": "

The event type name.

", "ExternalModel$modelEndpoint": "

The Amazon SageMaker model endpoints.

", + "ExternalModel$invokeModelEndpointRoleArn": "

The role used to invoke the model.

", "ExternalModelEndpointDataBlobMap$key": null, "FieldValidationMessage$fieldName": "

The field name.

", "FieldValidationMessage$identifier": "

The message ID.

", @@ -1378,7 +1339,6 @@ "GetEntityTypesRequest$nextToken": "

The next token for the subsequent request.

", "GetEntityTypesResult$nextToken": "

The next page token.

", "GetEventPredictionRequest$detectorId": "

The detector ID.

", - "GetEventPredictionRequest$detectorVersionId": "

The detector version ID.

", "GetEventPredictionRequest$eventId": "

The unique ID used to identify the event.

", "GetEventPredictionRequest$eventTypeName": "

The event type associated with the detector specified for the prediction.

", "GetEventPredictionRequest$eventTimestamp": "

Timestamp that defines when the event under evaluation occurred.

", @@ -1394,9 +1354,6 @@ "GetModelsResult$nextToken": "

The next page token to be used in subsequent requests.

", "GetOutcomesRequest$nextToken": "

The next page token for the request.

", "GetOutcomesResult$nextToken": "

The next page token for subsequent requests.

", - "GetPredictionRequest$detectorId": "

The detector ID.

", - "GetPredictionRequest$detectorVersionId": "

The detector version ID.

", - "GetPredictionRequest$eventId": "

The unique ID used to identify the event.

", "GetRulesRequest$nextToken": "

The next page token.

", "GetRulesResult$nextToken": "

The next page token to be used in subsequent requests.

", "GetVariablesRequest$name": "

The name of the variable.

", @@ -1416,16 +1373,15 @@ "ModelVersionDetail$status": "

The status of the model version.

", "NameList$member": null, "NonEmptyListOfStrings$member": null, + "PutExternalModelRequest$invokeModelEndpointRoleArn": "

The IAM role used to invoke the model endpoint.

", "ResourceNotFoundException$message": null, - "Role$arn": "

The role ARN.

", - "Role$name": "

The role name.

", "RuleResult$ruleId": "

The rule ID that was matched, based on the rule execution mode.

", "ThrottlingException$message": null, "UpdateModelVersionResult$status": "

The status of the updated model version.

", "UpdateVariableRequest$name": "

The name of the variable.

", "UpdateVariableRequest$defaultValue": "

The new default value of the variable.

", "UpdateVariableRequest$description": "

The new description.

", - "UpdateVariableRequest$variableType": "

The variable type.

", + "UpdateVariableRequest$variableType": "

The variable type. For more information see Variable types.

", "ValidationException$message": null, "Variable$name": "

The name of the variable.

", "Variable$defaultValue": "

The default value of the variable.

", @@ -1436,7 +1392,7 @@ "VariableEntry$dataSource": "

The data source of the variable.

", "VariableEntry$defaultValue": "

The default value of the variable.

", "VariableEntry$description": "

The description of the variable.

", - "VariableEntry$variableType": "

The type of the variable.

Valid Values: AUTH_CODE | AVS | BILLING_ADDRESS_L1 | BILLING_ADDRESS_L2 | BILLING_CITY | BILLING_COUNTRY | BILLING_NAME | BILLING_PHONE | BILLING_STATE | BILLING_ZIP | CARD_BIN | CATEGORICAL | CURRENCY_CODE | EMAIL_ADDRESS | FINGERPRINT | FRAUD_LABEL | FREE_FORM_TEXT | IP_ADDRESS | NUMERIC | ORDER_ID | PAYMENT_TYPE | PHONE_NUMBER | PRICE | PRODUCT_CATEGORY | SHIPPING_ADDRESS_L1 | SHIPPING_ADDRESS_L2 | SHIPPING_CITY | SHIPPING_COUNTRY | SHIPPING_NAME | SHIPPING_PHONE | SHIPPING_STATE | SHIPPING_ZIP | USERAGENT | SHIPPING_ZIP | USERAGENT

", + "VariableEntry$variableType": "

The type of the variable. For more information see Variable types.

Valid Values: AUTH_CODE | AVS | BILLING_ADDRESS_L1 | BILLING_ADDRESS_L2 | BILLING_CITY | BILLING_COUNTRY | BILLING_NAME | BILLING_PHONE | BILLING_STATE | BILLING_ZIP | CARD_BIN | CATEGORICAL | CURRENCY_CODE | EMAIL_ADDRESS | FINGERPRINT | FRAUD_LABEL | FREE_FORM_TEXT | IP_ADDRESS | NUMERIC | ORDER_ID | PAYMENT_TYPE | PHONE_NUMBER | PRICE | PRODUCT_CATEGORY | SHIPPING_ADDRESS_L1 | SHIPPING_ADDRESS_L2 | SHIPPING_CITY | SHIPPING_COUNTRY | SHIPPING_NAME | SHIPPING_PHONE | SHIPPING_STATE | SHIPPING_ZIP | USERAGENT | SHIPPING_ZIP | USERAGENT

", "labelMapper$key": null } }, @@ -1526,6 +1482,7 @@ "DeleteDetectorVersionRequest$detectorVersionId": "

The ID of the detector version to delete.

", "GetDetectorVersionRequest$detectorVersionId": "

The detector version ID.

", "GetDetectorVersionResult$detectorVersionId": "

The detector version ID.

", + "GetEventPredictionRequest$detectorVersionId": "

The detector version ID.

", "GetRulesRequest$ruleVersion": "

The rule version.

", "Rule$ruleVersion": "

The rule version.

", "RuleDetail$ruleVersion": "

The rule version.

", diff --git a/models/apis/fsx/2018-03-01/docs-2.json b/models/apis/fsx/2018-03-01/docs-2.json index 4c6248a676f..fd051d8ad16 100644 --- a/models/apis/fsx/2018-03-01/docs-2.json +++ b/models/apis/fsx/2018-03-01/docs-2.json @@ -3,7 +3,7 @@ "service": "

Amazon FSx is a fully managed service that makes it easy for storage and application administrators to launch and use shared file storage.

", "operations": { "CancelDataRepositoryTask": "

Cancels an existing Amazon FSx for Lustre data repository task if that task is in either the PENDING or EXECUTING state. When you cancel a task, Amazon FSx does the following.

", - "CreateBackup": "

Creates a backup of an existing Amazon FSx file system. Creating regular backups for your file system is a best practice, enabling you to restore a file system from a backup if an issue arises with the original file system.

For Amazon FSx for Lustre file systems, you can create a backup only for file systems with the following configuration:

For more information, see https://docs.aws.amazon.com/fsx/latest/LustreGuide/lustre-backups.html.

If a backup with the specified client request token exists, and the parameters match, this operation returns the description of the existing backup. If a backup specified client request token exists, and the parameters don't match, this operation returns IncompatibleParameterError. If a backup with the specified client request token doesn't exist, CreateBackup does the following:

By using the idempotent operation, you can retry a CreateBackup operation without the risk of creating an extra backup. This approach can be useful when an initial call fails in a way that makes it unclear whether a backup was created. If you use the same client request token and the initial call created a backup, the operation returns a successful result because all the parameters are the same.

The CreateBackup operation returns while the backup's lifecycle state is still CREATING. You can check the backup creation status by calling the DescribeBackups operation, which returns the backup state along with other information.

", + "CreateBackup": "

Creates a backup of an existing Amazon FSx file system. Creating regular backups for your file system is a best practice, enabling you to restore a file system from a backup if an issue arises with the original file system.

For Amazon FSx for Lustre file systems, you can create a backup only for file systems with the following configuration:

For more information about backing up Amazon FSx for Lustre file systems, see Working with FSx for Lustre backups.

For more information about backing up Amazon FSx for Windows file systems, see Working with FSx for Windows backups.

If a backup with the specified client request token exists, and the parameters match, this operation returns the description of the existing backup. If a backup specified client request token exists, and the parameters don't match, this operation returns IncompatibleParameterError. If a backup with the specified client request token doesn't exist, CreateBackup does the following:

By using the idempotent operation, you can retry a CreateBackup operation without the risk of creating an extra backup. This approach can be useful when an initial call fails in a way that makes it unclear whether a backup was created. If you use the same client request token and the initial call created a backup, the operation returns a successful result because all the parameters are the same.

The CreateBackup operation returns while the backup's lifecycle state is still CREATING. You can check the backup creation status by calling the DescribeBackups operation, which returns the backup state along with other information.

", "CreateDataRepositoryTask": "

Creates an Amazon FSx for Lustre data repository task. You use data repository tasks to perform bulk operations between your Amazon FSx file system and its linked data repository. An example of a data repository task is exporting any data and metadata changes, including POSIX metadata, to files, directories, and symbolic links (symlinks) from your FSx file system to its linked data repository. A CreateDataRepositoryTask operation will fail if a data repository is not linked to the FSx file system. To learn more about data repository tasks, see Using Data Repository Tasks. To learn more about linking a data repository to your file system, see Setting the Export Prefix.

", "CreateFileSystem": "

Creates a new, empty Amazon FSx file system.

If a file system with the specified client request token exists and the parameters match, CreateFileSystem returns the description of the existing file system. If a file system specified client request token exists and the parameters don't match, this call returns IncompatibleParameterError. If a file system with the specified client request token doesn't exist, CreateFileSystem does the following:

This operation requires a client request token in the request that Amazon FSx uses to ensure idempotent creation. This means that calling the operation multiple times with the same client request token has no effect. By using the idempotent operation, you can retry a CreateFileSystem operation without the risk of creating an extra file system. This approach can be useful when an initial call fails in a way that makes it unclear whether a file system was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a file system, the client receives success as long as the parameters are the same.

The CreateFileSystem call returns while the file system's lifecycle state is still CREATING. You can check the file-system creation status by calling the DescribeFileSystems operation, which returns the file system state along with other information.

", "CreateFileSystemFromBackup": "

Creates a new Amazon FSx file system from an existing Amazon FSx backup.

If a file system with the specified client request token exists and the parameters match, this operation returns the description of the file system. If a client request token specified by the file system exists and the parameters don't match, this call returns IncompatibleParameterError. If a file system with the specified client request token doesn't exist, this operation does the following:

Parameters like Active Directory, default share name, automatic backup, and backup settings default to the parameters of the file system that was backed up, unless overridden. You can explicitly supply other settings.

By using the idempotent operation, you can retry a CreateFileSystemFromBackup call without the risk of creating an extra file system. This approach can be useful when an initial call fails in a way that makes it unclear whether a file system was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a file system, the client receives success as long as the parameters are the same.

The CreateFileSystemFromBackup call returns while the file system's lifecycle state is still CREATING. You can check the file-system creation status by calling the DescribeFileSystems operation, which returns the file system state along with other information.

", @@ -15,7 +15,7 @@ "ListTagsForResource": "

Lists tags for an Amazon FSx file systems and backups in the case of Amazon FSx for Windows File Server.

When retrieving all tags, you can optionally specify the MaxResults parameter to limit the number of tags in a response. If more tags remain, Amazon FSx returns a NextToken value in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.

This action is used in an iterative process to retrieve a list of your tags. ListTagsForResource is called first without a NextTokenvalue. Then the action continues to be called with the NextToken parameter set to the value of the last NextToken value until a response has no NextToken.

When using this action, keep the following in mind:

", "TagResource": "

Tags an Amazon FSx resource.

", "UntagResource": "

This action removes a tag from an Amazon FSx resource.

", - "UpdateFileSystem": "

Use this operation to update the configuration of an existing Amazon FSx file system. For an Amazon FSx for Lustre file system, you can update only the WeeklyMaintenanceStartTime. For an Amazon for Windows File Server file system, you can update the following properties:

You can update multiple properties in a single request.

" + "UpdateFileSystem": "

Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request.

For Amazon FSx for Windows File Server file systems, you can update the following properties:

For Amazon FSx for Lustre file systems, you can update the following properties:

" }, "shapes": { "AWSAccountId": { @@ -86,9 +86,9 @@ "AutoImportPolicyType": { "base": null, "refs": { - "CreateFileSystemLustreConfiguration$AutoImportPolicy": "

Use this property to turn the Autoimport feature on and off. AutoImport enables your FSx for Lustre file system to automatically update its contents with changes that have been made to its linked Amazon S3 data repository. You can set the policy to have one the following values:

", - "DataRepositoryConfiguration$AutoImportPolicy": "

Describes the data repository's AutoImportPolicy. AutoImport enables your FSx for Lustre file system to automatically update its contents with changes that have been made to its linked Amazon S3 data repository. The policy can have the following values:

", - "UpdateFileSystemLustreConfiguration$AutoImportPolicy": "

Use this property to turn the Autoimport feature on and off. AutoImport enables your FSx for Lustre file system to automatically update its contents with changes that have been made to its linked Amazon S3 data repository. You can set the policy to have one the following values:

" + "CreateFileSystemLustreConfiguration$AutoImportPolicy": "

(Optional) Use this property to configure the AutoImport feature on the file system's linked Amazon S3 data repository. You use AutoImport to update the contents of your FSx for Lustre file system automatically with changes that occur in the linked S3 data repository. AutoImportPolicy can have the following values:

For more information, see Automatically import updates from your S3 bucket.

", + "DataRepositoryConfiguration$AutoImportPolicy": "

Describes the file system's linked S3 data repository's AutoImportPolicy. The AutoImportPolicy configures how your FSx for Lustre file system automatically updates its contents with changes that occur in the linked S3 data repository. AutoImportPolicy can have the following values:

For more information, see Automatically import updates from your S3 bucket.

", + "UpdateFileSystemLustreConfiguration$AutoImportPolicy": "

(Optional) Use this property to configure the AutoImport feature on the file system's linked Amazon S3 data repository. You use AutoImport to update the contents of your FSx for Lustre file system automatically with changes that occur in the linked S3 data repository. AutoImportPolicy can have the following values:

For more information, see Automatically import updates from your S3 bucket.

" } }, "AutomaticBackupRetentionDays": { @@ -158,11 +158,11 @@ "BackupType": { "base": "

The type of the backup.

", "refs": { - "Backup$Type": "

The type of the backup.

" + "Backup$Type": "

The type of the file system backup.

" } }, "Backups": { - "base": "

A list of backups.

", + "base": "

A list of file system backups.

", "refs": { "DescribeBackupsResponse$Backups": "

Any array of backups.

" } @@ -296,7 +296,7 @@ "DataRepositoryLifecycle": { "base": null, "refs": { - "DataRepositoryConfiguration$Lifecycle": "

Describes the state of the file system's S3 durable data repository, if it is configured with an S3 repository. The lifecycle can have the following values:

" + "DataRepositoryConfiguration$Lifecycle": "

Describes the state of the file system's S3 durable data repository, if it is configured with an S3 repository. The lifecycle can have the following values:

" } }, "DataRepositoryTask": { @@ -660,7 +660,7 @@ "base": null, "refs": { "CompletionReport$Enabled": "

Set Enabled to True to generate a CompletionReport when the task completes. If set to true, then you need to provide a report Scope, Path, and Format. Set Enabled to False if you do not want a CompletionReport generated when the task completes.

", - "CreateFileSystemLustreConfiguration$CopyTagsToBackups": "

A boolean flag indicating whether tags for the file system should be copied to backups. This value defaults to false. If it's set to true, all tags for the file system are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.

", + "CreateFileSystemLustreConfiguration$CopyTagsToBackups": "

A boolean flag indicating whether tags for the file system should be copied to backups. This value defaults to false. If it's set to true, all tags for the file system are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.

For more information, see Working with backups.

", "CreateFileSystemWindowsConfiguration$CopyTagsToBackups": "

A boolean flag indicating whether tags for the file system should be copied to backups. This value defaults to false. If it's set to true, all tags for the file system are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.

", "DeleteFileSystemLustreConfiguration$SkipFinalBackup": "

Set SkipFinalBackup to false if you want to take a final backup of the file system you are deleting. By default, Amazon FSx will not take a final backup on your behalf when the DeleteFileSystem operation is invoked. (Default = true)

", "DeleteFileSystemWindowsConfiguration$SkipFinalBackup": "

By default, Amazon FSx for Windows takes a final backup on your behalf when the DeleteFileSystem operation is invoked. Doing this helps protect you from data loss, and we highly recommend taking the final backup. If you want to skip this backup, use this flag to do so.

", @@ -732,7 +732,7 @@ "LustreDeploymentType": { "base": null, "refs": { - "CreateFileSystemLustreConfiguration$DeploymentType": "

Choose SCRATCH_1 and SCRATCH_2 deployment types when you need temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1.

This option can only be set for for PERSISTENT_1 deployments types.

Choose PERSISTENT_1 deployment type for longer-term storage and workloads and encryption of data in transit. To learn more about deployment types, see FSx for Lustre Deployment Options.

Encryption of data in-transit is automatically enabled when you access a SCRATCH_2 or PERSISTENT_1 file system from Amazon EC2 instances that support this feature. (Default = SCRATCH_1)

Encryption of data in-transit for SCRATCH_2 and PERSISTENT_1 deployment types is supported when accessed from supported instance types in supported AWS Regions. To learn more, Encrypting Data in Transit.

", + "CreateFileSystemLustreConfiguration$DeploymentType": "

Choose SCRATCH_1 and SCRATCH_2 deployment types when you need temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1.

Choose PERSISTENT_1 deployment type for longer-term storage and workloads and encryption of data in transit. To learn more about deployment types, see FSx for Lustre Deployment Options.

Encryption of data in-transit is automatically enabled when you access a SCRATCH_2 or PERSISTENT_1 file system from Amazon EC2 instances that support this feature. (Default = SCRATCH_1)

Encryption of data in-transit for SCRATCH_2 and PERSISTENT_1 deployment types is supported when accessed from supported instance types in supported AWS Regions. To learn more, Encrypting Data in Transit.

", "LustreFileSystemConfiguration$DeploymentType": "

The deployment type of the FSX for Lustre file system. Scratch deployment type is designed for temporary storage and shorter-term processing of data.

SCRATCH_1 and SCRATCH_2 deployment types are best suited for when you need temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1.

The PERSISTENT_1 deployment type is used for longer-term storage and workloads and encryption of data in transit. To learn more about deployment types, see FSx for Lustre Deployment Options. (Default = SCRATCH_1)

" } }, diff --git a/models/apis/kendra/2019-02-03/api-2.json b/models/apis/kendra/2019-02-03/api-2.json index 49a3f070459..5c915c46b93 100644 --- a/models/apis/kendra/2019-02-03/api-2.json +++ b/models/apis/kendra/2019-02-03/api-2.json @@ -751,7 +751,7 @@ }, "DataSourceInclusionsExclusionsStringsMember":{ "type":"string", - "max":50, + "max":150, "min":1 }, "DataSourceName":{ @@ -893,7 +893,8 @@ "ConnectionConfiguration":{"shape":"ConnectionConfiguration"}, "VpcConfiguration":{"shape":"DataSourceVpcConfiguration"}, "ColumnConfiguration":{"shape":"ColumnConfiguration"}, - "AclConfiguration":{"shape":"AclConfiguration"} + "AclConfiguration":{"shape":"AclConfiguration"}, + "SqlConfiguration":{"shape":"SqlConfiguration"} } }, "DatabaseEngineType":{ @@ -1551,6 +1552,13 @@ "max":36, "min":1 }, + "QueryIdentifiersEnclosingOption":{ + "type":"string", + "enum":[ + "DOUBLE_QUOTES", + "NONE" + ] + }, "QueryRequest":{ "type":"structure", "required":[ @@ -1565,7 +1573,8 @@ "RequestedDocumentAttributes":{"shape":"DocumentAttributeKeyList"}, "QueryResultTypeFilter":{"shape":"QueryResultType"}, "PageNumber":{"shape":"Integer"}, - "PageSize":{"shape":"Integer"} + "PageSize":{"shape":"Integer"}, + "SortingConfiguration":{"shape":"SortingConfiguration"} } }, "QueryResult":{ @@ -1871,7 +1880,8 @@ "members":{ "Facetable":{"shape":"Boolean"}, "Searchable":{"shape":"Boolean"}, - "Displayable":{"shape":"Boolean"} + "Displayable":{"shape":"Boolean"}, + "Sortable":{"shape":"Boolean"} } }, "SecretArn":{ @@ -1981,6 +1991,30 @@ "type":"string", "enum":["SHAREPOINT_ONLINE"] }, + "SortOrder":{ + "type":"string", + "enum":[ + "DESC", + "ASC" + ] + }, + "SortingConfiguration":{ + "type":"structure", + "required":[ + "DocumentAttributeKey", + "SortOrder" + ], + "members":{ + "DocumentAttributeKey":{"shape":"DocumentAttributeKey"}, + "SortOrder":{"shape":"SortOrder"} + } + }, + "SqlConfiguration":{ + "type":"structure", + "members":{ + "QueryIdentifiersEnclosingOption":{"shape":"QueryIdentifiersEnclosingOption"} + } + }, "StartDataSourceSyncJobRequest":{ "type":"structure", "required":[ diff --git a/models/apis/kendra/2019-02-03/docs-2.json b/models/apis/kendra/2019-02-03/docs-2.json index 7aa64bd71f9..da699a98ca5 100644 --- a/models/apis/kendra/2019-02-03/docs-2.json +++ b/models/apis/kendra/2019-02-03/docs-2.json @@ -150,6 +150,7 @@ "Search$Facetable": "

Indicates that the field can be used to create search facets, a count of results for each value in the field. The default is false .

", "Search$Searchable": "

Determines whether the field is used in the search. If the Searchable field is true, you can use relevance tuning to manually tune how Amazon Kendra weights the field in the search. The default is true for string fields and false for number and date fields.

", "Search$Displayable": "

Determines whether the field is returned in the query response. The default is true.

", + "Search$Sortable": "

Determines whether the field can be used to sort the results of a query. If you specify sorting on a field that does not have Sortable set to true, Amazon Kendra returns an exception. The default is false.

", "ServiceNowKnowledgeArticleConfiguration$CrawlAttachments": "

Indicates whether Amazon Kendra should index attachments to knowledge articles.

", "ServiceNowServiceCatalogConfiguration$CrawlAttachments": "

Indicates whether Amazon Kendra should crawl attachments to the service catalog items.

", "SharePointConfiguration$CrawlAttachments": "

TRUE to include attachments to documents stored in your Microsoft SharePoint site in the index; otherwise, FALSE.

", @@ -313,7 +314,7 @@ "ServiceNowServiceCatalogConfiguration$IncludeAttachmentFilePatterns": "

Determines the types of file attachments that are included in the index.

", "ServiceNowServiceCatalogConfiguration$ExcludeAttachmentFilePatterns": "

Determines the types of file attachments that are excluded from the index.

", "SharePointConfiguration$InclusionPatterns": "

A list of regular expression patterns. Documents that match the patterns are included in the index. Documents that don't match the patterns are excluded from the index. If a document matches both an inclusion pattern and an exclusion pattern, the document is not included in the index.

The regex is applied to the display URL of the SharePoint document.

", - "SharePointConfiguration$ExclusionPatterns": "

A list of regulary expression patterns. Documents that match the patterns are excluded from the index. Documents that don't match the patterns are included in the index. If a document matches both an exclusion pattern and an inclusion pattern, the document is not included in the index.

The regex is applied to the display URL of the SharePoint document.

" + "SharePointConfiguration$ExclusionPatterns": "

A list of regular expression patterns. Documents that match the patterns are excluded from the index. Documents that don't match the patterns are included in the index. If a document matches both an exclusion pattern and an inclusion pattern, the document is not included in the index.

The regex is applied to the display URL of the SharePoint document.

" } }, "DataSourceInclusionsExclusionsStringsMember": { @@ -536,7 +537,8 @@ "DocumentAttribute$Key": "

The identifier for the attribute.

", "DocumentAttributeKeyList$member": null, "Facet$DocumentAttributeKey": "

The unique key for the document attribute.

", - "FacetResult$DocumentAttributeKey": "

The key for the facet values. This is the same as the DocumentAttributeKey provided in the query.

" + "FacetResult$DocumentAttributeKey": "

The key for the facet values. This is the same as the DocumentAttributeKey provided in the query.

", + "SortingConfiguration$DocumentAttributeKey": "

The name of the document attribute used to sort the response. You can use any field that has the Sortable flag set to true.

You can also sort by any of the following built-in attributes:

" } }, "DocumentAttributeKeyList": { @@ -1050,6 +1052,12 @@ "SubmitFeedbackRequest$QueryId": "

The identifier of the specific query for which you are submitting feedback. The query ID is returned in the response to the operation.

" } }, + "QueryIdentifiersEnclosingOption": { + "base": null, + "refs": { + "SqlConfiguration$QueryIdentifiersEnclosingOption": "

Determines whether Amazon Kendra encloses SQL identifiers in double quotes (\") when making a database query.

By default, Amazon Kendra passes SQL identifiers the way that they are entered into the data source configuration. It does not change the case of identifiers or enclose them in quotes.

PostgreSQL internally converts uppercase characters to lower case characters in identifiers unless they are quoted. Choosing this option encloses identifiers in quotes so that PostgreSQL does not convert the character's case.

For MySQL databases, you must enable the ansi_quotes option when you choose this option.

" + } + }, "QueryRequest": { "base": null, "refs": { @@ -1367,6 +1375,24 @@ "SharePointConfiguration$SharePointVersion": "

The version of Microsoft SharePoint that you are using as a data source.

" } }, + "SortOrder": { + "base": null, + "refs": { + "SortingConfiguration$SortOrder": "

The order that the results should be returned in. In case of ties, the relevance assigned to the result by Amazon Kendra is used as the tie-breaker.

" + } + }, + "SortingConfiguration": { + "base": "

Specifies the document attribute to use to sort the response to an Amazon Kendra query. You can specify a single attribute for sorting. The attribute must have the Sortable flag set to true; otherwise, Amazon Kendra returns an exception.

", + "refs": { + "QueryRequest$SortingConfiguration": "

Provides information that determines how the results of the query are sorted. You can set the field that Amazon Kendra should sort the results on, and specify whether the results should be sorted in ascending or descending order. In the case of ties in sorting the results, the results are sorted by relevance.

If you don't provide sorting configuration, the results are sorted by the relevance that Amazon Kendra determines for the result.

" + } + }, + "SqlConfiguration": { + "base": "

Provides information that configures Amazon Kendra to use a SQL database.

", + "refs": { + "DatabaseConfiguration$SqlConfiguration": "

Provides information about how Amazon Kendra uses quote marks around SQL identifiers when querying a database data source.

" + } + }, "StartDataSourceSyncJobRequest": { "base": null, "refs": { @@ -1512,7 +1538,7 @@ "DescribeFaqResponse$UpdatedAt": "

The date and time that the FAQ was last updated.

", "DescribeIndexResponse$CreatedAt": "

The Unix datetime that the index was created.

", "DescribeIndexResponse$UpdatedAt": "

The Unix datetime that the index was last updated.

", - "DocumentAttributeValue$DateValue": "

A date value expressed as seconds from the Unix epoch.

", + "DocumentAttributeValue$DateValue": "

A date expressed as an ISO 8601 string.

", "FaqSummary$CreatedAt": "

The UNIX datetime that the FAQ was added to the index.

", "FaqSummary$UpdatedAt": "

The UNIX datetime that the FAQ was last updated.

", "IndexConfigurationSummary$CreatedAt": "

The Unix timestamp when the index was created.

", diff --git a/models/apis/macie2/2020-01-01/api-2.json b/models/apis/macie2/2020-01-01/api-2.json index f35e576ca78..0a0bc6a4370 100644 --- a/models/apis/macie2/2020-01-01/api-2.json +++ b/models/apis/macie2/2020-01-01/api-2.json @@ -5177,6 +5177,10 @@ "UsageStatisticsFilter": { "type": "structure", "members": { + "comparator": { + "shape": "UsageStatisticsFilterComparator", + "locationName": "comparator" + }, "key": { "shape": "UsageStatisticsFilterKey", "locationName": "key" @@ -5187,10 +5191,25 @@ } } }, + "UsageStatisticsFilterComparator": { + "type": "string", + "enum": [ + "GT", + "GTE", + "LT", + "LTE", + "EQ", + "NE", + "CONTAINS" + ] + }, "UsageStatisticsFilterKey": { "type": "string", "enum": [ - "accountId" + "accountId", + "serviceLimit", + "freeTrialStartDate", + "total" ] }, "UsageStatisticsSortBy": { @@ -5210,7 +5229,9 @@ "type": "string", "enum": [ "accountId", - "total" + "total", + "serviceLimitValue", + "freeTrialStartDate" ] }, "UsageTotal": { diff --git a/models/apis/macie2/2020-01-01/docs-2.json b/models/apis/macie2/2020-01-01/docs-2.json index 895e48cd0ff..795b3d7cfe1 100644 --- a/models/apis/macie2/2020-01-01/docs-2.json +++ b/models/apis/macie2/2020-01-01/docs-2.json @@ -785,9 +785,9 @@ } }, "ObjectCountByEncryptionType" : { - "base" : "

The total number of objects that are in the bucket, grouped by server-side encryption type. This includes a grouping that reports the total number of objects that aren't encrypted.

", + "base" : "

Provides information about the number of objects that are in an S3 bucket and use certain types of server-side encryption, use client-side encryption, or aren't encrypted.

", "refs" : { - "BucketMetadata$ObjectCountByEncryptionType" : "

The total number of objects that are in the bucket, grouped by server-side encryption type. This includes a grouping that reports the total number of objects that aren't encrypted.

" + "BucketMetadata$ObjectCountByEncryptionType" : "

The total number of objects that are in the bucket, grouped by server-side encryption type. This includes a grouping that reports the total number of objects that aren't encrypted or use client-side encryption.

" } }, "OrderBy" : { @@ -1092,15 +1092,21 @@ } }, "UsageStatisticsFilter" : { - "base" : "

Specifies criteria for filtering the results of a query for account quotas and usage data.

", + "base" : "

Specifies a condition for filtering the results of a query for account quotas and usage data.

", "refs" : { "__listOfUsageStatisticsFilter$member" : null } }, + "UsageStatisticsFilterComparator" : { + "base" : "

The operator to use in a condition that filters the results of a query for account quotas and usage data. Valid values are:

", + "refs" : { + "UsageStatisticsFilter$Comparator" : "

The operator to use in the condition. If the value for the key property is accountId, this value must be CONTAINS. If the value for the key property is any other supported field, this value can be EQ, GT, GTE, LT, LTE, or NE.

" + } + }, "UsageStatisticsFilterKey" : { - "base" : "

The field to use to filter the results of a query for account quotas and usage data:

", + "base" : "

The field to use in a condition that filters the results of a query for account quotas and usage data. Valid values are:

", "refs" : { - "UsageStatisticsFilter$Key" : "

The field to use to filter the results. The only supported value is accountId.

" + "UsageStatisticsFilter$Key" : "

The field to use in the condition.

" } }, "UsageStatisticsSortBy" : { @@ -1336,7 +1342,7 @@ "__listOfUsageStatisticsFilter" : { "base" : null, "refs" : { - "GetUsageStatisticsRequest$FilterBy" : "

The criteria to use to filter the query results.

" + "GetUsageStatisticsRequest$FilterBy" : "

An array of objects, one for each condition to use to filter the query results. If the array contains more than one object, Amazon Macie uses an AND operator to join the conditions specified by the objects.

" } }, "__listOfUsageTotal" : { @@ -1371,7 +1377,7 @@ "SimpleScopeTerm$Values" : "

An array that lists one or more values to use in the condition.

", "TestCustomDataIdentifierRequest$IgnoreWords" : "

An array that lists specific character sequences (ignore words) to exclude from the results. If the text matched by the regular expression is the same as any string in this array, Amazon Macie ignores it. The array can contain as many as 10 ignore words. Each ignore word can contain 4 - 90 characters.

", "TestCustomDataIdentifierRequest$Keywords" : "

An array that lists specific character sequences (keywords), one of which must be within proximity (maximumMatchDistance) of the regular expression to match. The array can contain as many as 50 keywords. Each keyword can contain 4 - 90 characters.

", - "UsageStatisticsFilter$Values" : "

An array that lists the AWS account ID for each account to include in the results.

" + "UsageStatisticsFilter$Values" : "

An array that lists values to use in the condition, based on the value for the field specified by the key property. If the value for the key property is accountId, this array can specify multiple values. Otherwise, this array can specify only one value.

Valid values for each supported field are:

" } }, "__long" : { @@ -1410,10 +1416,10 @@ "GetBucketStatisticsResponse$SizeInBytesCompressed" : "

The total compressed storage size, in bytes, of all the buckets.

", "GetInvitationsCountResponse$InvitationsCount" : "

The total number of invitations that were received by the account, not including the currently accepted invitation.

", "GroupCount$Count" : "

The total number of findings in the group of query results.

", - "ObjectCountByEncryptionType$CustomerManaged" : "

Reserved for future use.

", - "ObjectCountByEncryptionType$KmsManaged" : "

Reserved for future use.

", - "ObjectCountByEncryptionType$S3Managed" : "

Reserved for future use.

", - "ObjectCountByEncryptionType$Unencrypted" : "

Reserved for future use.

", + "ObjectCountByEncryptionType$CustomerManaged" : "

The total number of objects that are encrypted using a customer-managed key. The objects use customer-provided server-side (SSE-C) encryption.

", + "ObjectCountByEncryptionType$KmsManaged" : "

The total number of objects that are encrypted using an AWS Key Management Service (AWS KMS) customer master key (CMK). The objects use AWS KMS AWS-managed (AWS-KMS) encryption or AWS KMS customer-managed (SSE-KMS) encryption.

", + "ObjectCountByEncryptionType$S3Managed" : "

The total number of objects that are encrypted using an Amazon S3-managed key. The objects use Amazon S3-managed (SSE-S3) encryption.

", + "ObjectCountByEncryptionType$Unencrypted" : "

The total number of objects that aren't encrypted or use client-side encryption.

", "S3Object$Size" : "

The total storage size, in bytes, of the object.

", "SensitiveDataItem$TotalCount" : "

The total number of occurrences of the sensitive data that was detected.

", "ServiceLimit$Value" : "

The value for the metric specified by the UsageByAccount.type field in the response.

", @@ -1627,7 +1633,7 @@ "S3Bucket$CreatedAt" : "

The date and time, in UTC and extended ISO 8601 format, when the bucket was created.

", "S3Object$LastModified" : "

The date and time, in UTC and extended ISO 8601 format, when the object was last modified.

", "SessionContextAttributes$CreationDate" : "

The date and time, in UTC and ISO 8601 format, when the credentials were issued.

", - "UsageRecord$FreeTrialStartDate" : "

The date and time, in UTC and extended ISO 8601 format, when the free trial period started for the account. This value is null if the account didn't participate in the free trial.

" + "UsageRecord$FreeTrialStartDate" : "

The date and time, in UTC and extended ISO 8601 format, when the free trial started for the account.

" } } } diff --git a/models/apis/mediaconnect/2018-11-14/api-2.json b/models/apis/mediaconnect/2018-11-14/api-2.json index 38e3bcdf058..5f27ca7f1ce 100644 --- a/models/apis/mediaconnect/2018-11-14/api-2.json +++ b/models/apis/mediaconnect/2018-11-14/api-2.json @@ -1092,6 +1092,10 @@ "shape": "__string", "locationName": "entitlementArn" }, + "EntitlementStatus": { + "shape": "EntitlementStatus", + "locationName": "entitlementStatus" + }, "Name": { "shape": "__string", "locationName": "name" @@ -1107,6 +1111,13 @@ "Name" ] }, + "EntitlementStatus": { + "type": "string", + "enum": [ + "ENABLED", + "DISABLED" + ] + }, "FailoverConfig": { "type": "structure", "members": { @@ -1213,6 +1224,10 @@ "shape": "Encryption", "locationName": "encryption" }, + "EntitlementStatus": { + "shape": "EntitlementStatus", + "locationName": "entitlementStatus" + }, "Name": { "shape": "__string", "locationName": "name" @@ -2027,6 +2042,10 @@ "location": "uri", "locationName": "entitlementArn" }, + "EntitlementStatus": { + "shape": "EntitlementStatus", + "locationName": "entitlementStatus" + }, "FlowArn": { "shape": "__string", "location": "uri", diff --git a/models/apis/mediaconnect/2018-11-14/docs-2.json b/models/apis/mediaconnect/2018-11-14/docs-2.json index 52bee3988a5..e1f18b154be 100644 --- a/models/apis/mediaconnect/2018-11-14/docs-2.json +++ b/models/apis/mediaconnect/2018-11-14/docs-2.json @@ -109,6 +109,14 @@ "__listOfEntitlement$member": null } }, + "EntitlementStatus": { + "base": null, + "refs": { + "Entitlement$EntitlementStatus": "An indication of whether the entitlement is enabled.", + "GrantEntitlementRequest$EntitlementStatus": "An indication of whether the new entitlement should be enabled or disabled as soon as it is created. If you don\u2019t specify the entitlementStatus field in your request, MediaConnect sets it to ENABLED.", + "UpdateFlowEntitlementRequest$EntitlementStatus": "An indication of whether you want to enable the entitlement to allow access, or disable it to stop streaming content to the subscriber\u2019s flow temporarily. If you don\u2019t specify the entitlementStatus field in your request, MediaConnect leaves the value unchanged." + } + }, "FailoverConfig": { "base": "The settings for source failover", "refs": { @@ -345,7 +353,7 @@ "__listOfVpcInterface$member": null } }, - "VpcInterfaceAttachment" : { + "VpcInterfaceAttachment": { "base": "The settings for attaching a VPC interface to an output.", "refs": { "AddOutputRequest$VpcInterfaceAttachment": "The name of the VPC interface attachment to use for this output.", @@ -574,7 +582,7 @@ "VpcInterface$Name": "Immutable and has to be a unique against other VpcInterfaces in this Flow", "VpcInterface$RoleArn": "Role Arn MediaConnect can assumes to create ENIs in customer's account", "VpcInterface$SubnetId": "Subnet must be in the AZ of the Flow", - "VpcInterfaceAttachment$VpcInterfaceName" : "The name of the VPC interface to use for this output.", + "VpcInterfaceAttachment$VpcInterfaceName": "The name of the VPC interface to use for this output.", "VpcInterfaceRequest$Name": "The name of the VPC Interface. 
This value must be unique within the current flow.", "VpcInterfaceRequest$RoleArn": "Role Arn MediaConnect can assumes to create ENIs in customer's account", "VpcInterfaceRequest$SubnetId": "Subnet must be in the AZ of the Flow", diff --git a/models/apis/mediapackage/2017-10-12/api-2.json b/models/apis/mediapackage/2017-10-12/api-2.json index 04006ec7441..12002788ed1 100644 --- a/models/apis/mediapackage/2017-10-12/api-2.json +++ b/models/apis/mediapackage/2017-10-12/api-2.json @@ -569,7 +569,8 @@ "enum": [ "NONE", "SCTE35_ENHANCED", - "PASSTHROUGH" + "PASSTHROUGH", + "DATERANGE" ], "type": "string" }, @@ -916,8 +917,8 @@ } }, "required": [ - "Id", - "ChannelId" + "ChannelId", + "Id" ], "type": "structure" }, @@ -2513,4 +2514,4 @@ "type": "string" } } -} +} \ No newline at end of file diff --git a/models/apis/mediapackage/2017-10-12/docs-2.json b/models/apis/mediapackage/2017-10-12/docs-2.json index 23ed8610565..418d2898358 100644 --- a/models/apis/mediapackage/2017-10-12/docs-2.json +++ b/models/apis/mediapackage/2017-10-12/docs-2.json @@ -25,9 +25,9 @@ "AdMarkers" : { "base" : null, "refs" : { - "HlsManifest$AdMarkers" : "This setting controls how ad markers are included in the packaged OriginEndpoint.\n\"NONE\" will omit all SCTE-35 ad markers from the output.\n\"PASSTHROUGH\" causes the manifest to contain a copy of the SCTE-35 ad\nmarkers (comments) taken directly from the input HTTP Live Streaming (HLS) manifest.\n\"SCTE35_ENHANCED\" generates ad markers and blackout tags based on SCTE-35\nmessages in the input source.\n", - "HlsManifestCreateOrUpdateParameters$AdMarkers" : "This setting controls how ad markers are included in the packaged OriginEndpoint.\n\"NONE\" will omit all SCTE-35 ad markers from the output.\n\"PASSTHROUGH\" causes the manifest to contain a copy of the SCTE-35 ad\nmarkers (comments) taken directly from the input HTTP Live Streaming (HLS) manifest.\n\"SCTE35_ENHANCED\" generates ad markers and blackout tags based on SCTE-35\nmessages in the input source.\n", - "HlsPackage$AdMarkers" : "This setting controls how ad markers are included in the packaged OriginEndpoint.\n\"NONE\" will omit all SCTE-35 ad markers from the output.\n\"PASSTHROUGH\" causes the manifest to contain a copy of the SCTE-35 ad\nmarkers (comments) taken directly from the input HTTP Live Streaming (HLS) manifest.\n\"SCTE35_ENHANCED\" generates ad markers and blackout tags based on SCTE-35\nmessages in the input source.\n" + "HlsManifest$AdMarkers" : "This setting controls how ad markers are included in the packaged OriginEndpoint.\n\"NONE\" will omit all SCTE-35 ad markers from the output.\n\"PASSTHROUGH\" causes the manifest to contain a copy of the SCTE-35 ad\nmarkers (comments) taken directly from the input HTTP Live Streaming (HLS) manifest.\n\"SCTE35_ENHANCED\" generates ad markers and blackout tags based on SCTE-35\nmessages in the input source.\n\"DATERANGE\" inserts EXT-X-DATERANGE tags to signal ad and program transition events \nin HLS and CMAF manifests. 
For this option, you must set a programDateTimeIntervalSeconds value \nthat is greater than 0.\n", + "HlsManifestCreateOrUpdateParameters$AdMarkers" : "This setting controls how ad markers are included in the packaged OriginEndpoint.\n\"NONE\" will omit all SCTE-35 ad markers from the output.\n\"PASSTHROUGH\" causes the manifest to contain a copy of the SCTE-35 ad\nmarkers (comments) taken directly from the input HTTP Live Streaming (HLS) manifest.\n\"SCTE35_ENHANCED\" generates ad markers and blackout tags based on SCTE-35\nmessages in the input source.\n\"DATERANGE\" inserts EXT-X-DATERANGE tags to signal ad and program transition events \nin HLS and CMAF manifests. For this option, you must set a programDateTimeIntervalSeconds value \nthat is greater than 0.\n", + "HlsPackage$AdMarkers" : "This setting controls how ad markers are included in the packaged OriginEndpoint.\n\"NONE\" will omit all SCTE-35 ad markers from the output.\n\"PASSTHROUGH\" causes the manifest to contain a copy of the SCTE-35 ad\nmarkers (comments) taken directly from the input HTTP Live Streaming (HLS) manifest.\n\"SCTE35_ENHANCED\" generates ad markers and blackout tags based on SCTE-35\nmessages in the input source.\n\"DATERANGE\" inserts EXT-X-DATERANGE tags to signal ad and program transition events \nin HLS and CMAF manifests. For this option, you must set a programDateTimeIntervalSeconds value \nthat is greater than 0.\n" } }, "AdTriggers" : { @@ -454,4 +454,4 @@ } } } -} +} \ No newline at end of file diff --git a/models/apis/monitoring/2010-08-01/api-2.json b/models/apis/monitoring/2010-08-01/api-2.json index 6aa50f5d5fa..ddcb3fce3dc 100644 --- a/models/apis/monitoring/2010-08-01/api-2.json +++ b/models/apis/monitoring/2010-08-01/api-2.json @@ -1361,7 +1361,8 @@ "Namespace":{"shape":"Namespace"}, "MetricName":{"shape":"MetricName"}, "Dimensions":{"shape":"DimensionFilters"}, - "NextToken":{"shape":"NextToken"} + "NextToken":{"shape":"NextToken"}, + "RecentlyActive":{"shape":"RecentlyActive"} } }, "ListMetricsOutput":{ @@ -1739,6 +1740,10 @@ "EndTime" ] }, + "RecentlyActive":{ + "type":"string", + "enum":["PT3H"] + }, "ResourceId":{"type":"string"}, "ResourceList":{ "type":"list", diff --git a/models/apis/monitoring/2010-08-01/docs-2.json b/models/apis/monitoring/2010-08-01/docs-2.json index 638ddc7f013..245ef26c63f 100644 --- a/models/apis/monitoring/2010-08-01/docs-2.json +++ b/models/apis/monitoring/2010-08-01/docs-2.json @@ -4,8 +4,8 @@ "operations": { "DeleteAlarms": "

Deletes the specified alarms. You can delete up to 100 alarms in one operation. However, this total can include no more than one composite alarm. For example, you could delete 99 metric alarms and one composite alarm with one operation, but you can't delete two composite alarms with one operation.

In the event of an error, no alarms are deleted.

It is possible to create a loop or cycle of composite alarms, where composite alarm A depends on composite alarm B, and composite alarm B also depends on composite alarm A. In this scenario, you can't delete any composite alarm that is part of the cycle because there is always still a composite alarm that depends on that alarm that you want to delete.

To get out of such a situation, you must break the cycle by changing the rule of one of the composite alarms in the cycle to remove a dependency that creates the cycle. The simplest change to make to break a cycle is to change the AlarmRule of one of the alarms to False.

Additionally, the evaluation of composite alarms stops if CloudWatch detects a cycle in the evaluation path.

", "DeleteAnomalyDetector": "

Deletes the specified anomaly detection model from your account.

", - "DeleteDashboards": "

Deletes all dashboards that you specify. You may specify up to 100 dashboards to delete. If there is an error during this call, no dashboards are deleted.

", - "DeleteInsightRules": "

Permanently deletes the specified Contributor Insights rules.

If you create a rule, delete it, and then re-create it with the same name, historical data from the first time the rule was created may or may not be available.

", + "DeleteDashboards": "

Deletes all dashboards that you specify. You can specify up to 100 dashboards to delete. If there is an error during this call, no dashboards are deleted.

", + "DeleteInsightRules": "

Permanently deletes the specified Contributor Insights rules.

If you create a rule, delete it, and then re-create it with the same name, historical data from the first time the rule was created might not be available.

", "DescribeAlarmHistory": "

Retrieves the history for the specified alarm. You can filter the results by date range or item type. If an alarm name is not specified, the histories for either all metric alarms or all composite alarms are returned.

CloudWatch retains the history of an alarm even if you delete the alarm.

", "DescribeAlarms": "

Retrieves the specified alarms. You can filter the results by specifying a prefix for the alarm name, the alarm state, or a prefix for any action.

", "DescribeAlarmsForMetric": "

Retrieves the alarms for the specified metric. To filter the results, specify a statistic, period, or unit.

", @@ -16,21 +16,21 @@ "EnableAlarmActions": "

Enables the actions for the specified alarms.

", "EnableInsightRules": "

Enables the specified Contributor Insights rules. When rules are enabled, they immediately begin analyzing log data.

", "GetDashboard": "

Displays the details of the dashboard that you specify.

To copy an existing dashboard, use GetDashboard, and then use the data returned within DashboardBody as the template for the new dashboard when you call PutDashboard to create the copy.

", - "GetInsightRuleReport": "

This operation returns the time series data collected by a Contributor Insights rule. The data includes the identity and number of contributors to the log group.

You can also optionally return one or more statistics about each data point in the time series. These statistics can include the following:

", - "GetMetricData": "

You can use the GetMetricData API to retrieve as many as 500 different metrics in a single request, with a total of as many as 100,800 data points. You can also optionally perform math expressions on the values of the returned statistics, to create new time series that represent new insights into your data. For example, using Lambda metrics, you could divide the Errors metric by the Invocations metric to get an error rate time series. For more information about metric math expressions, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide.

Calls to the GetMetricData API have a different pricing structure than calls to GetMetricStatistics. For more information about pricing, see Amazon CloudWatch Pricing.

Amazon CloudWatch retains metric data as follows:

Data points that are initially published with a shorter period are aggregated together for long-term storage. For example, if you collect data using a period of 1 minute, the data remains available for 15 days with 1-minute resolution. After 15 days, this data is still available, but is aggregated and retrievable only with a resolution of 5 minutes. After 63 days, the data is further aggregated and is available with a resolution of 1 hour.

If you omit Unit in your request, all data that was collected with any unit is returned, along with the corresponding units that were specified when the data was reported to CloudWatch. If you specify a unit, the operation returns only data data that was collected with that unit specified. If you specify a unit that does not match the data collected, the results of the operation are null. CloudWatch does not perform unit conversions.

", + "GetInsightRuleReport": "

This operation returns the time series data collected by a Contributor Insights rule. The data includes the identity and number of contributors to the log group.

You can also optionally return one or more statistics about each data point in the time series. These statistics can include the following:

", + "GetMetricData": "

You can use the GetMetricData API to retrieve as many as 500 different metrics in a single request, with a total of as many as 100,800 data points. You can also optionally perform math expressions on the values of the returned statistics, to create new time series that represent new insights into your data. For example, using Lambda metrics, you could divide the Errors metric by the Invocations metric to get an error rate time series. For more information about metric math expressions, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide.

Calls to the GetMetricData API have a different pricing structure than calls to GetMetricStatistics. For more information about pricing, see Amazon CloudWatch Pricing.

Amazon CloudWatch retains metric data as follows:

Data points that are initially published with a shorter period are aggregated together for long-term storage. For example, if you collect data using a period of 1 minute, the data remains available for 15 days with 1-minute resolution. After 15 days, this data is still available, but is aggregated and retrievable only with a resolution of 5 minutes. After 63 days, the data is further aggregated and is available with a resolution of 1 hour.

If you omit Unit in your request, all data that was collected with any unit is returned, along with the corresponding units that were specified when the data was reported to CloudWatch. If you specify a unit, the operation returns only data that was collected with that unit specified. If you specify a unit that does not match the data collected, the results of the operation are null. CloudWatch does not perform unit conversions.
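As a concrete version of the Lambda error-rate example above, a metric math query with the AWS SDK for Go v1 could look roughly like this; the one-hour window, period, and query IDs are illustrative:

```go
package metrics

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudwatch"
)

// lambdaErrorRate divides the Errors metric by the Invocations metric with a math expression.
func lambdaErrorRate(svc *cloudwatch.CloudWatch) (*cloudwatch.GetMetricDataOutput, error) {
	end := time.Now()
	sum := func(id, metric string) *cloudwatch.MetricDataQuery {
		return &cloudwatch.MetricDataQuery{
			Id: aws.String(id),
			MetricStat: &cloudwatch.MetricStat{
				Metric: &cloudwatch.Metric{
					Namespace:  aws.String("AWS/Lambda"),
					MetricName: aws.String(metric),
				},
				Period: aws.Int64(300),
				Stat:   aws.String("Sum"),
			},
			ReturnData: aws.Bool(false), // only the derived error-rate series is returned
		}
	}
	return svc.GetMetricData(&cloudwatch.GetMetricDataInput{
		StartTime: aws.Time(end.Add(-1 * time.Hour)),
		EndTime:   aws.Time(end),
		MetricDataQueries: []*cloudwatch.MetricDataQuery{
			sum("errors", "Errors"),
			sum("invocations", "Invocations"),
			{
				Id:         aws.String("errorRate"),
				Expression: aws.String("errors / invocations"),
				Label:      aws.String("Lambda error rate"),
			},
		},
	})
}
```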

", "GetMetricStatistics": "

Gets statistics for the specified metric.

The maximum number of data points returned from a single call is 1,440. If you request more than 1,440 data points, CloudWatch returns an error. To reduce the number of data points, you can narrow the specified time range and make multiple requests across adjacent time ranges, or you can increase the specified period. Data points are not returned in chronological order.

CloudWatch aggregates data points based on the length of the period that you specify. For example, if you request statistics with a one-hour period, CloudWatch aggregates all data points with time stamps that fall within each one-hour period. Therefore, the number of values aggregated by CloudWatch is larger than the number of data points returned.

CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true:

Percentile statistics are not available for metrics when any of the metric values are negative numbers.

Amazon CloudWatch retains metric data as follows:

Data points that are initially published with a shorter period are aggregated together for long-term storage. For example, if you collect data using a period of 1 minute, the data remains available for 15 days with 1-minute resolution. After 15 days, this data is still available, but is aggregated and retrievable only with a resolution of 5 minutes. After 63 days, the data is further aggregated and is available with a resolution of 1 hour.

CloudWatch started retaining 5-minute and 1-hour metric data as of July 9, 2016.

For information about metrics and dimensions supported by AWS services, see the Amazon CloudWatch Metrics and Dimensions Reference in the Amazon CloudWatch User Guide.

", "GetMetricWidgetImage": "

You can use the GetMetricWidgetImage API to retrieve a snapshot graph of one or more Amazon CloudWatch metrics as a bitmap image. You can then embed this image into your services and products, such as wiki pages, reports, and documents. You could also retrieve images regularly, such as every minute, and create your own custom live dashboard.

The graph you retrieve can include all CloudWatch metric graph features, including metric math and horizontal and vertical annotations.

There is a limit of 20 transactions per second for this API. Each GetMetricWidgetImage action has the following limits:

", "ListDashboards": "

Returns a list of the dashboards for your account. If you include DashboardNamePrefix, only those dashboards with names starting with the prefix are listed. Otherwise, all dashboards in your account are listed.

ListDashboards returns up to 1000 results on one page. If there are more than 1000 dashboards, you can call ListDashboards again and include the value you received for NextToken in the first call, to receive the next 1000 results.

", - "ListMetrics": "

List the specified metrics. You can use the returned metrics with GetMetricData or GetMetricStatistics to obtain statistical data.

Up to 500 results are returned for any one call. To retrieve additional results, use the returned token with subsequent calls.

After you create a metric, allow up to fifteen minutes before the metric appears. Statistics about the metric, however, are available sooner using GetMetricData or GetMetricStatistics.

", + "ListMetrics": "

List the specified metrics. You can use the returned metrics with GetMetricData or GetMetricStatistics to obtain statistical data.

Up to 500 results are returned for any one call. To retrieve additional results, use the returned token with subsequent calls.

After you create a metric, allow up to 15 minutes before the metric appears. You can see statistics about the metric sooner by using GetMetricData or GetMetricStatistics.

ListMetrics doesn't return information about metrics if those metrics haven't reported data in the past two weeks. To retrieve those metrics, use GetMetricData or GetMetricStatistics.

", "ListTagsForResource": "

Displays the tags associated with a CloudWatch resource. Currently, alarms and Contributor Insights rules support tagging.

", "PutAnomalyDetector": "

Creates an anomaly detection model for a CloudWatch metric. You can use the model to display a band of expected normal values when the metric is graphed.

For more information, see CloudWatch Anomaly Detection.

", "PutCompositeAlarm": "

Creates or updates a composite alarm. When you create a composite alarm, you specify a rule expression for the alarm that takes into account the alarm states of other alarms that you have created. The composite alarm goes into ALARM state only if all conditions of the rule are met.

The alarms specified in a composite alarm's rule expression can include metric alarms and other composite alarms.

Using composite alarms can reduce alarm noise. You can create multiple metric alarms, and also create a composite alarm and set up alerts only for the composite alarm. For example, you could create a composite alarm that goes into ALARM state only when more than one of the underlying metric alarms are in ALARM state.

Currently, the only alarm actions that can be taken by composite alarms are notifying SNS topics.

It is possible to create a loop or cycle of composite alarms, where composite alarm A depends on composite alarm B, and composite alarm B also depends on composite alarm A. In this scenario, you can't delete any composite alarm that is part of the cycle because there is always still a composite alarm that depends on that alarm that you want to delete.

To get out of such a situation, you must break the cycle by changing the rule of one of the composite alarms in the cycle to remove a dependency that creates the cycle. The simplest change to make to break a cycle is to change the AlarmRule of one of the alarms to False.

Additionally, the evaluation of composite alarms stops if CloudWatch detects a cycle in the evaluation path.

When this operation creates an alarm, the alarm state is immediately set to INSUFFICIENT_DATA. The alarm is then evaluated and its state is set appropriately. Any actions associated with the new state are then executed. For a composite alarm, this initial time after creation is the only time that the alarm can be in INSUFFICIENT_DATA state.

When you update an existing alarm, its state is left unchanged, but the update completely overwrites the previous configuration of the alarm.
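A sketch of the rule expression described above, using the AWS SDK for Go v1; the alarm names, SNS topic ARN, and account ID are placeholders, and the two metric alarms are assumed to exist already:

```go
// svc is assumed to be an existing *cloudwatch.CloudWatch client; aws, cloudwatch, and fmt imports assumed.
_, err := svc.PutCompositeAlarm(&cloudwatch.PutCompositeAlarmInput{
	AlarmName:        aws.String("service-health"),
	AlarmDescription: aws.String("Fires when either underlying metric alarm is in ALARM state"),
	AlarmRule:        aws.String(`ALARM("cpu-high") OR ALARM("error-rate-high")`),
	AlarmActions: []*string{
		aws.String("arn:aws:sns:us-east-1:111122223333:oncall"), // composite alarms can notify SNS topics
	},
})
if err != nil {
	fmt.Println("PutCompositeAlarm failed:", err)
}
```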

", "PutDashboard": "

Creates a dashboard if it does not already exist, or updates an existing dashboard. If you update a dashboard, the entire contents are replaced with what you specify here.

All dashboards in your account are global, not region-specific.

A simple way to create a dashboard using PutDashboard is to copy an existing dashboard. To copy an existing dashboard using the console, you can load the dashboard and then use the View/edit source command in the Actions menu to display the JSON block for that dashboard. Another way to copy a dashboard is to use GetDashboard, and then use the data returned within DashboardBody as the template for the new dashboard when you call PutDashboard.

When you create a dashboard with PutDashboard, a good practice is to add a text widget at the top of the dashboard with a message that the dashboard was created by script and should not be changed in the console. This message could also point console users to the location of the DashboardBody script or the CloudFormation template used to create the dashboard.

", - "PutInsightRule": "

Creates a Contributor Insights rule. Rules evaluate log events in a CloudWatch Logs log group, enabling you to find contributor data for the log events in that log group. For more information, see Using Contributor Insights to Analyze High-Cardinality Data.

If you create a rule, delete it, and then re-create it with the same name, historical data from the first time the rule was created may or may not be available.

", + "PutInsightRule": "

Creates a Contributor Insights rule. Rules evaluate log events in a CloudWatch Logs log group, enabling you to find contributor data for the log events in that log group. For more information, see Using Contributor Insights to Analyze High-Cardinality Data.

If you create a rule, delete it, and then re-create it with the same name, historical data from the first time the rule was created might not be available.

", "PutMetricAlarm": "

Creates or updates an alarm and associates it with the specified metric, metric math expression, or anomaly detection model.

Alarms based on anomaly detection models cannot have Auto Scaling actions.

When this operation creates an alarm, the alarm state is immediately set to INSUFFICIENT_DATA. The alarm is then evaluated and its state is set appropriately. Any actions associated with the new state are then executed.

When you update an existing alarm, its state is left unchanged, but the update completely overwrites the previous configuration of the alarm.

If you are an IAM user, you must have Amazon EC2 permissions for some alarm operations:

If you have read/write permissions for Amazon CloudWatch but not for Amazon EC2, you can still create an alarm, but the stop or terminate actions are not performed. However, if you are later granted the required permissions, the alarm actions that you created earlier are performed.

If you are using an IAM role (for example, an EC2 instance profile), you cannot stop or terminate the instance using alarm actions. However, you can still see the alarm state and perform any other actions such as Amazon SNS notifications or Auto Scaling policies.

If you are using temporary security credentials granted using AWS STS, you cannot stop or terminate an EC2 instance using alarm actions.

The first time you create an alarm in the AWS Management Console, the CLI, or by using the PutMetricAlarm API, CloudWatch creates the necessary service-linked role for you. The service-linked role is called AWSServiceRoleForCloudWatchEvents. For more information, see AWS service-linked role.

", "PutMetricData": "

Publishes metric data points to Amazon CloudWatch. CloudWatch associates the data points with the specified metric. If the specified metric does not exist, CloudWatch creates the metric. When CloudWatch creates a metric, it can take up to fifteen minutes for the metric to appear in calls to ListMetrics.

You can publish either individual data points in the Value field, or arrays of values and the number of times each value occurred during the period by using the Values and Counts fields in the MetricDatum structure. Using the Values and Counts method enables you to publish up to 150 values per metric with one PutMetricData request, and supports retrieving percentile statistics on this data.

Each PutMetricData request is limited to 40 KB in size for HTTP POST requests. You can send a payload compressed by gzip. Each request is also limited to no more than 20 different metrics.

Although the Value parameter accepts numbers of type Double, CloudWatch rejects values that are either too small or too large. Values must be in the range of -2^360 to 2^360. In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported.

You can use up to 10 dimensions per metric to further clarify what data the metric collects. Each dimension consists of a Name and Value pair. For more information about specifying dimensions, see Publishing Metrics in the Amazon CloudWatch User Guide.

Data points with time stamps from 24 hours ago or longer can take at least 48 hours to become available for GetMetricData or GetMetricStatistics from the time they are submitted. Data points with time stamps between 3 and 24 hours ago can take as much as 2 hours to become available for GetMetricData or GetMetricStatistics.

CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true:

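A short sketch of the Values and Counts form with the AWS SDK for Go v1; the namespace, metric name, and sample data are made up, and svc is an existing cloudwatch client:

```go
// Publishes three distinct latency values and how many times each occurred,
// instead of sending 127 individual data points.
_, err := svc.PutMetricData(&cloudwatch.PutMetricDataInput{
	Namespace: aws.String("MyApp"),
	MetricData: []*cloudwatch.MetricDatum{{
		MetricName: aws.String("RequestLatency"),
		Unit:       aws.String("Milliseconds"),
		Values:     []*float64{aws.Float64(12), aws.Float64(45), aws.Float64(310)},
		Counts:     []*float64{aws.Float64(100), aws.Float64(20), aws.Float64(7)},
	}},
})
```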
", - "SetAlarmState": "

Temporarily sets the state of an alarm for testing purposes. When the updated state differs from the previous value, the action configured for the appropriate state is invoked. For example, if your alarm is configured to send an Amazon SNS message when an alarm is triggered, temporarily changing the alarm state to ALARM sends an SNS message.

Metric alarms returns to their actual state quickly, often within seconds. Because the metric alarm state change happens quickly, it is typically only visible in the alarm's History tab in the Amazon CloudWatch console or through DescribeAlarmHistory.

If you use SetAlarmState on a composite alarm, the composite alarm is not guaranteed to return to its actual state. It will return to its actual state only once any of its children alarms change state. It is also re-evaluated if you update its configuration.

If an alarm triggers EC2 Auto Scaling policies or application Auto Scaling policies, you must include information in the StateReasonData parameter to enable the policy to take the correct action.

", - "TagResource": "

Assigns one or more tags (key-value pairs) to the specified CloudWatch resource. Currently, the only CloudWatch resources that can be tagged are alarms and Contributor Insights rules.

Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only resources with certain tag values.

Tags don't have any semantic meaning to AWS and are interpreted strictly as strings of characters.

You can use the TagResource action with an alarm that already has tags. If you specify a new tag key for the alarm, this tag is appended to the list of tags associated with the alarm. If you specify a tag key that is already associated with the alarm, the new tag value that you specify replaces the previous value for that tag.

You can associate as many as 50 tags with a CloudWatch resource.

", + "SetAlarmState": "

Temporarily sets the state of an alarm for testing purposes. When the updated state differs from the previous value, the action configured for the appropriate state is invoked. For example, if your alarm is configured to send an Amazon SNS message when an alarm is triggered, temporarily changing the alarm state to ALARM sends an SNS message.

Metric alarms return to their actual state quickly, often within seconds. Because the metric alarm state change happens quickly, it is typically only visible in the alarm's History tab in the Amazon CloudWatch console or through DescribeAlarmHistory.

If you use SetAlarmState on a composite alarm, the composite alarm is not guaranteed to return to its actual state. It returns to its actual state only once any of its children alarms change state. It is also reevaluated if you update its configuration.

If an alarm triggers EC2 Auto Scaling policies or application Auto Scaling policies, you must include information in the StateReasonData parameter to enable the policy to take the correct action.

", + "TagResource": "

Assigns one or more tags (key-value pairs) to the specified CloudWatch resource. Currently, the only CloudWatch resources that can be tagged are alarms and Contributor Insights rules.

Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.

Tags don't have any semantic meaning to AWS and are interpreted strictly as strings of characters.

You can use the TagResource action with an alarm that already has tags. If you specify a new tag key for the alarm, this tag is appended to the list of tags associated with the alarm. If you specify a tag key that is already associated with the alarm, the new tag value that you specify replaces the previous value for that tag.

You can associate as many as 50 tags with a CloudWatch resource.
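For completeness, tagging an existing alarm with the AWS SDK for Go v1 looks roughly like this; the ARN, tag key, and tag value are placeholders, and svc is an existing cloudwatch client:

```go
_, err := svc.TagResource(&cloudwatch.TagResourceInput{
	ResourceARN: aws.String("arn:aws:cloudwatch:us-east-1:111122223333:alarm:my-alarm"),
	Tags: []*cloudwatch.Tag{
		{Key: aws.String("team"), Value: aws.String("platform")},
	},
})
```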

", "UntagResource": "

Removes one or more tags from the specified resource.

" }, "shapes": { @@ -84,12 +84,12 @@ "AlarmNames$member": null, "CompositeAlarm$AlarmName": "

The name of the alarm.

", "DescribeAlarmHistoryInput$AlarmName": "

The name of the alarm.

", - "DescribeAlarmsInput$ChildrenOfAlarmName": "

If you use this parameter and specify the name of a composite alarm, the operation returns information about the \"children\" alarms of the alarm you specify. These are the metric alarms and composite alarms referenced in the AlarmRule field of the composite alarm that you specify in ChildrenOfAlarmName. Information about the composite alarm that you name in ChildrenOfAlarmName is not returned.

If you specify ChildrenOfAlarmName, you cannot specify any other parameters in the request except for MaxRecords and NextToken. If you do so, you will receive a validation error.

Only the Alarm Name, ARN, StateValue (OK/ALARM/INSUFFICIENT_DATA), and StateUpdatedTimestamp information are returned by this operation when you use this parameter. To get complete information about these alarms, perform another DescribeAlarms operation and specify the parent alarm names in the AlarmNames parameter.

", - "DescribeAlarmsInput$ParentsOfAlarmName": "

If you use this parameter and specify the name of a metric or composite alarm, the operation returns information about the \"parent\" alarms of the alarm you specify. These are the composite alarms that have AlarmRule parameters that reference the alarm named in ParentsOfAlarmName. Information about the alarm that you specify in ParentsOfAlarmName is not returned.

If you specify ParentsOfAlarmName, you cannot specify any other parameters in the request except for MaxRecords and NextToken. If you do so, you will receive a validation error.

Only the Alarm Name and ARN are returned by this operation when you use this parameter. To get complete information about these alarms, perform another DescribeAlarms operation and specify the parent alarm names in the AlarmNames parameter.

", + "DescribeAlarmsInput$ChildrenOfAlarmName": "

If you use this parameter and specify the name of a composite alarm, the operation returns information about the \"children\" alarms of the alarm you specify. These are the metric alarms and composite alarms referenced in the AlarmRule field of the composite alarm that you specify in ChildrenOfAlarmName. Information about the composite alarm that you name in ChildrenOfAlarmName is not returned.

If you specify ChildrenOfAlarmName, you cannot specify any other parameters in the request except for MaxRecords and NextToken. If you do so, you receive a validation error.

Only the Alarm Name, ARN, StateValue (OK/ALARM/INSUFFICIENT_DATA), and StateUpdatedTimestamp information are returned by this operation when you use this parameter. To get complete information about these alarms, perform another DescribeAlarms operation and specify the parent alarm names in the AlarmNames parameter.

", + "DescribeAlarmsInput$ParentsOfAlarmName": "

If you use this parameter and specify the name of a metric or composite alarm, the operation returns information about the \"parent\" alarms of the alarm you specify. These are the composite alarms that have AlarmRule parameters that reference the alarm named in ParentsOfAlarmName. Information about the alarm that you specify in ParentsOfAlarmName is not returned.

If you specify ParentsOfAlarmName, you cannot specify any other parameters in the request except for MaxRecords and NextToken. If you do so, you receive a validation error.

Only the Alarm Name and ARN are returned by this operation when you use this parameter. To get complete information about these alarms, perform another DescribeAlarms operation and specify the parent alarm names in the AlarmNames parameter.

", "MetricAlarm$AlarmName": "

The name of the alarm.

", - "PutCompositeAlarmInput$AlarmName": "

The name for the composite alarm. This name must be unique within your AWS account.

", - "PutMetricAlarmInput$AlarmName": "

The name for the alarm. This name must be unique within your AWS account.

", - "SetAlarmStateInput$AlarmName": "

The name for the alarm. This name must be unique within the AWS account. The maximum length is 255 characters.

" + "PutCompositeAlarmInput$AlarmName": "

The name for the composite alarm. This name must be unique within the Region.

", + "PutMetricAlarmInput$AlarmName": "

The name for the alarm. This name must be unique within the Region.

", + "SetAlarmStateInput$AlarmName": "

The name of the alarm.

" } }, "AlarmNamePrefix": { @@ -131,9 +131,9 @@ "AmazonResourceName": { "base": null, "refs": { - "ListTagsForResourceInput$ResourceARN": "

The ARN of the CloudWatch resource that you want to view tags for.

The ARN format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name

The ARN format of a Contributor Insights rule is arn:aws:cloudwatch:Region:account-id:insight-rule:insight-rule-name

For more information on ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference.

", - "TagResourceInput$ResourceARN": "

The ARN of the CloudWatch resource that you're adding tags to.

The ARN format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name

The ARN format of a Contributor Insights rule is arn:aws:cloudwatch:Region:account-id:insight-rule:insight-rule-name

For more information on ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference.

", - "UntagResourceInput$ResourceARN": "

The ARN of the CloudWatch resource that you're removing tags from.

The ARN format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name

The ARN format of a Contributor Insights rule is arn:aws:cloudwatch:Region:account-id:insight-rule:insight-rule-name

For more information on ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference.

" + "ListTagsForResourceInput$ResourceARN": "

The ARN of the CloudWatch resource that you want to view tags for.

The ARN format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name

The ARN format of a Contributor Insights rule is arn:aws:cloudwatch:Region:account-id:insight-rule:insight-rule-name

For more information about ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference.

", + "TagResourceInput$ResourceARN": "

The ARN of the CloudWatch resource that you're adding tags to.

The ARN format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name

The ARN format of a Contributor Insights rule is arn:aws:cloudwatch:Region:account-id:insight-rule:insight-rule-name

For more information about ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference.

", + "UntagResourceInput$ResourceARN": "

The ARN of the CloudWatch resource that you're removing tags from.

The ARN format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name

The ARN format of a Contributor Insights rule is arn:aws:cloudwatch:Region:account-id:insight-rule:insight-rule-name

For more information about ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference.

" } }, "AnomalyDetector": { @@ -302,7 +302,7 @@ "base": null, "refs": { "DashboardInvalidInputError$dashboardValidationMessages": null, - "PutDashboardOutput$DashboardValidationMessages": "

If the input for PutDashboard was correct and the dashboard was successfully created or modified, this result is empty.

If this result includes only warning messages, then the input was valid enough for the dashboard to be created or modified, but some elements of the dashboard may not render.

If this result includes error messages, the input was not valid and the operation failed.

" + "PutDashboardOutput$DashboardValidationMessages": "

If the input for PutDashboard was correct and the dashboard was successfully created or modified, this result is empty.

If this result includes only warning messages, then the input was valid enough for the dashboard to be created or modified, but some elements of the dashboard might not render.

If this result includes error messages, the input was not valid and the operation failed.

" } }, "DataPath": { @@ -447,7 +447,7 @@ } }, "Dimension": { - "base": "

Expands the identity of a metric.

", + "base": "

A dimension is a name/value pair that is part of the identity of a metric. You can assign up to 10 dimensions to a metric. Because dimensions are part of the unique identifier for a metric, whenever you add a unique name/value pair to one of your metrics, you are creating a new variation of that metric.

", "refs": { "Dimensions$member": null } @@ -467,14 +467,14 @@ "DimensionName": { "base": null, "refs": { - "Dimension$Name": "

The name of the dimension.

", + "Dimension$Name": "

The name of the dimension. Dimension names cannot contain blank spaces or non-ASCII characters.

", "DimensionFilter$Name": "

The dimension name to be matched.

" } }, "DimensionValue": { "base": null, "refs": { - "Dimension$Value": "

The value representing the dimension measurement.

", + "Dimension$Value": "

The value of the dimension.

", "DimensionFilter$Value": "

The value of the dimension to be matched.

" } }, @@ -737,7 +737,7 @@ "InsightRuleMaxResults": { "base": null, "refs": { - "DescribeInsightRulesInput$MaxResults": "

This parameter is not currently used. Reserved for future use. If it is used in the future, the maximum value may be different.

" + "DescribeInsightRulesInput$MaxResults": "

This parameter is not currently used. Reserved for future use. If it is used in the future, the maximum value might be different.

" } }, "InsightRuleMetricDatapoint": { @@ -755,7 +755,7 @@ "InsightRuleMetricList": { "base": null, "refs": { - "GetInsightRuleReportInput$Metrics": "

Specifies which metrics to use for aggregation of contributor values for the report. You can specify one or more of the following metrics:

" + "GetInsightRuleReportInput$Metrics": "

Specifies which metrics to use for aggregation of contributor values for the report. You can specify one or more of the following metrics:

" } }, "InsightRuleMetricName": { @@ -990,7 +990,7 @@ "MetricDataResultMessages": { "base": null, "refs": { - "GetMetricDataOutput$Messages": "

Contains a message about this GetMetricData operation, if the operation results in such a message. An example of a message that may be returned is Maximum number of allowed metrics exceeded. If there is a message, as much of the operation as possible is still executed.

A message appears here only if it is related to the global GetMetricData operation. Any message about a specific metric returned by the operation appears in the MetricDataResult object returned for that metric.

", + "GetMetricDataOutput$Messages": "

Contains a message about this GetMetricData operation, if the operation results in such a message. An example of a message that might be returned is Maximum number of allowed metrics exceeded. If there is a message, as much of the operation as possible is still executed.

A message appears here only if it is related to the global GetMetricData operation. Any message about a specific metric returned by the operation appears in the MetricDataResult object returned for that metric.

", "MetricDataResult$Messages": "

A list of messages with additional information about the data returned.

" } }, @@ -1060,13 +1060,13 @@ "MetricWidgetImage": { "base": null, "refs": { - "GetMetricWidgetImageOutput$MetricWidgetImage": "

The image of the graph, in the output format specified.

" + "GetMetricWidgetImageOutput$MetricWidgetImage": "

The image of the graph, in the output format specified. The output is base64-encoded.

" } }, "Metrics": { "base": null, "refs": { - "ListMetricsOutput$Metrics": "

The metrics.

" + "ListMetricsOutput$Metrics": "

The metrics that match your request.

" } }, "MissingRequiredParameterException": { @@ -1106,7 +1106,7 @@ "ListDashboardsInput$NextToken": "

The token returned by a previous call to indicate that there is more data available.

", "ListDashboardsOutput$NextToken": "

The token that marks the start of the next batch of returned results.

", "ListMetricsInput$NextToken": "

The token returned by a previous call to indicate that there is more data available.

", - "ListMetricsOutput$NextToken": "

The token that marks the start of the next batch of returned results.

" + "ListMetricsOutput$NextToken": "

The token that marks the start of the next batch of returned results.

" } }, "OutputFormat": { @@ -1130,7 +1130,7 @@ "MetricAlarm$Period": "

The period, in seconds, over which the statistic is applied.

", "MetricDataQuery$Period": "

The granularity, in seconds, of the returned data points. For metrics with regular resolution, a period can be as short as one minute (60 seconds) and must be a multiple of 60. For high-resolution metrics that are collected at intervals of less than one minute, the period can be 1, 5, 10, 30, 60, or any multiple of 60. High-resolution metrics are those metrics stored by a PutMetricData operation that includes a StorageResolution of 1 second.

", "MetricStat$Period": "

The granularity, in seconds, of the returned data points. For metrics with regular resolution, a period can be as short as one minute (60 seconds) and must be a multiple of 60. For high-resolution metrics that are collected at intervals of less than one minute, the period can be 1, 5, 10, 30, 60, or any multiple of 60. High-resolution metrics are those metrics stored by a PutMetricData call that includes a StorageResolution of 1 second.

If the StartTime parameter specifies a time stamp that is greater than 3 hours ago, you must specify the period as follows or no data points in that time range are returned:

", - "PutMetricAlarmInput$Period": "

The length, in seconds, used each time the metric specified in MetricName is evaluated. Valid values are 10, 30, and any multiple of 60.

Period is required for alarms based on static thresholds. If you are creating an alarm based on a metric math expression, you specify the period for each metric within the objects in the Metrics array.

Be sure to specify 10 or 30 only for metrics that are stored by a PutMetricData call with a StorageResolution of 1. If you specify a period of 10 or 30 for a metric that does not have sub-minute resolution, the alarm still attempts to gather data at the period rate that you specify. In this case, it does not receive data for the attempts that do not correspond to a one-minute data resolution, and the alarm may often lapse into INSUFFICENT_DATA status. Specifying 10 or 30 also sets this alarm as a high-resolution alarm, which has a higher charge than other alarms. For more information about pricing, see Amazon CloudWatch Pricing.

An alarm's total current evaluation period can be no longer than one day, so Period multiplied by EvaluationPeriods cannot be more than 86,400 seconds.

" + "PutMetricAlarmInput$Period": "

The length, in seconds, used each time the metric specified in MetricName is evaluated. Valid values are 10, 30, and any multiple of 60.

Period is required for alarms based on static thresholds. If you are creating an alarm based on a metric math expression, you specify the period for each metric within the objects in the Metrics array.

Be sure to specify 10 or 30 only for metrics that are stored by a PutMetricData call with a StorageResolution of 1. If you specify a period of 10 or 30 for a metric that does not have sub-minute resolution, the alarm still attempts to gather data at the period rate that you specify. In this case, it does not receive data for the attempts that do not correspond to a one-minute data resolution, and the alarm might often lapse into INSUFFICIENT_DATA status. Specifying 10 or 30 also sets this alarm as a high-resolution alarm, which has a higher charge than other alarms. For more information about pricing, see Amazon CloudWatch Pricing.

An alarm's total current evaluation period can be no longer than one day, so Period multiplied by EvaluationPeriods cannot be more than 86,400 seconds.

" } }, "PutAnomalyDetectorInput": { @@ -1184,6 +1184,12 @@ "AnomalyDetectorExcludedTimeRanges$member": null } }, + "RecentlyActive": { + "base": null, + "refs": { + "ListMetricsInput$RecentlyActive": "

To filter the results to show only metrics that have had data points published in the past three hours, specify this parameter with a value of PT3H. This is the only valid value for this parameter.

The results that are returned are an approximation of the value you specify. There is a low probability that the returned results include metrics with last published data as much as 40 minutes more than the specified time interval.
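Passing the new parameter through the AWS SDK for Go v1 looks roughly like this; the namespace is only an example:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatch"
)

func main() {
	svc := cloudwatch.New(session.Must(session.NewSession()))
	out, err := svc.ListMetrics(&cloudwatch.ListMetricsInput{
		Namespace:      aws.String("AWS/Lambda"), // illustrative namespace
		RecentlyActive: aws.String("PT3H"),       // PT3H is the only supported value
	})
	if err != nil {
		fmt.Println("ListMetrics failed:", err)
		return
	}
	for _, m := range out.Metrics {
		fmt.Println(aws.StringValue(m.MetricName))
	}
}
```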

" + } + }, "ResourceId": { "base": null, "refs": { @@ -1258,11 +1264,11 @@ "refs": { "Datapoint$Unit": "

The standard unit for the data point.

", "DescribeAlarmsForMetricInput$Unit": "

The unit for the metric.

", - "GetMetricStatisticsInput$Unit": "

The unit for a given metric. If you omit Unit, all data that was collected with any unit is returned, along with the corresponding units that were specified when the data was reported to CloudWatch. If you specify a unit, the operation returns only data data that was collected with that unit specified. If you specify a unit that does not match the data collected, the results of the operation are null. CloudWatch does not perform unit conversions.

", + "GetMetricStatisticsInput$Unit": "

The unit for a given metric. If you omit Unit, all data that was collected with any unit is returned, along with the corresponding units that were specified when the data was reported to CloudWatch. If you specify a unit, the operation returns only data that was collected with that unit specified. If you specify a unit that does not match the data collected, the results of the operation are null. CloudWatch does not perform unit conversions.

", "MetricAlarm$Unit": "

The unit of the metric associated with the alarm.

", "MetricDatum$Unit": "

When you are using a Put operation, this defines what unit you want to use when storing the metric.

In a Get operation, this displays the unit that is used for the metric.

", - "MetricStat$Unit": "

When you are using a Put operation, this defines what unit you want to use when storing the metric.

In a Get operation, if you omit Unit then all data that was collected with any unit is returned, along with the corresponding units that were specified when the data was reported to CloudWatch. If you specify a unit, the operation returns only data data that was collected with that unit specified. If you specify a unit that does not match the data collected, the results of the operation are null. CloudWatch does not perform unit conversions.

", - "PutMetricAlarmInput$Unit": "

The unit of measure for the statistic. For example, the units for the Amazon EC2 NetworkIn metric are Bytes because NetworkIn tracks the number of bytes that an instance receives on all network interfaces. You can also specify a unit when you create a custom metric. Units help provide conceptual meaning to your data. Metric data points that specify a unit of measure, such as Percent, are aggregated separately.

If you don't specify Unit, CloudWatch retrieves all unit types that have been published for the metric and attempts to evaluate the alarm. Usually metrics are published with only one unit, so the alarm will work as intended.

However, if the metric is published with multiple types of units and you don't specify a unit, the alarm's behavior is not defined and will behave un-predictably.

We recommend omitting Unit so that you don't inadvertently specify an incorrect unit that is not published for this metric. Doing so causes the alarm to be stuck in the INSUFFICIENT DATA state.

" + "MetricStat$Unit": "

When you are using a Put operation, this defines what unit you want to use when storing the metric.

In a Get operation, if you omit Unit then all data that was collected with any unit is returned, along with the corresponding units that were specified when the data was reported to CloudWatch. If you specify a unit, the operation returns only data that was collected with that unit specified. If you specify a unit that does not match the data collected, the results of the operation are null. CloudWatch does not perform unit conversions.

", + "PutMetricAlarmInput$Unit": "

The unit of measure for the statistic. For example, the units for the Amazon EC2 NetworkIn metric are Bytes because NetworkIn tracks the number of bytes that an instance receives on all network interfaces. You can also specify a unit when you create a custom metric. Units help provide conceptual meaning to your data. Metric data points that specify a unit of measure, such as Percent, are aggregated separately.

If you don't specify Unit, CloudWatch retrieves all unit types that have been published for the metric and attempts to evaluate the alarm. Usually, metrics are published with only one unit, so the alarm works as intended.

However, if the metric is published with multiple types of units and you don't specify a unit, the alarm's behavior is not defined and it behaves unpredictably.

We recommend omitting Unit so that you don't inadvertently specify an incorrect unit that is not published for this metric. Doing so causes the alarm to be stuck in the INSUFFICIENT DATA state.

" } }, "Stat": { @@ -1354,7 +1360,7 @@ "ListTagsForResourceOutput$Tags": "

The list of tag keys and values associated with the resource you specified.

", "PutCompositeAlarmInput$Tags": "

A list of key-value pairs to associate with the composite alarm. You can associate as many as 50 tags with an alarm.

Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only resources with certain tag values.

", "PutInsightRuleInput$Tags": "

A list of key-value pairs to associate with the Contributor Insights rule. You can associate as many as 50 tags with a rule.

Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only the resources that have certain tag values.

To be able to associate tags with a rule, you must have the cloudwatch:TagResource permission in addition to the cloudwatch:PutInsightRule permission.

If you are using this operation to update an existing Contributor Insights rule, any tags you specify in this parameter are ignored. To change the tags of an existing rule, use TagResource.

", - "PutMetricAlarmInput$Tags": "

A list of key-value pairs to associate with the alarm. You can associate as many as 50 tags with an alarm.

Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only resources with certain tag values.

", + "PutMetricAlarmInput$Tags": "

A list of key-value pairs to associate with the alarm. You can associate as many as 50 tags with an alarm.

Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.

", "TagResourceInput$Tags": "

The list of key-value pairs to associate with the alarm.

" } }, diff --git a/models/apis/mq/2017-11-27/api-2.json b/models/apis/mq/2017-11-27/api-2.json index 7ed0540a3ce..9dd062b0cf4 100644 --- a/models/apis/mq/2017-11-27/api-2.json +++ b/models/apis/mq/2017-11-27/api-2.json @@ -515,6 +515,10 @@ } }, "shapes" : { + "AuthenticationStrategy" : { + "type" : "string", + "enum" : [ "SIMPLE", "LDAP" ] + }, "AvailabilityZone" : { "type" : "structure", "members" : { @@ -686,6 +690,10 @@ "shape" : "__string", "locationName" : "arn" }, + "AuthenticationStrategy" : { + "shape" : "AuthenticationStrategy", + "locationName" : "authenticationStrategy" + }, "Created" : { "shape" : "__timestampIso8601", "locationName" : "created" @@ -787,6 +795,10 @@ "CreateBrokerInput" : { "type" : "structure", "members" : { + "AuthenticationStrategy" : { + "shape" : "AuthenticationStrategy", + "locationName" : "authenticationStrategy" + }, "AutoMinorVersionUpgrade" : { "shape" : "__boolean", "locationName" : "autoMinorVersionUpgrade" @@ -824,6 +836,10 @@ "shape" : "__string", "locationName" : "hostInstanceType" }, + "LdapServerMetadata" : { + "shape" : "LdapServerMetadataInput", + "locationName" : "ldapServerMetadata" + }, "Logs" : { "shape" : "Logs", "locationName" : "logs" @@ -874,6 +890,10 @@ "CreateBrokerRequest" : { "type" : "structure", "members" : { + "AuthenticationStrategy" : { + "shape" : "AuthenticationStrategy", + "locationName" : "authenticationStrategy" + }, "AutoMinorVersionUpgrade" : { "shape" : "__boolean", "locationName" : "autoMinorVersionUpgrade" @@ -911,6 +931,10 @@ "shape" : "__string", "locationName" : "hostInstanceType" }, + "LdapServerMetadata" : { + "shape" : "LdapServerMetadataInput", + "locationName" : "ldapServerMetadata" + }, "Logs" : { "shape" : "Logs", "locationName" : "logs" @@ -961,6 +985,10 @@ "CreateConfigurationInput" : { "type" : "structure", "members" : { + "AuthenticationStrategy" : { + "shape" : "AuthenticationStrategy", + "locationName" : "authenticationStrategy" + }, "EngineType" : { "shape" : "EngineType", "locationName" : "engineType" @@ -986,6 +1014,10 @@ "shape" : "__string", "locationName" : "arn" }, + "AuthenticationStrategy" : { + "shape" : "AuthenticationStrategy", + "locationName" : "authenticationStrategy" + }, "Created" : { "shape" : "__timestampIso8601", "locationName" : "created" @@ -1007,6 +1039,10 @@ "CreateConfigurationRequest" : { "type" : "structure", "members" : { + "AuthenticationStrategy" : { + "shape" : "AuthenticationStrategy", + "locationName" : "authenticationStrategy" + }, "EngineType" : { "shape" : "EngineType", "locationName" : "engineType" @@ -1032,6 +1068,10 @@ "shape" : "__string", "locationName" : "arn" }, + "AuthenticationStrategy" : { + "shape" : "AuthenticationStrategy", + "locationName" : "authenticationStrategy" + }, "Created" : { "shape" : "__timestampIso8601", "locationName" : "created" @@ -1274,6 +1314,10 @@ "DescribeBrokerOutput" : { "type" : "structure", "members" : { + "AuthenticationStrategy" : { + "shape" : "AuthenticationStrategy", + "locationName" : "authenticationStrategy" + }, "AutoMinorVersionUpgrade" : { "shape" : "__boolean", "locationName" : "autoMinorVersionUpgrade" @@ -1326,6 +1370,10 @@ "shape" : "__string", "locationName" : "hostInstanceType" }, + "LdapServerMetadata" : { + "shape" : "LdapServerMetadataOutput", + "locationName" : "ldapServerMetadata" + }, "Logs" : { "shape" : "LogsSummary", "locationName" : "logs" @@ -1334,6 +1382,10 @@ "shape" : "WeeklyStartTime", "locationName" : "maintenanceWindowStartTime" }, + "PendingAuthenticationStrategy" : { + "shape" : 
"AuthenticationStrategy", + "locationName" : "pendingAuthenticationStrategy" + }, "PendingEngineVersion" : { "shape" : "__string", "locationName" : "pendingEngineVersion" @@ -1342,6 +1394,10 @@ "shape" : "__string", "locationName" : "pendingHostInstanceType" }, + "PendingLdapServerMetadata" : { + "shape" : "LdapServerMetadataOutput", + "locationName" : "pendingLdapServerMetadata" + }, "PendingSecurityGroups" : { "shape" : "__listOf__string", "locationName" : "pendingSecurityGroups" @@ -1386,6 +1442,10 @@ "DescribeBrokerResponse" : { "type" : "structure", "members" : { + "AuthenticationStrategy" : { + "shape" : "AuthenticationStrategy", + "locationName" : "authenticationStrategy" + }, "AutoMinorVersionUpgrade" : { "shape" : "__boolean", "locationName" : "autoMinorVersionUpgrade" @@ -1438,6 +1498,10 @@ "shape" : "__string", "locationName" : "hostInstanceType" }, + "LdapServerMetadata" : { + "shape" : "LdapServerMetadataOutput", + "locationName" : "ldapServerMetadata" + }, "Logs" : { "shape" : "LogsSummary", "locationName" : "logs" @@ -1446,6 +1510,10 @@ "shape" : "WeeklyStartTime", "locationName" : "maintenanceWindowStartTime" }, + "PendingAuthenticationStrategy" : { + "shape" : "AuthenticationStrategy", + "locationName" : "pendingAuthenticationStrategy" + }, "PendingEngineVersion" : { "shape" : "__string", "locationName" : "pendingEngineVersion" @@ -1454,6 +1522,10 @@ "shape" : "__string", "locationName" : "pendingHostInstanceType" }, + "PendingLdapServerMetadata" : { + "shape" : "LdapServerMetadataOutput", + "locationName" : "pendingLdapServerMetadata" + }, "PendingSecurityGroups" : { "shape" : "__listOf__string", "locationName" : "pendingSecurityGroups" @@ -1502,6 +1574,10 @@ "shape" : "__string", "locationName" : "arn" }, + "AuthenticationStrategy" : { + "shape" : "AuthenticationStrategy", + "locationName" : "authenticationStrategy" + }, "Created" : { "shape" : "__timestampIso8601", "locationName" : "created" @@ -1734,6 +1810,100 @@ "httpStatusCode" : 500 } }, + "LdapServerMetadataInput" : { + "type" : "structure", + "members" : { + "Hosts" : { + "shape" : "__listOf__string", + "locationName" : "hosts" + }, + "RoleBase" : { + "shape" : "__string", + "locationName" : "roleBase" + }, + "RoleName" : { + "shape" : "__string", + "locationName" : "roleName" + }, + "RoleSearchMatching" : { + "shape" : "__string", + "locationName" : "roleSearchMatching" + }, + "RoleSearchSubtree" : { + "shape" : "__boolean", + "locationName" : "roleSearchSubtree" + }, + "ServiceAccountPassword" : { + "shape" : "__string", + "locationName" : "serviceAccountPassword" + }, + "ServiceAccountUsername" : { + "shape" : "__string", + "locationName" : "serviceAccountUsername" + }, + "UserBase" : { + "shape" : "__string", + "locationName" : "userBase" + }, + "UserRoleName" : { + "shape" : "__string", + "locationName" : "userRoleName" + }, + "UserSearchMatching" : { + "shape" : "__string", + "locationName" : "userSearchMatching" + }, + "UserSearchSubtree" : { + "shape" : "__boolean", + "locationName" : "userSearchSubtree" + } + } + }, + "LdapServerMetadataOutput" : { + "type" : "structure", + "members" : { + "Hosts" : { + "shape" : "__listOf__string", + "locationName" : "hosts" + }, + "RoleBase" : { + "shape" : "__string", + "locationName" : "roleBase" + }, + "RoleName" : { + "shape" : "__string", + "locationName" : "roleName" + }, + "RoleSearchMatching" : { + "shape" : "__string", + "locationName" : "roleSearchMatching" + }, + "RoleSearchSubtree" : { + "shape" : "__boolean", + "locationName" : "roleSearchSubtree" + }, + 
"ServiceAccountUsername" : { + "shape" : "__string", + "locationName" : "serviceAccountUsername" + }, + "UserBase" : { + "shape" : "__string", + "locationName" : "userBase" + }, + "UserRoleName" : { + "shape" : "__string", + "locationName" : "userRoleName" + }, + "UserSearchMatching" : { + "shape" : "__string", + "locationName" : "userSearchMatching" + }, + "UserSearchSubtree" : { + "shape" : "__boolean", + "locationName" : "userSearchSubtree" + } + } + }, "ListBrokersOutput" : { "type" : "structure", "members" : { @@ -2108,6 +2278,10 @@ "UpdateBrokerInput" : { "type" : "structure", "members" : { + "AuthenticationStrategy" : { + "shape" : "AuthenticationStrategy", + "locationName" : "authenticationStrategy" + }, "AutoMinorVersionUpgrade" : { "shape" : "__boolean", "locationName" : "autoMinorVersionUpgrade" @@ -2124,6 +2298,10 @@ "shape" : "__string", "locationName" : "hostInstanceType" }, + "LdapServerMetadata" : { + "shape" : "LdapServerMetadataInput", + "locationName" : "ldapServerMetadata" + }, "Logs" : { "shape" : "Logs", "locationName" : "logs" @@ -2137,6 +2315,10 @@ "UpdateBrokerOutput" : { "type" : "structure", "members" : { + "AuthenticationStrategy" : { + "shape" : "AuthenticationStrategy", + "locationName" : "authenticationStrategy" + }, "AutoMinorVersionUpgrade" : { "shape" : "__boolean", "locationName" : "autoMinorVersionUpgrade" @@ -2157,6 +2339,10 @@ "shape" : "__string", "locationName" : "hostInstanceType" }, + "LdapServerMetadata" : { + "shape" : "LdapServerMetadataOutput", + "locationName" : "ldapServerMetadata" + }, "Logs" : { "shape" : "Logs", "locationName" : "logs" @@ -2170,6 +2356,10 @@ "UpdateBrokerRequest" : { "type" : "structure", "members" : { + "AuthenticationStrategy" : { + "shape" : "AuthenticationStrategy", + "locationName" : "authenticationStrategy" + }, "AutoMinorVersionUpgrade" : { "shape" : "__boolean", "locationName" : "autoMinorVersionUpgrade" @@ -2191,6 +2381,10 @@ "shape" : "__string", "locationName" : "hostInstanceType" }, + "LdapServerMetadata" : { + "shape" : "LdapServerMetadataInput", + "locationName" : "ldapServerMetadata" + }, "Logs" : { "shape" : "Logs", "locationName" : "logs" @@ -2205,6 +2399,10 @@ "UpdateBrokerResponse" : { "type" : "structure", "members" : { + "AuthenticationStrategy" : { + "shape" : "AuthenticationStrategy", + "locationName" : "authenticationStrategy" + }, "AutoMinorVersionUpgrade" : { "shape" : "__boolean", "locationName" : "autoMinorVersionUpgrade" @@ -2225,6 +2423,10 @@ "shape" : "__string", "locationName" : "hostInstanceType" }, + "LdapServerMetadata" : { + "shape" : "LdapServerMetadataOutput", + "locationName" : "ldapServerMetadata" + }, "Logs" : { "shape" : "Logs", "locationName" : "logs" diff --git a/models/apis/mq/2017-11-27/docs-2.json b/models/apis/mq/2017-11-27/docs-2.json index 53eb9e2c33f..3e4d1eff35c 100644 --- a/models/apis/mq/2017-11-27/docs-2.json +++ b/models/apis/mq/2017-11-27/docs-2.json @@ -26,6 +26,19 @@ "UpdateUser" : "Updates the information for an ActiveMQ user." 
}, "shapes" : { + "AuthenticationStrategy" : { + "base" : "The authentication strategy used to secure the broker.", + "refs" : { + "Configuration$AuthenticationStrategy" : "The authentication strategy associated with the configuration.", + "CreateBrokerInput$AuthenticationStrategy" : "The authentication strategy used to secure the broker.", + "CreateConfigurationInput$AuthenticationStrategy" : "The authentication strategy associated with the configuration.", + "CreateConfigurationOutput$AuthenticationStrategy" : "The authentication strategy associated with the configuration.", + "DescribeBrokerOutput$AuthenticationStrategy" : "The authentication strategy used to secure the broker.", + "DescribeBrokerOutput$PendingAuthenticationStrategy" : "The authentication strategy that will be applied when the broker is rebooted.", + "UpdateBrokerInput$AuthenticationStrategy" : "The authentication strategy used to secure the broker.", + "UpdateBrokerOutput$AuthenticationStrategy" : "The authentication strategy used to secure the broker." + } + }, "AvailabilityZone" : { "base" : "Name of the availability zone.", "refs" : { @@ -127,7 +140,7 @@ "refs" : { } }, "CreateBrokerInput" : { - "base" : "Required. The time period during which Amazon MQ applies pending updates or patches to the broker.", + "base" : "Required. The version of the broker engine. For a list of supported engine versions, see https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/broker-engine.html", "refs" : { } }, "CreateBrokerOutput" : { @@ -213,6 +226,21 @@ "base" : "Returns information about an error.", "refs" : { } }, + "LdapServerMetadataInput" : { + "base" : "The metadata of the LDAP server used to authenticate and authorize connections to the broker.", + "refs" : { + "CreateBrokerInput$LdapServerMetadata" : "The metadata of the LDAP server used to authenticate and authorize connections to the broker.", + "UpdateBrokerInput$LdapServerMetadata" : "The metadata of the LDAP server used to authenticate and authorize connections to the broker." + } + }, + "LdapServerMetadataOutput" : { + "base" : "The metadata of the LDAP server used to authenticate and authorize connections to the broker.", + "refs" : { + "DescribeBrokerOutput$LdapServerMetadata" : "The metadata of the LDAP server used to authenticate and authorize connections to the broker.", + "DescribeBrokerOutput$PendingLdapServerMetadata" : "The metadata of the LDAP server that will be used to authenticate and authorize connections to the broker once it is rebooted.", + "UpdateBrokerOutput$LdapServerMetadata" : "The metadata of the LDAP server used to authenticate and authorize connections to the broker." + } + }, "ListBrokersOutput" : { "base" : "A list of information about all brokers.", "refs" : { } @@ -328,6 +356,10 @@ "DescribeBrokerOutput$PubliclyAccessible" : "Required. Enables connections from applications outside of the VPC that hosts the broker's subnets.", "DescribeUserOutput$ConsoleAccess" : "Enables access to the the ActiveMQ Web Console for the ActiveMQ user.", "EncryptionOptions$UseAwsOwnedKey" : "Enables the use of an AWS owned CMK using AWS Key Management Service (KMS).", + "LdapServerMetadataInput$RoleSearchSubtree" : "The directory search scope for the role. If set to true, scope is to search the entire sub-tree.", + "LdapServerMetadataInput$UserSearchSubtree" : "The directory search scope for the user. If set to true, scope is to search the entire sub-tree.", + "LdapServerMetadataOutput$RoleSearchSubtree" : "The directory search scope for the role. 
If set to true, scope is to search the entire sub-tree.", + "LdapServerMetadataOutput$UserSearchSubtree" : "The directory search scope for the user. If set to true, scope is to search the entire sub-tree.", "Logs$Audit" : "Enables audit logging. Every user management action made using JMX or the ActiveMQ Web Console is logged.", "Logs$General" : "Enables general logging.", "LogsSummary$Audit" : "Enables audit logging. Every user management action made using JMX or the ActiveMQ Web Console is logged.", @@ -449,6 +481,8 @@ "DescribeBrokerOutput$SecurityGroups" : "The list of security groups (1 minimum, 5 maximum) that authorizes connections to brokers.", "DescribeBrokerOutput$SubnetIds" : "The list of groups (2 maximum) that define which subnets and IP ranges the broker can use from different Availability Zones. A SINGLE_INSTANCE deployment requires one subnet (for example, the default subnet). An ACTIVE_STANDBY_MULTI_AZ deployment requires two subnets.", "DescribeUserOutput$Groups" : "The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.", + "LdapServerMetadataInput$Hosts" : "Fully qualified domain name of the LDAP server. Optional failover server.", + "LdapServerMetadataOutput$Hosts" : "Fully qualified domain name of the LDAP server. Optional failover server.", "UpdateBrokerInput$SecurityGroups" : "The list of security groups (1 minimum, 5 maximum) that authorizes connections to brokers.", "UpdateBrokerOutput$SecurityGroups" : "The list of security groups (1 minimum, 5 maximum) that authorizes connections to brokers.", "UpdateUserInput$Groups" : "The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.", @@ -511,10 +545,25 @@ "DescribeConfigurationRevisionOutput$Description" : "The description of the configuration.", "DescribeUserOutput$BrokerId" : "Required. The unique ID that Amazon MQ generates for the broker.", "DescribeUserOutput$Username" : "Required. The username of the ActiveMQ user. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long.", - "EncryptionOptions$KmsKeyId" : "The customer master key (CMK) to use for the AWS Key Management Service (KMS). This key is used to encrypt your data at rest. If not provided, Amazon MQ will use a default CMK to encrypt your data.", + "EncryptionOptions$KmsKeyId" : "The symmetric customer master key (CMK) to use for the AWS Key Management Service (KMS). This key is used to encrypt your data at rest. 
If not provided, Amazon MQ will use a default CMK to encrypt your data.", "EngineVersion$Name" : "Id for the version.", "Error$ErrorAttribute" : "The attribute which caused the error.", "Error$Message" : "The explanation of the error.", + "LdapServerMetadataInput$RoleBase" : "Fully qualified name of the directory to search for a user’s groups.", + "LdapServerMetadataInput$RoleName" : "Specifies the LDAP attribute that identifies the group name attribute in the object returned from the group membership query.", + "LdapServerMetadataInput$RoleSearchMatching" : "The search criteria for groups.", + "LdapServerMetadataInput$ServiceAccountPassword" : "Service account password.", + "LdapServerMetadataInput$ServiceAccountUsername" : "Service account username.", + "LdapServerMetadataInput$UserBase" : "Fully qualified name of the directory where you want to search for users.", + "LdapServerMetadataInput$UserRoleName" : "Specifies the name of the LDAP attribute for the user group membership.", + "LdapServerMetadataInput$UserSearchMatching" : "The search criteria for users.", + "LdapServerMetadataOutput$RoleBase" : "Fully qualified name of the directory to search for a user’s groups.", + "LdapServerMetadataOutput$RoleName" : "Specifies the LDAP attribute that identifies the group name attribute in the object returned from the group membership query.", + "LdapServerMetadataOutput$RoleSearchMatching" : "The search criteria for groups.", + "LdapServerMetadataOutput$ServiceAccountUsername" : "Service account username.", + "LdapServerMetadataOutput$UserBase" : "Fully qualified name of the directory where you want to search for users.", + "LdapServerMetadataOutput$UserRoleName" : "Specifies the name of the LDAP attribute for the user group membership.", + "LdapServerMetadataOutput$UserSearchMatching" : "The search criteria for users.", "ListBrokersOutput$NextToken" : "The token that specifies the next page of results Amazon MQ should return. To request the first page, leave nextToken empty.", "ListConfigurationRevisionsOutput$ConfigurationId" : "The unique ID that Amazon MQ generates for the configuration.", "ListConfigurationRevisionsOutput$NextToken" : "The token that specifies the next page of results Amazon MQ should return. 
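
To make the new LDAP fields above concrete, here is a hedged aws-sdk-go sketch of CreateBroker with AuthenticationStrategy set to LDAP. The broker name, engine version, host, bind DNs, and search filters are placeholder assumptions, and the Go struct names (mq.CreateBrokerRequest, mq.LdapServerMetadataInput) are assumed to follow the generated shapes in this model.

// Sketch only: supplying the LDAP fields added in this patch when creating
// an ActiveMQ broker. All names, hosts, and directory DNs are placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/mq"
)

func main() {
	svc := mq.New(session.Must(session.NewSession()))

	out, err := svc.CreateBroker(&mq.CreateBrokerRequest{
		BrokerName:              aws.String("example-broker"), // placeholder
		EngineType:              aws.String("ACTIVEMQ"),
		EngineVersion:           aws.String("5.15.12"), // placeholder version
		HostInstanceType:        aws.String("mq.m5.large"),
		DeploymentMode:          aws.String("SINGLE_INSTANCE"),
		PubliclyAccessible:      aws.Bool(false),
		AutoMinorVersionUpgrade: aws.Bool(true),
		AuthenticationStrategy:  aws.String("LDAP"), // new in this release; default is SIMPLE
		LdapServerMetadata: &mq.LdapServerMetadataInput{
			Hosts:                  aws.StringSlice([]string{"ldap.example.com"}), // placeholder
			ServiceAccountUsername: aws.String("cn=mqservice,dc=example,dc=com"),  // placeholder
			ServiceAccountPassword: aws.String("REDACTED"),                        // placeholder
			UserBase:               aws.String("ou=users,dc=example,dc=com"),
			UserSearchMatching:     aws.String("uid={0}"),
			RoleBase:               aws.String("ou=groups,dc=example,dc=com"),
			RoleSearchMatching:     aws.String("(member=uid={1})"),
		},
	})
	if err != nil {
		fmt.Println("CreateBroker failed:", err)
		return
	}
	fmt.Println("broker:", aws.StringValue(out.BrokerArn))
}

The ListBrokers paginator added in this patch should likewise surface as a generated ListBrokersPages helper for walking BrokerSummaries page by page.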
To request the first page, leave nextToken empty.", diff --git a/models/apis/mq/2017-11-27/paginators-1.json b/models/apis/mq/2017-11-27/paginators-1.json index f3b7195d8e1..68f394dbf60 100644 --- a/models/apis/mq/2017-11-27/paginators-1.json +++ b/models/apis/mq/2017-11-27/paginators-1.json @@ -1,3 +1,10 @@ { - "pagination" : { } + "pagination": { + "ListBrokers": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "BrokerSummaries" + } + } } \ No newline at end of file diff --git a/models/apis/sagemaker/2017-07-24/api-2.json b/models/apis/sagemaker/2017-07-24/api-2.json index 34f222affaa..5c6294d3a70 100644 --- a/models/apis/sagemaker/2017-07-24/api-2.json +++ b/models/apis/sagemaker/2017-07-24/api-2.json @@ -353,6 +353,15 @@ {"shape":"ResourceInUse"} ] }, + "CreateWorkforce":{ + "name":"CreateWorkforce", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateWorkforceRequest"}, + "output":{"shape":"CreateWorkforceResponse"} + }, "CreateWorkteam":{ "name":"CreateWorkteam", "http":{ @@ -443,6 +452,7 @@ "input":{"shape":"DeleteFlowDefinitionRequest"}, "output":{"shape":"DeleteFlowDefinitionResponse"}, "errors":[ + {"shape":"ResourceInUse"}, {"shape":"ResourceNotFound"} ] }, @@ -546,6 +556,15 @@ {"shape":"ResourceNotFound"} ] }, + "DeleteWorkforce":{ + "name":"DeleteWorkforce", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteWorkforceRequest"}, + "output":{"shape":"DeleteWorkforceResponse"} + }, "DeleteWorkteam":{ "name":"DeleteWorkteam", "http":{ @@ -1155,6 +1174,15 @@ "input":{"shape":"ListUserProfilesRequest"}, "output":{"shape":"ListUserProfilesResponse"} }, + "ListWorkforces":{ + "name":"ListWorkforces", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListWorkforcesRequest"}, + "output":{"shape":"ListWorkforcesResponse"} + }, "ListWorkteams":{ "name":"ListWorkteams", "http":{ @@ -1171,7 +1199,10 @@ "requestUri":"/" }, "input":{"shape":"RenderUiTemplateRequest"}, - "output":{"shape":"RenderUiTemplateResponse"} + "output":{"shape":"RenderUiTemplateResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ] }, "Search":{ "name":"Search", @@ -1810,7 +1841,7 @@ "ModelDataUrl" ], "members":{ - "Image":{"shape":"Image"}, + "Image":{"shape":"ContainerImage"}, "ModelDataUrl":{"shape":"Url"}, "Environment":{"shape":"EnvironmentMap"} } @@ -2211,6 +2242,19 @@ "type":"list", "member":{"shape":"Cidr"} }, + "ClientId":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\w+-]+" + }, + "ClientSecret":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[\\w+=/-]+", + "sensitive":true + }, "CodeRepositoryArn":{ "type":"string", "max":2048, @@ -2268,11 +2312,16 @@ "type":"list", "member":{"shape":"CodeRepositorySummary"} }, - "CognitoClientId":{ - "type":"string", - "max":128, - "min":1, - "pattern":"[\\w+]+" + "CognitoConfig":{ + "type":"structure", + "required":[ + "UserPool", + "ClientId" + ], + "members":{ + "UserPool":{"shape":"CognitoUserPool"}, + "ClientId":{"shape":"ClientId"} + } }, "CognitoMemberDefinition":{ "type":"structure", @@ -2284,7 +2333,7 @@ "members":{ "UserPool":{"shape":"CognitoUserPool"}, "UserGroup":{"shape":"CognitoUserGroup"}, - "ClientId":{"shape":"CognitoClientId"} + "ClientId":{"shape":"ClientId"} } }, "CognitoUserGroup":{ @@ -2351,7 +2400,6 @@ "CompilationJobName", "CompilationJobArn", "CreationTime", - "CompilationTargetDevice", "CompilationJobStatus" ], "members":{ @@ -2361,10 +2409,19 @@ 
"CompilationStartTime":{"shape":"Timestamp"}, "CompilationEndTime":{"shape":"Timestamp"}, "CompilationTargetDevice":{"shape":"TargetDevice"}, + "CompilationTargetPlatformOs":{"shape":"TargetPlatformOs"}, + "CompilationTargetPlatformArch":{"shape":"TargetPlatformArch"}, + "CompilationTargetPlatformAccelerator":{"shape":"TargetPlatformAccelerator"}, "LastModifiedTime":{"shape":"LastModifiedTime"}, "CompilationJobStatus":{"shape":"CompilationJobStatus"} } }, + "CompilerOptions":{ + "type":"string", + "max":1024, + "min":7, + "pattern":"^\\{.+\\}$" + }, "CompressionType":{ "type":"string", "enum":[ @@ -2409,7 +2466,7 @@ "type":"structure", "members":{ "ContainerHostname":{"shape":"ContainerHostname"}, - "Image":{"shape":"Image"}, + "Image":{"shape":"ContainerImage"}, "Mode":{"shape":"ContainerMode"}, "ModelDataUrl":{"shape":"Url"}, "Environment":{"shape":"EnvironmentMap"}, @@ -2437,6 +2494,11 @@ "max":63, "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" }, + "ContainerImage":{ + "type":"string", + "max":255, + "pattern":"[\\S]+" + }, "ContainerMode":{ "type":"string", "enum":[ @@ -3086,6 +3148,24 @@ "UserProfileArn":{"shape":"UserProfileArn"} } }, + "CreateWorkforceRequest":{ + "type":"structure", + "required":["WorkforceName"], + "members":{ + "CognitoConfig":{"shape":"CognitoConfig"}, + "OidcConfig":{"shape":"OidcConfig"}, + "SourceIpConfig":{"shape":"SourceIpConfig"}, + "WorkforceName":{"shape":"WorkforceName"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateWorkforceResponse":{ + "type":"structure", + "required":["WorkforceArn"], + "members":{ + "WorkforceArn":{"shape":"WorkforceArn"} + } + }, "CreateWorkteamRequest":{ "type":"structure", "required":[ @@ -3095,6 +3175,7 @@ ], "members":{ "WorkteamName":{"shape":"WorkteamName"}, + "WorkforceName":{"shape":"WorkforceName"}, "MemberDefinitions":{"shape":"MemberDefinitions"}, "Description":{"shape":"String200"}, "NotificationConfiguration":{"shape":"NotificationConfiguration"}, @@ -3402,6 +3483,18 @@ "UserProfileName":{"shape":"UserProfileName"} } }, + "DeleteWorkforceRequest":{ + "type":"structure", + "required":["WorkforceName"], + "members":{ + "WorkforceName":{"shape":"WorkforceName"} + } + }, + "DeleteWorkforceResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteWorkteamRequest":{ "type":"structure", "required":["WorkteamName"], @@ -3419,8 +3512,8 @@ "DeployedImage":{ "type":"structure", "members":{ - "SpecifiedImage":{"shape":"Image"}, - "ResolvedImage":{"shape":"Image"}, + "SpecifiedImage":{"shape":"ContainerImage"}, + "ResolvedImage":{"shape":"ContainerImage"}, "ResolutionTime":{"shape":"Timestamp"} } }, @@ -4783,6 +4876,18 @@ "type":"string", "pattern":"^https://([^/]+)/?(.*)$" }, + "Group":{ + "type":"string", + "max":63, + "min":1, + "pattern":"[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}]+" + }, + "Groups":{ + "type":"list", + "member":{"shape":"Group"}, + "max":10, + "min":1 + }, "HookParameters":{ "type":"map", "key":{"shape":"ConfigKey"}, @@ -5135,10 +5240,10 @@ "max":100, "min":0 }, - "Image":{ + "ImageArn":{ "type":"string", - "max":255, - "pattern":"[\\S]+" + "max":256, + "pattern":"^arn:aws(-[\\w]+)*:sagemaker:.+:[0-9]{12}:image/[a-z0-9]([-.]?[a-z0-9])*$" }, "ImageDigest":{ "type":"string", @@ -5382,7 +5487,6 @@ }, "LabelingJobDataSource":{ "type":"structure", - "required":["S3DataSource"], "members":{ "S3DataSource":{"shape":"LabelingJobS3DataSource"} } @@ -5453,6 +5557,7 @@ "LabelingJobStatus":{ "type":"string", "enum":[ + "Initializing", "InProgress", "Completed", "Failed", @@ -6175,6 +6280,34 @@ "NextToken":{"shape":"NextToken"} } 
}, + "ListWorkforcesRequest":{ + "type":"structure", + "members":{ + "SortBy":{"shape":"ListWorkforcesSortByOptions"}, + "SortOrder":{"shape":"SortOrder"}, + "NameContains":{"shape":"WorkforceName"}, + "NextToken":{"shape":"NextToken"}, + "MaxResults":{ + "shape":"MaxResults", + "box":true + } + } + }, + "ListWorkforcesResponse":{ + "type":"structure", + "required":["Workforces"], + "members":{ + "Workforces":{"shape":"Workforces"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListWorkforcesSortByOptions":{ + "type":"string", + "enum":[ + "Name", + "CreateDate" + ] + }, "ListWorkteamsRequest":{ "type":"structure", "members":{ @@ -6266,7 +6399,8 @@ "MemberDefinition":{ "type":"structure", "members":{ - "CognitoMemberDefinition":{"shape":"CognitoMemberDefinition"} + "CognitoMemberDefinition":{"shape":"CognitoMemberDefinition"}, + "OidcMemberDefinition":{"shape":"OidcMemberDefinition"} } }, "MemberDefinitions":{ @@ -6354,7 +6488,7 @@ "required":["Image"], "members":{ "ContainerHostname":{"shape":"ContainerHostname"}, - "Image":{"shape":"Image"}, + "Image":{"shape":"ContainerImage"}, "ImageDigest":{"shape":"ImageDigest"}, "ModelDataUrl":{"shape":"Url"}, "ProductId":{"shape":"ProductId"} @@ -6933,6 +7067,53 @@ "Failed":{"shape":"ObjectiveStatusCounter"} } }, + "OidcConfig":{ + "type":"structure", + "required":[ + "ClientId", + "ClientSecret", + "Issuer", + "AuthorizationEndpoint", + "TokenEndpoint", + "UserInfoEndpoint", + "LogoutEndpoint", + "JwksUri" + ], + "members":{ + "ClientId":{"shape":"ClientId"}, + "ClientSecret":{"shape":"ClientSecret"}, + "Issuer":{"shape":"OidcEndpoint"}, + "AuthorizationEndpoint":{"shape":"OidcEndpoint"}, + "TokenEndpoint":{"shape":"OidcEndpoint"}, + "UserInfoEndpoint":{"shape":"OidcEndpoint"}, + "LogoutEndpoint":{"shape":"OidcEndpoint"}, + "JwksUri":{"shape":"OidcEndpoint"} + } + }, + "OidcConfigForResponse":{ + "type":"structure", + "members":{ + "ClientId":{"shape":"ClientId"}, + "Issuer":{"shape":"OidcEndpoint"}, + "AuthorizationEndpoint":{"shape":"OidcEndpoint"}, + "TokenEndpoint":{"shape":"OidcEndpoint"}, + "UserInfoEndpoint":{"shape":"OidcEndpoint"}, + "LogoutEndpoint":{"shape":"OidcEndpoint"}, + "JwksUri":{"shape":"OidcEndpoint"} + } + }, + "OidcEndpoint":{ + "type":"string", + "max":500, + "pattern":"https://\\S+" + }, + "OidcMemberDefinition":{ + "type":"structure", + "required":["Groups"], + "members":{ + "Groups":{"shape":"Groups"} + } + }, "Operator":{ "type":"string", "enum":[ @@ -6963,13 +7144,12 @@ }, "OutputConfig":{ "type":"structure", - "required":[ - "S3OutputLocation", - "TargetDevice" - ], + "required":["S3OutputLocation"], "members":{ "S3OutputLocation":{"shape":"S3Uri"}, - "TargetDevice":{"shape":"TargetDevice"} + "TargetDevice":{"shape":"TargetDevice"}, + "TargetPlatform":{"shape":"TargetPlatform"}, + "CompilerOptions":{"shape":"CompilerOptions"} } }, "OutputDataConfig":{ @@ -7637,7 +7817,7 @@ "ResourceSpec":{ "type":"structure", "members":{ - "SageMakerImageArn":{"shape":"SageMakerImageArn"}, + "SageMakerImageArn":{"shape":"ImageArn"}, "InstanceType":{"shape":"AppInstanceType"} } }, @@ -7742,11 +7922,6 @@ "max":1024, "pattern":"^(https|s3)://([^/]+)/?(.*)$" }, - "SageMakerImageArn":{ - "type":"string", - "max":256, - "pattern":"^arn:aws(-[\\w]+)*:sagemaker:.+:[0-9]{12}:image/[a-z0-9]([-.]?[a-z0-9])*$" - }, "SamplingPercentage":{ "type":"integer", "max":100, @@ -8176,6 +8351,7 @@ "ml_c5", "ml_p2", "ml_p3", + "ml_g4dn", "ml_inf1", "jetson_tx1", "jetson_tx2", @@ -8191,10 +8367,49 @@ "qcs605", "qcs603", "sitara_am57x", - "amba_cv22" + 
"amba_cv22", + "x86_win32", + "x86_win64" ] }, "TargetObjectiveMetricValue":{"type":"float"}, + "TargetPlatform":{ + "type":"structure", + "required":[ + "Os", + "Arch" + ], + "members":{ + "Os":{"shape":"TargetPlatformOs"}, + "Arch":{"shape":"TargetPlatformArch"}, + "Accelerator":{"shape":"TargetPlatformAccelerator"} + } + }, + "TargetPlatformAccelerator":{ + "type":"string", + "enum":[ + "INTEL_GRAPHICS", + "MALI", + "NVIDIA" + ] + }, + "TargetPlatformArch":{ + "type":"string", + "enum":[ + "X86_64", + "X86", + "ARM64", + "ARM_EABI", + "ARM_EABIHF" + ] + }, + "TargetPlatformOs":{ + "type":"string", + "enum":[ + "ANDROID", + "LINUX" + ] + }, "TaskAvailabilityLifetimeInSeconds":{ "type":"integer", "max":864000, @@ -8471,7 +8686,7 @@ "TrainingChannels" ], "members":{ - "TrainingImage":{"shape":"Image"}, + "TrainingImage":{"shape":"ContainerImage"}, "TrainingImageDigest":{"shape":"ImageDigest"}, "SupportedHyperParameters":{"shape":"HyperParameterSpecifications"}, "SupportedTrainingInstanceTypes":{"shape":"TrainingInstanceTypes"}, @@ -8558,6 +8773,32 @@ "member":{"shape":"TransformInstanceType"}, "min":1 }, + "TransformJob":{ + "type":"structure", + "members":{ + "TransformJobName":{"shape":"TransformJobName"}, + "TransformJobArn":{"shape":"TransformJobArn"}, + "TransformJobStatus":{"shape":"TransformJobStatus"}, + "FailureReason":{"shape":"FailureReason"}, + "ModelName":{"shape":"ModelName"}, + "MaxConcurrentTransforms":{"shape":"MaxConcurrentTransforms"}, + "ModelClientConfig":{"shape":"ModelClientConfig"}, + "MaxPayloadInMB":{"shape":"MaxPayloadInMB"}, + "BatchStrategy":{"shape":"BatchStrategy"}, + "Environment":{"shape":"TransformEnvironmentMap"}, + "TransformInput":{"shape":"TransformInput"}, + "TransformOutput":{"shape":"TransformOutput"}, + "TransformResources":{"shape":"TransformResources"}, + "CreationTime":{"shape":"Timestamp"}, + "TransformStartTime":{"shape":"Timestamp"}, + "TransformEndTime":{"shape":"Timestamp"}, + "LabelingJobArn":{"shape":"LabelingJobArn"}, + "AutoMLJobArn":{"shape":"AutoMLJobArn"}, + "DataProcessing":{"shape":"DataProcessing"}, + "ExperimentConfig":{"shape":"ExperimentConfig"}, + "Tags":{"shape":"TagList"} + } + }, "TransformJobArn":{ "type":"string", "max":256, @@ -8802,7 +9043,8 @@ "members":{ "SourceArn":{"shape":"TrialComponentSourceArn"}, "TrainingJob":{"shape":"TrainingJob"}, - "ProcessingJob":{"shape":"ProcessingJob"} + "ProcessingJob":{"shape":"ProcessingJob"}, + "TransformJob":{"shape":"TransformJob"} } }, "TrialComponentStatus":{ @@ -9099,7 +9341,8 @@ "required":["WorkforceName"], "members":{ "WorkforceName":{"shape":"WorkforceName"}, - "SourceIpConfig":{"shape":"SourceIpConfig"} + "SourceIpConfig":{"shape":"SourceIpConfig"}, + "OidcConfig":{"shape":"OidcConfig"} } }, "UpdateWorkforceResponse":{ @@ -9256,7 +9499,11 @@ "WorkforceName":{"shape":"WorkforceName"}, "WorkforceArn":{"shape":"WorkforceArn"}, "LastUpdatedDate":{"shape":"Timestamp"}, - "SourceIpConfig":{"shape":"SourceIpConfig"} + "SourceIpConfig":{"shape":"SourceIpConfig"}, + "SubDomain":{"shape":"String"}, + "CognitoConfig":{"shape":"CognitoConfig"}, + "OidcConfig":{"shape":"OidcConfigForResponse"}, + "CreateDate":{"shape":"Timestamp"} } }, "WorkforceArn":{ @@ -9270,6 +9517,10 @@ "min":1, "pattern":"^[a-zA-Z0-9]([a-zA-Z0-9\\-])*$" }, + "Workforces":{ + "type":"list", + "member":{"shape":"Workforce"} + }, "Workteam":{ "type":"structure", "required":[ @@ -9282,6 +9533,7 @@ "WorkteamName":{"shape":"WorkteamName"}, "MemberDefinitions":{"shape":"MemberDefinitions"}, 
"WorkteamArn":{"shape":"WorkteamArn"}, + "WorkforceArn":{"shape":"WorkforceArn"}, "ProductListingIds":{"shape":"ProductListings"}, "Description":{"shape":"String200"}, "SubDomain":{"shape":"String"}, diff --git a/models/apis/sagemaker/2017-07-24/docs-2.json b/models/apis/sagemaker/2017-07-24/docs-2.json index d04f85c9788..ffd258544ca 100644 --- a/models/apis/sagemaker/2017-07-24/docs-2.json +++ b/models/apis/sagemaker/2017-07-24/docs-2.json @@ -8,11 +8,11 @@ "CreateApp": "

Creates a running App for the specified UserProfile. Supported Apps are JupyterServer and KernelGateway. This operation is automatically invoked by Amazon SageMaker Studio upon access to the associated Domain, and when new kernel configurations are selected by the user. A user may have multiple Apps active simultaneously.

", "CreateAutoMLJob": "

Creates an AutoPilot job.

After you run an AutoPilot job, you can find the best performing model by calling , and then deploy that model by following the steps described in Step 6.1: Deploy the Model to Amazon SageMaker Hosting Services.

For information about how to use AutoPilot, see Use AutoPilot to Automate Model Development.

", "CreateCodeRepository": "

Creates a Git repository as a resource in your Amazon SageMaker account. You can associate the repository with notebook instances so that you can use Git source control for the notebooks you create. The Git repository is a resource in your Amazon SageMaker account, so it can be associated with more than one notebook instance, and it persists independently from the lifecycle of any notebook instances it is associated with.

The repository can be hosted either in AWS CodeCommit or in any other Git repository.

", - "CreateCompilationJob": "

Starts a model compilation job. After the model has been compiled, Amazon SageMaker saves the resulting model artifacts to an Amazon Simple Storage Service (Amazon S3) bucket that you specify.

If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts with AWS IoT Greengrass. In that case, deploy them as an ML resource.

In the request body, you provide the following:

You can also provide a Tag to track the model compilation job's resource use and costs. The response body contains the CompilationJobArn for the compiled job.

To stop a model compilation job, use StopCompilationJob. To get information about a particular model compilation job, use DescribeCompilationJob. To get information about multiple model compilation jobs, use ListCompilationJobs.

", + "CreateCompilationJob": "

Starts a model compilation job. After the model has been compiled, Amazon SageMaker saves the resulting model artifacts to an Amazon Simple Storage Service (Amazon S3) bucket that you specify.

If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts with AWS IoT Greengrass. In that case, deploy them as an ML resource.

In the request body, you provide the following:

You can also provide a Tag to track the model compilation job's resource use and costs. The response body contains the CompilationJobArn for the compiled job.

To stop a model compilation job, use StopCompilationJob. To get information about a particular model compilation job, use DescribeCompilationJob. To get information about multiple model compilation jobs, use ListCompilationJobs.

", "CreateDomain": "

Creates a Domain used by SageMaker Studio. A domain consists of an associated directory, a list of authorized users, and a variety of security, application, policy, and Amazon Virtual Private Cloud (VPC) configurations. An AWS account is limited to one domain per region. Users within a domain can share notebook files and other artifacts with each other.

When a domain is created, an Amazon Elastic File System (EFS) volume is also created for use by all of the users within the domain. Each user receives a private home directory within the EFS for notebooks, Git repositories, and data files.

All traffic between the domain and the EFS volume is communicated through the specified subnet IDs. All other traffic goes over the Internet through an Amazon SageMaker system VPC. The EFS traffic uses the NFS/TCP protocol over port 2049.

NFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules in order to launch a SageMaker Studio app successfully.

", "CreateEndpoint": "

Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

Use this API to deploy models using Amazon SageMaker hosting services.

For an example that calls this method when deploying a model to Amazon SageMaker hosting services, see Deploy the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto 3)).

You must not delete an EndpointConfig that is in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

The endpoint name must be unique within an AWS Region in your AWS account.

When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

When you call CreateEndpoint, a load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a DynamoDB table supporting Eventually Consistent Reads, the response might not reflect the results of a recently completed write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB, this causes a validation error. If you repeat your read request after a short time, the response should return the latest data. So retry logic is recommended to handle these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.

When Amazon SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. Amazon SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.

If any of the models hosted at this endpoint get model data from an Amazon S3 location, Amazon SageMaker uses AWS Security Token Service to download model artifacts from the S3 path you provided. AWS STS is activated in your IAM user account by default. If you previously deactivated AWS STS for a region, you need to reactivate AWS STS for that region. For more information, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.
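
A hedged sketch of the retry advice above, not an official recipe: read the endpoint config back with DescribeEndpointConfig first, then retry CreateEndpoint a few times. Endpoint and config names are placeholders.

// Sketch only: pre-check the endpoint config, then retry CreateEndpoint.
// Production code would use backoff and inspect the error code.
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	sm := sagemaker.New(session.Must(session.NewSession()))

	cfgName := aws.String("example-endpoint-config") // placeholder

	// Read the config back first to reduce the chance of an eventually
	// consistent read missing it.
	if _, err := sm.DescribeEndpointConfig(&sagemaker.DescribeEndpointConfigInput{
		EndpointConfigName: cfgName,
	}); err != nil {
		fmt.Println("endpoint config not visible yet:", err)
		return
	}

	var err error
	for attempt := 0; attempt < 3; attempt++ {
		_, err = sm.CreateEndpoint(&sagemaker.CreateEndpointInput{
			EndpointName:       aws.String("example-endpoint"), // placeholder
			EndpointConfigName: cfgName,
		})
		if err == nil {
			break
		}
		time.Sleep(2 * time.Second)
	}
	if err != nil {
		fmt.Println("CreateEndpoint failed:", err)
	}
}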

", "CreateEndpointConfig": "

Creates an endpoint configuration that Amazon SageMaker hosting services uses to deploy models. In the configuration, you identify one or more models, created using the CreateModel API, to deploy and the resources that you want Amazon SageMaker to provision. Then you call the CreateEndpoint API.

Use this API if you want to use Amazon SageMaker hosting services to deploy models into production.

In the request, you define a ProductionVariant for each model that you want to deploy. Each ProductionVariant parameter also describes the resources that you want Amazon SageMaker to provision. This includes the number and type of ML compute instances to deploy.

If you are hosting multiple models, you also assign a VariantWeight to specify how much traffic you want to allocate to each model. For example, suppose that you want to host two models, A and B, and you assign traffic weight 2 for model A and 1 for model B. Amazon SageMaker distributes two-thirds of the traffic to Model A, and one-third to model B.
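
A hedged aws-sdk-go sketch of that two-to-one weighting; the config name, model names, and instance type are placeholders.

// Sketch only: variant A gets weight 2 and variant B weight 1, so A should
// receive roughly two-thirds of the traffic.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	sm := sagemaker.New(session.Must(session.NewSession()))

	_, err := sm.CreateEndpointConfig(&sagemaker.CreateEndpointConfigInput{
		EndpointConfigName: aws.String("example-config"), // placeholder
		ProductionVariants: []*sagemaker.ProductionVariant{
			{
				VariantName:          aws.String("model-a"),
				ModelName:            aws.String("model-a"), // placeholder existing model
				InstanceType:         aws.String("ml.m5.large"),
				InitialInstanceCount: aws.Int64(1),
				InitialVariantWeight: aws.Float64(2), // ~2/3 of traffic
			},
			{
				VariantName:          aws.String("model-b"),
				ModelName:            aws.String("model-b"), // placeholder existing model
				InstanceType:         aws.String("ml.m5.large"),
				InitialInstanceCount: aws.Int64(1),
				InitialVariantWeight: aws.Float64(1), // ~1/3 of traffic
			},
		},
	})
	if err != nil {
		fmt.Println("CreateEndpointConfig failed:", err)
	}
}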

For an example that calls this method when deploying a model to Amazon SageMaker hosting services, see Deploy the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto 3)).

When you call CreateEndpoint, a load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a DynamoDB table supporting Eventually Consistent Reads, the response might not reflect the results of a recently completed write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB, this causes a validation error. If you repeat your read request after a short time, the response should return the latest data. So retry logic is recommended to handle these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.

", - "CreateExperiment": "

Creates an Amazon SageMaker experiment. An experiment is a collection of trials that are observed, compared and evaluated as a group. A trial is a set of steps, called trial components, that produce a machine learning model.

The goal of an experiment is to determine the components that produce the best model. Multiple trials are performed, each one isolating and measuring the impact of a change to one or more inputs, while keeping the remaining inputs constant.

When you use Amazon SageMaker Studio or the Amazon SageMaker Python SDK, all experiments, trials, and trial components are automatically tracked, logged, and indexed. When you use the AWS SDK for Python (Boto), you must use the logging APIs provided by the SDK.

You can add tags to experiments, trials, trial components and then use the Search API to search for the tags.

To add a description to an experiment, specify the optional Description parameter. To add a description later, or to change the description, call the UpdateExperiment API.

To get a list of all your experiments, call the ListExperiments API. To view an experiment's properties, call the DescribeExperiment API. To get a list of all the trials associated with an experiment, call the ListTrials API. To create a trial call the CreateTrial API.

", + "CreateExperiment": "

Creates a SageMaker experiment. An experiment is a collection of trials that are observed, compared and evaluated as a group. A trial is a set of steps, called trial components, that produce a machine learning model.

The goal of an experiment is to determine the components that produce the best model. Multiple trials are performed, each one isolating and measuring the impact of a change to one or more inputs, while keeping the remaining inputs constant.

When you use Amazon SageMaker Studio or the Amazon SageMaker Python SDK, all experiments, trials, and trial components are automatically tracked, logged, and indexed. When you use the AWS SDK for Python (Boto), you must use the logging APIs provided by the SDK.

You can add tags to experiments, trials, trial components and then use the Search API to search for the tags.

To add a description to an experiment, specify the optional Description parameter. To add a description later, or to change the description, call the UpdateExperiment API.

To get a list of all your experiments, call the ListExperiments API. To view an experiment's properties, call the DescribeExperiment API. To get a list of all the trials associated with an experiment, call the ListTrials API. To create a trial call the CreateTrial API.

", "CreateFlowDefinition": "

Creates a flow definition.

", "CreateHumanTaskUi": "

Defines the settings you will use for the human review workflow user interface. Reviewers will see a three-panel interface with an instruction area, the item to review, and an input area.

", "CreateHyperParameterTuningJob": "

Starts a hyperparameter tuning job. A hyperparameter tuning job finds the best version of a model by running many training jobs on your dataset using the algorithm you choose and values for hyperparameters within ranges that you specify. It then chooses the hyperparameter values that result in a model that performs the best, as measured by an objective metric that you choose.

", @@ -30,6 +30,7 @@ "CreateTrial": "

Creates an Amazon SageMaker trial. A trial is a set of steps called trial components that produce a machine learning model. A trial is part of a single Amazon SageMaker experiment.

When you use Amazon SageMaker Studio or the Amazon SageMaker Python SDK, all experiments, trials, and trial components are automatically tracked, logged, and indexed. When you use the AWS SDK for Python (Boto), you must use the logging APIs provided by the SDK.

You can add tags to a trial and then use the Search API to search for the tags.

To get a list of all your trials, call the ListTrials API. To view a trial's properties, call the DescribeTrial API. To create a trial component, call the CreateTrialComponent API.

", "CreateTrialComponent": "

Creates a trial component, which is a stage of a machine learning trial. A trial is composed of one or more trial components. A trial component can be used in multiple trials.

Trial components include pre-processing jobs, training jobs, and batch transform jobs.

When you use Amazon SageMaker Studio or the Amazon SageMaker Python SDK, all experiments, trials, and trial components are automatically tracked, logged, and indexed. When you use the AWS SDK for Python (Boto), you must use the logging APIs provided by the SDK.

You can add tags to a trial component and then use the Search API to search for the tags.

CreateTrialComponent can only be invoked from within an Amazon SageMaker managed environment. This includes Amazon SageMaker training jobs, processing jobs, transform jobs, and Amazon SageMaker notebooks. A call to CreateTrialComponent from outside one of these environments results in an error.

", "CreateUserProfile": "

Creates a user profile. A user profile represents a single user within a domain, and is the main way to reference a \"person\" for the purposes of sharing, reporting, and other user-oriented features. This entity is created when a user onboards to Amazon SageMaker Studio. If an administrator invites a person by email or imports them from SSO, a user profile is automatically created. A user profile is the primary holder of settings for an individual user and has a reference to the user's private Amazon Elastic File System (EFS) home directory.

", + "CreateWorkforce": "

Use this operation to create a workforce. This operation will return an error if a workforce already exists in the AWS Region that you specify. You can only create one workforce in each AWS Region.

If you want to create a new workforce in an AWS Region where a workforce already exists, use the API operation to delete the existing workforce and then use this operation to create a new workforce.

To create a private workforce using Amazon Cognito, you must specify a Cognito user pool in CognitoConfig. You can also create an Amazon Cognito workforce using the Amazon SageMaker console. For more information, see Create a Private Workforce (Amazon Cognito).

To create a private workforce using your own OIDC Identity Provider (IdP), specify your IdP configuration in OidcConfig. You must create an OIDC IdP workforce using this API operation. For more information, see Create a Private Workforce (OIDC IdP).
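
A hedged aws-sdk-go sketch of the Cognito path described above; the workforce name, user pool ID, app client ID, and CIDR range are placeholders, and the Go type names are assumed from the CreateWorkforce shapes added in this patch.

// Sketch only: creating the single private workforce for a Region with the
// new CreateWorkforce API, backed by an existing Cognito user pool.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	sm := sagemaker.New(session.Must(session.NewSession()))

	out, err := sm.CreateWorkforce(&sagemaker.CreateWorkforceInput{
		WorkforceName: aws.String("example-workforce"), // placeholder
		CognitoConfig: &sagemaker.CognitoConfig{ // do not combine with OidcConfig
			UserPool: aws.String("us-west-2_EXAMPLE"), // placeholder pool ID
			ClientId: aws.String("1example23456789"),  // placeholder app client ID
		},
		SourceIpConfig: &sagemaker.SourceIpConfig{
			Cidrs: aws.StringSlice([]string{"203.0.113.0/24"}), // placeholder CIDR
		},
	})
	if err != nil {
		fmt.Println("CreateWorkforce failed:", err)
		return
	}
	fmt.Println("workforce ARN:", aws.StringValue(out.WorkforceArn))
}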

", "CreateWorkteam": "

Creates a new work team for labeling your data. A work team is defined by one or more Amazon Cognito user pools. You must first create the user pools before you can create a work team.

You cannot create more than 25 work teams in an account and region.

", "DeleteAlgorithm": "

Removes the specified algorithm from your account.

", "DeleteApp": "

Used to stop and delete an app.

", @@ -39,7 +40,7 @@ "DeleteEndpointConfig": "

Deletes an endpoint configuration. The DeleteEndpointConfig API deletes only the specified configuration. It does not delete endpoints created using the configuration.

You must not delete an EndpointConfig in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. If you delete the EndpointConfig of an endpoint that is active or being created or updated, you may lose visibility into the instance type the endpoint is using. The endpoint must be deleted in order to stop incurring charges.

", "DeleteExperiment": "

Deletes an Amazon SageMaker experiment. All trials associated with the experiment must be deleted first. Use the ListTrials API to get a list of the trials associated with the experiment.

", "DeleteFlowDefinition": "

Deletes the specified flow definition.

", - "DeleteHumanTaskUi": "

Use this operation to delete a worker task template (HumanTaskUi).

To see a list of human task user interfaces (work task templates) in your account, use . When you delete a worker task template, it no longer appears when you call ListHumanTaskUis.

", + "DeleteHumanTaskUi": "

Use this operation to delete a human task user interface (worker task template).

To see a list of human task user interfaces (work task templates) in your account, use . When you delete a worker task template, it no longer appears when you call ListHumanTaskUis.

", "DeleteModel": "

Deletes a model. The DeleteModel API deletes only the model entry that was created in Amazon SageMaker when you called the CreateModel API. It does not delete model artifacts, inference code, or the IAM role that you specified when creating the model.

", "DeleteModelPackage": "

Deletes a model package.

A model package is used to create Amazon SageMaker models or list on AWS Marketplace. Buyers can subscribe to model packages listed on AWS Marketplace to create models in Amazon SageMaker.

", "DeleteMonitoringSchedule": "

Deletes a monitoring schedule. Also stops the schedule if it had not already been stopped. This does not delete the job execution history of the monitoring schedule.

", @@ -49,6 +50,7 @@ "DeleteTrial": "

Deletes the specified trial. All trial components that make up the trial must be deleted first. Use the DescribeTrialComponent API to get the list of trial components.

", "DeleteTrialComponent": "

Deletes the specified trial component. A trial component must be disassociated from all trials before the trial component can be deleted. To disassociate a trial component from a trial, call the DisassociateTrialComponent API.

", "DeleteUserProfile": "

Deletes a user profile. When a user profile is deleted, the user loses access to their EFS volume, including data, notebooks, and other artifacts.

", + "DeleteWorkforce": "

Use this operation to delete a workforce.

If you want to create a new workforce in an AWS Region where a workforce already exists, use this operation to delete the existing workforce and then use to create a new workforce.

", "DeleteWorkteam": "

Deletes an existing work team. This operation can't be undone.

", "DescribeAlgorithm": "

Returns a description of the specified algorithm that is in your account.

", "DescribeApp": "

Describes the app.

", @@ -109,6 +111,7 @@ "ListTrialComponents": "

Lists the trial components in your account. You can sort the list by trial component name or creation time. You can filter the list to show only components that were created in a specific time range. You can also filter on one of the following:

", "ListTrials": "

Lists the trials in your account. Specify an experiment name to limit the list to the trials that are part of that experiment. Specify a trial component name to limit the list to the trials that are associated with that trial component. The list can be filtered to show only trials that were created in a specific time range. The list can be sorted by trial name or creation time.

", "ListUserProfiles": "

Lists user profiles.

", + "ListWorkforces": "

Use this operation to list all private and vendor workforces in an AWS Region. Note that you can only have one private workforce per AWS Region.

", "ListWorkteams": "

Gets a list of work teams that you have defined in a region. The list may be empty if no work team satisfies the filter specified in the NameContains parameter.

", "RenderUiTemplate": "

Renders the UI template so that you can preview the worker's experience.

", "Search": "

Finds Amazon SageMaker resources that match a search query. Matching resources are returned as a list of SearchRecord objects in the response. You can sort the search results by any resource property in ascending or descending order.

You can query against the following value types: numeric, text, Boolean, and timestamp.

", @@ -442,7 +445,8 @@ "DescribeTrainingJobResponse$AutoMLJobArn": "

The Amazon Resource Name (ARN) of an AutoML job.

", "DescribeTransformJobResponse$AutoMLJobArn": "

The Amazon Resource Name (ARN) of the AutoML transform job.

", "ProcessingJob$AutoMLJobArn": "

The Amazon Resource Name (ARN) of the AutoML job associated with this processing job.

", - "TrainingJob$AutoMLJobArn": "

The Amazon Resource Name (ARN) of the job.

" + "TrainingJob$AutoMLJobArn": "

The Amazon Resource Name (ARN) of the job.

", + "TransformJob$AutoMLJobArn": "

The Amazon Resource Name (ARN) of the AutoML job that created the transform job.

" } }, "AutoMLJobArtifacts": { @@ -586,6 +590,7 @@ "refs": { "CreateTransformJobRequest$BatchStrategy": "

Specifies the number of records to include in a mini-batch for an HTTP inference request. A record is a single unit of input data that inference can be made on. For example, a single line in a CSV file is a record.

To enable the batch strategy, you must set the SplitType property to Line, RecordIO, or TFRecord.

To use only one record when making an HTTP invocation request to a container, set BatchStrategy to SingleRecord and SplitType to Line.

To fit as many records in a mini-batch as can fit within the MaxPayloadInMB limit, set BatchStrategy to MultiRecord and SplitType to Line.
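
A hedged aws-sdk-go sketch of the MultiRecord/Line combination described above; the job name, model name, S3 locations, and instance type are placeholders.

// Sketch only: a transform job that packs CSV lines into mini-batches up to
// MaxPayloadInMB, as described in the documentation above.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	sm := sagemaker.New(session.Must(session.NewSession()))

	_, err := sm.CreateTransformJob(&sagemaker.CreateTransformJobInput{
		TransformJobName: aws.String("example-transform"), // placeholder
		ModelName:        aws.String("example-model"),     // placeholder
		BatchStrategy:    aws.String("MultiRecord"),       // pack records up to MaxPayloadInMB
		MaxPayloadInMB:   aws.Int64(6),
		TransformInput: &sagemaker.TransformInput{
			ContentType: aws.String("text/csv"),
			SplitType:   aws.String("Line"), // required for the batch strategy to apply
			DataSource: &sagemaker.TransformDataSource{
				S3DataSource: &sagemaker.TransformS3DataSource{
					S3DataType: aws.String("S3Prefix"),
					S3Uri:      aws.String("s3://example-bucket/input/"), // placeholder
				},
			},
		},
		TransformOutput: &sagemaker.TransformOutput{
			S3OutputPath: aws.String("s3://example-bucket/output/"), // placeholder
		},
		TransformResources: &sagemaker.TransformResources{
			InstanceType:  aws.String("ml.m5.xlarge"),
			InstanceCount: aws.Int64(1),
		},
	})
	if err != nil {
		fmt.Println("CreateTransformJob failed:", err)
	}
}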

", "DescribeTransformJobResponse$BatchStrategy": "

Specifies the number of records to include in a mini-batch for an HTTP inference request. A record is a single unit of input data that inference can be made on. For example, a single line in a CSV file is a record.

To enable the batch strategy, you must set SplitType to Line, RecordIO, or TFRecord.

", + "TransformJob$BatchStrategy": "

Specifies the number of records to include in a mini-batch for an HTTP inference request. A record is a single unit of input data that inference can be made on. For example, a single line in a CSV file is a record.

", "TransformJobDefinition$BatchStrategy": "

A string that determines the number of records included in a single mini-batch.

SingleRecord means only one record is used per mini-batch. MultiRecord means a mini-batch is set to contain as many records that can fit within the MaxPayloadInMB limit.

" } }, @@ -795,6 +800,21 @@ "SourceIpConfig$Cidrs": "

A list of one to ten Classless Inter-Domain Routing (CIDR) values.

Maximum: Ten CIDR values

The following Length Constraints apply to individual CIDR values in the CIDR value list.

" } }, + "ClientId": { + "base": null, + "refs": { + "CognitoConfig$ClientId": "

The client ID for your Amazon Cognito user pool.

", + "CognitoMemberDefinition$ClientId": "

An identifier for an application client. You must create the app client ID using Amazon Cognito.

", + "OidcConfig$ClientId": "

The OIDC IdP client ID used to configure your private workforce.

", + "OidcConfigForResponse$ClientId": "

The OIDC IdP client ID used to configure your private workforce.

" + } + }, + "ClientSecret": { + "base": null, + "refs": { + "OidcConfig$ClientSecret": "

The OIDC IdP client secret used to configure your private workforce.

" + } + }, "CodeRepositoryArn": { "base": null, "refs": { @@ -851,10 +871,11 @@ "ListCodeRepositoriesOutput$CodeRepositorySummaryList": "

Gets a list of summaries of the Git repositories. Each summary specifies the following values for the repository:

  • Name

  • Amazon Resource Name (ARN)

  • Creation time

  • Last modified time

  • Configuration information, including the URL location of the repository and the ARN of the AWS Secrets Manager secret that contains the credentials used to access the repository.

" } }, - "CognitoClientId": { - "base": null, + "CognitoConfig": { + "base": "

Use this parameter to configure your Amazon Cognito workforce. A single Cognito workforce is created using, and corresponds to, a single Amazon Cognito user pool.

", "refs": { - "CognitoMemberDefinition$ClientId": "

An identifier for an application client. You must create the app client ID using Amazon Cognito.

" + "CreateWorkforceRequest$CognitoConfig": "

Use this parameter to configure an Amazon Cognito private workforce. A single Cognito workforce is created using, and corresponds to, a single Amazon Cognito user pool.

Do not use OidcConfig if you specify values for CognitoConfig.
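A minimal sketch of the new CreateWorkforce call with a Cognito-backed workforce; it assumes the imports and client setup from the ListWorkforces sketch above, the user pool and app client IDs are placeholders, and the WorkforceName field is assumed from the generated request shape.

```go
// Create a private workforce backed by an existing Amazon Cognito user pool.
func createCognitoWorkforce(svc *sagemaker.SageMaker) error {
	_, err := svc.CreateWorkforce(&sagemaker.CreateWorkforceInput{
		WorkforceName: aws.String("example-private-workforce"),
		// Set either CognitoConfig or OidcConfig, never both.
		CognitoConfig: &sagemaker.CognitoConfig{
			UserPool: aws.String("us-west-2_EXAMPLE"),
			ClientId: aws.String("exampleappclientid"),
		},
	})
	return err
}
```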

", + "Workforce$CognitoConfig": "

The configuration of an Amazon Cognito workforce. A single Cognito workforce is created using, and corresponds to, a single Amazon Cognito user pool.

" } }, "CognitoMemberDefinition": { @@ -872,6 +893,7 @@ "CognitoUserPool": { "base": null, "refs": { + "CognitoConfig$UserPool": "

A user pool is a user directory in Amazon Cognito. With a user pool, your users can sign in to your web or mobile app through Amazon Cognito. Your users can also sign in through social identity providers like Google, Facebook, Amazon, or Apple, and through SAML identity providers.

", "CognitoMemberDefinition$UserPool": "

An identifier for a user pool. The user pool must be in the same region as the service that you are calling.

" } }, @@ -927,6 +949,12 @@ "CompilationJobSummaries$member": null } }, + "CompilerOptions": { + "base": null, + "refs": { + "OutputConfig$CompilerOptions": "

Specifies additional parameters for compiler options in JSON format. The compiler options are TargetPlatform specific. CompilerOptions is required for NVIDIA accelerators and highly recommended for CPU compilations; for all other cases it is optional (a short Go sketch follows this list).

  • CPU: Compilation for CPU supports the following compiler options.

    • mcpu: CPU micro-architecture. For example, {'mcpu': 'skylake-avx512'}

    • mattr: CPU flags. For example, {'mattr': ['+neon', '+vfpv4']}

  • ARM: Details of ARM CPU compilations.

    • NEON: NEON is an implementation of the Advanced SIMD extension used in ARMv7 processors.

      For example, add {'mattr': ['+neon']} to the compiler options if compiling for an ARM 32-bit platform with NEON support.

  • NVIDIA: Compilation for NVIDIA GPU supports the following compiler options.

    • gpu_code: Specifies the targeted architecture.

    • trt-ver: Specifies the TensorRT version in x.y.z format.

    • cuda-ver: Specifies the CUDA version in x.y format.

    For example, {'gpu-code': 'sm_72', 'trt-ver': '6.0.1', 'cuda-ver': '10.1'}

  • ANDROID: Compilation for the Android OS supports the following compiler options:

    • ANDROID_PLATFORM: Specifies the Android API levels. Available levels range from 21 to 29. For example, {'ANDROID_PLATFORM': 28}.

    • mattr: Add {'mattr': ['+neon']} to compiler options if compiling for ARM 32-bit platform with NEON support.
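The Go sketch referenced above shows where CompilerOptions fits in a CreateCompilationJob request, using the NVIDIA example values from the list; it assumes the imports and client setup from the ListWorkforces sketch, and the role ARN, S3 paths, framework, and target device are placeholders.

```go
// Compile a model for an NVIDIA target, passing TensorRT/CUDA details as JSON.
func createCompilationJobWithCompilerOptions(svc *sagemaker.SageMaker) error {
	_, err := svc.CreateCompilationJob(&sagemaker.CreateCompilationJobInput{
		CompilationJobName: aws.String("example-compilation-job"),
		RoleArn:            aws.String("arn:aws:iam::123456789012:role/ExampleSageMakerRole"),
		InputConfig: &sagemaker.InputConfig{
			S3Uri:           aws.String("s3://example-bucket/model.tar.gz"),
			DataInputConfig: aws.String("{\"data\": [1, 3, 224, 224]}"),
			Framework:       aws.String("MXNET"),
		},
		OutputConfig: &sagemaker.OutputConfig{
			S3OutputLocation: aws.String("s3://example-bucket/compiled/"),
			TargetDevice:     aws.String("jetson_xavier"),
			// CompilerOptions is a JSON document; these mirror the NVIDIA example above.
			CompilerOptions: aws.String("{\"gpu-code\": \"sm_72\", \"trt-ver\": \"6.0.1\", \"cuda-ver\": \"10.1\"}"),
		},
		StoppingCondition: &sagemaker.StoppingCondition{
			MaxRuntimeInSeconds: aws.Int64(900),
		},
	})
	return err
}
```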

" + } + }, "CompressionType": { "base": null, "refs": { @@ -1011,6 +1039,17 @@ "ModelPackageContainerDefinition$ContainerHostname": "

The DNS host name for the Docker container.

" } }, + "ContainerImage": { + "base": null, + "refs": { + "AutoMLContainerDefinition$Image": "

The ECR path of the container. Refer to ContainerDefinition for more details.

", + "ContainerDefinition$Image": "

The Amazon EC2 Container Registry (Amazon ECR) path where inference code is stored. If you are using your own custom algorithm instead of an algorithm provided by Amazon SageMaker, the inference code must meet Amazon SageMaker requirements. Amazon SageMaker supports both registry/repository[:tag] and registry/repository[@digest] image path formats. For more information, see Using Your Own Algorithms with Amazon SageMaker

", + "DeployedImage$SpecifiedImage": "

The image path you specified when you created the model.

", + "DeployedImage$ResolvedImage": "

The specific digest path of the image hosted in this ProductionVariant.

", + "ModelPackageContainerDefinition$Image": "

The Amazon EC2 Container Registry (Amazon ECR) path where inference code is stored.

If you are using your own custom algorithm instead of an algorithm provided by Amazon SageMaker, the inference code must meet Amazon SageMaker requirements. Amazon SageMaker supports both registry/repository[:tag] and registry/repository[@digest] image path formats. For more information, see Using Your Own Algorithms with Amazon SageMaker.

", + "TrainingSpecification$TrainingImage": "

The Amazon ECR registry path of the Docker image that contains the training algorithm.

" + } + }, "ContainerMode": { "base": null, "refs": { @@ -1322,6 +1361,16 @@ "refs": { } }, + "CreateWorkforceRequest": { + "base": null, + "refs": { + } + }, + "CreateWorkforceResponse": { + "base": null, + "refs": { + } + }, "CreateWorkteamRequest": { "base": null, "refs": { @@ -1408,7 +1457,8 @@ "base": "

The data structure used to specify the data to be used for inference in a batch transform job and to associate the data that is relevant to the prediction results in the output. The input filter provided allows you to exclude input data that is not needed for inference in a batch transform job. The output filter provided allows you to include input data relevant to interpreting the predictions in the output from the job. For more information, see Associate Prediction Results with their Corresponding Input Records.

", "refs": { "CreateTransformJobRequest$DataProcessing": "

The data structure used to specify the data to be used for inference in a batch transform job and to associate the data that is relevant to the prediction results in the output. The input filter provided allows you to exclude input data that is not needed for inference in a batch transform job. The output filter provided allows you to include input data relevant to interpreting the predictions in the output from the job. For more information, see Associate Prediction Results with their Corresponding Input Records.

", - "DescribeTransformJobResponse$DataProcessing": null + "DescribeTransformJobResponse$DataProcessing": null, + "TransformJob$DataProcessing": null } }, "DataSource": { @@ -1572,6 +1622,16 @@ "refs": { } }, + "DeleteWorkforceRequest": { + "base": null, + "refs": { + } + }, + "DeleteWorkforceResponse": { + "base": null, + "refs": { + } + }, "DeleteWorkteamRequest": { "base": null, "refs": { @@ -2235,7 +2295,7 @@ } }, "ExperimentConfig": { - "base": "

Configuration for the experiment.

", + "base": "

Associates a SageMaker job as a trial component with an experiment and trial. Specified when you call the CreateProcessingJob, CreateTrainingJob, and CreateTransformJob APIs.

", "refs": { "CreateProcessingJobRequest$ExperimentConfig": null, "CreateTrainingJobRequest$ExperimentConfig": null, @@ -2244,7 +2304,8 @@ "DescribeTrainingJobResponse$ExperimentConfig": null, "DescribeTransformJobResponse$ExperimentConfig": null, "ProcessingJob$ExperimentConfig": null, - "TrainingJob$ExperimentConfig": null + "TrainingJob$ExperimentConfig": null, + "TransformJob$ExperimentConfig": null } }, "ExperimentDescription": { @@ -2285,9 +2346,9 @@ "DisassociateTrialComponentRequest$TrialName": "

The name of the trial to disassociate from.

", "Experiment$ExperimentName": "

The name of the experiment.

", "Experiment$DisplayName": "

The name of the experiment as displayed. If DisplayName isn't specified, ExperimentName is displayed.

", - "ExperimentConfig$ExperimentName": "

The name of the experiment.

", - "ExperimentConfig$TrialName": "

The name of the trial.

", - "ExperimentConfig$TrialComponentDisplayName": "

Display name for the trial component.

", + "ExperimentConfig$ExperimentName": "

The name of an existing experiment to associate the trial component with.

", + "ExperimentConfig$TrialName": "

The name of an existing trial to associate the trial component with. If not specified, a new trial is created.

", + "ExperimentConfig$TrialComponentDisplayName": "

The display name for the trial component. If this key isn't specified, the display name is the trial component name.
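Taken together, these three fields attach a job to an experiment as a trial component; a minimal Go sketch is below, assuming the imports from the ListWorkforces sketch above and an already-built CreateTrainingJobInput, with placeholder experiment and trial names.

```go
// Record a training job as a trial component of an existing experiment.
// If TrialName is omitted, a new trial is created automatically.
func tagTrainingJobWithExperiment(svc *sagemaker.SageMaker, input *sagemaker.CreateTrainingJobInput) error {
	input.ExperimentConfig = &sagemaker.ExperimentConfig{
		ExperimentName:            aws.String("example-experiment"),
		TrialName:                 aws.String("example-trial"),
		TrialComponentDisplayName: aws.String("example-training-run"),
	}
	_, err := svc.CreateTrainingJob(input)
	return err
}
```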

", "ExperimentSummary$ExperimentName": "

The name of the experiment.

", "ExperimentSummary$DisplayName": "

The name of the experiment as displayed. If DisplayName isn't specified, ExperimentName is displayed.

", "ListTrialComponentsRequest$ExperimentName": "

A filter that returns only components that are part of the specified experiment. If you specify ExperimentName, you can't filter by SourceArn or TrialName.

", @@ -2367,6 +2428,7 @@ "ResourceLimitExceeded$Message": null, "ResourceNotFound$Message": null, "TrainingJob$FailureReason": "

If the training job failed, the reason it failed.

", + "TransformJob$FailureReason": "

If the transform job failed, the reason it failed.

", "TransformJobSummary$FailureReason": "

If the transform job failed, the reason it failed.

" } }, @@ -2567,6 +2629,18 @@ "GitConfig$RepositoryUrl": "

The URL where the Git repository is located.

" } }, + "Group": { + "base": null, + "refs": { + "Groups$member": null + } + }, + "Groups": { + "base": null, + "refs": { + "OidcMemberDefinition$Groups": "

A list of comma-separated strings that identify user groups in your OIDC IdP. Each user group is made up of a group of private workers.

" + } + }, "HookParameters": { "base": null, "refs": { @@ -2621,7 +2695,7 @@ "HumanLoopConfig$HumanTaskUiArn": "

The Amazon Resource Name (ARN) of the human task user interface.

", "HumanTaskUiSummary$HumanTaskUiArn": "

The Amazon Resource Name (ARN) of the human task user interface.

", "RenderUiTemplateRequest$HumanTaskUiArn": "

The HumanTaskUiArn of the worker UI that you want to render. Do not provide a HumanTaskUiArn if you use the UiTemplate parameter.

See a list of available Human Ui Amazon Resource Names (ARNs) in UiConfig.

", - "UiConfig$HumanTaskUiArn": "

The ARN of the worker task template used to render the worker UI and tools for labeling job tasks.

Use this parameter when you are creating a labeling job for 3D point cloud labeling modalities. Use your labeling job task type to select one of the following ARN's and use it with this parameter when you create a labeling job. Replace aws-region with the AWS region you are creating your labeling job in.

Use this HumanTaskUiArn for 3D point cloud object detection and 3D point cloud object detection adjustment labeling jobs.

  • arn:aws:sagemaker:aws-region:394669845002:human-task-ui/PointCloudObjectDetection

Use this HumanTaskUiArn for 3D point cloud object tracking and 3D point cloud object tracking adjustment labeling jobs.

  • arn:aws:sagemaker:aws-region:394669845002:human-task-ui/PointCloudObjectTracking

Use this HumanTaskUiArn for 3D point cloud semantic segmentation and 3D point cloud semantic segmentation adjustment labeling jobs.

  • arn:aws:sagemaker:aws-region:394669845002:human-task-ui/PointCloudSemanticSegmentation

" + "UiConfig$HumanTaskUiArn": "

The ARN of the worker task template used to render the worker UI and tools for labeling job tasks.

Use this parameter when you are creating a labeling job for 3D point cloud and video frame labeling jobs. Use your labeling job task type to select one of the following ARNs and use it with this parameter when you create a labeling job (a short Go sketch follows the ARN list below). Replace aws-region with the AWS Region you are creating your labeling job in.

3D Point Cloud HumanTaskUiArns

Use this HumanTaskUiArn for 3D point cloud object detection and 3D point cloud object detection adjustment labeling jobs.

  • arn:aws:sagemaker:aws-region:394669845002:human-task-ui/PointCloudObjectDetection

Use this HumanTaskUiArn for 3D point cloud object tracking and 3D point cloud object tracking adjustment labeling jobs.

  • arn:aws:sagemaker:aws-region:394669845002:human-task-ui/PointCloudObjectTracking

Use this HumanTaskUiArn for 3D point cloud semantic segmentation and 3D point cloud semantic segmentation adjustment labeling jobs.

  • arn:aws:sagemaker:aws-region:394669845002:human-task-ui/PointCloudSemanticSegmentation

Video Frame HumanTaskUiArns

Use this HumanTaskUiArn for video frame object detection and video frame object detection adjustment labeling jobs.

  • arn:aws:sagemaker:region:394669845002:human-task-ui/VideoObjectDetection

Use this HumanTaskUiArn for video frame object tracking and video frame object tracking adjustment labeling jobs.

  • arn:aws:sagemaker:aws-region:394669845002:human-task-ui/VideoObjectTracking
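The Go sketch referenced above shows one of these built-in template ARNs plugged into UiConfig; it assumes the imports from the ListWorkforces sketch and an otherwise complete CreateLabelingJobInput, and the Region in the ARN is a placeholder.

```go
// For a 3D point cloud object detection labeling job, reference the built-in
// worker task template instead of supplying your own UiTemplate.
func usePointCloudDetectionTemplate(input *sagemaker.CreateLabelingJobInput) {
	input.HumanTaskConfig.UiConfig = &sagemaker.UiConfig{
		HumanTaskUiArn: aws.String("arn:aws:sagemaker:us-west-2:394669845002:human-task-ui/PointCloudObjectDetection"),
	}
}
```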

" } }, "HumanTaskUiName": { @@ -2821,15 +2895,10 @@ "TrainingJobDefinition$HyperParameters": "

The hyperparameters used for the training job.

" } }, - "Image": { + "ImageArn": { "base": null, "refs": { - "AutoMLContainerDefinition$Image": "

The ECR path of the container. Refer to ContainerDefinition for more details.

", - "ContainerDefinition$Image": "

The Amazon EC2 Container Registry (Amazon ECR) path where inference code is stored. If you are using your own custom algorithm instead of an algorithm provided by Amazon SageMaker, the inference code must meet Amazon SageMaker requirements. Amazon SageMaker supports both registry/repository[:tag] and registry/repository[@digest] image path formats. For more information, see Using Your Own Algorithms with Amazon SageMaker

", - "DeployedImage$SpecifiedImage": "

The image path you specified when you created the model.

", - "DeployedImage$ResolvedImage": "

The specific digest path of the image hosted in this ProductionVariant.

", - "ModelPackageContainerDefinition$Image": "

The Amazon EC2 Container Registry (Amazon ECR) path where inference code is stored.

If you are using your own custom algorithm instead of an algorithm provided by Amazon SageMaker, the inference code must meet Amazon SageMaker requirements. Amazon SageMaker supports both registry/repository[:tag] and registry/repository[@digest] image path formats. For more information, see Using Your Own Algorithms with Amazon SageMaker.

", - "TrainingSpecification$TrainingImage": "

The Amazon ECR registry path of the Docker image that contains the training algorithm.

" + "ResourceSpec$SageMakerImageArn": "

The Amazon Resource Name (ARN) of the SageMaker image created on the instance.

" } }, "ImageDigest": { @@ -3048,7 +3117,8 @@ "DescribeTrainingJobResponse$LabelingJobArn": "

The Amazon Resource Name (ARN) of the Amazon SageMaker Ground Truth labeling job that created the transform or training job.

", "DescribeTransformJobResponse$LabelingJobArn": "

The Amazon Resource Name (ARN) of the Amazon SageMaker Ground Truth labeling job that created the transform or training job.

", "LabelingJobSummary$LabelingJobArn": "

The Amazon Resource Name (ARN) assigned to the labeling job when it was created.

", - "TrainingJob$LabelingJobArn": "

The Amazon Resource Name (ARN) of the labeling job.

" + "TrainingJob$LabelingJobArn": "

The Amazon Resource Name (ARN) of the labeling job.

", + "TransformJob$LabelingJobArn": "

The Amazon Resource Name (ARN) of the labeling job that created the transform job.

" } }, "LabelingJobDataAttributes": { @@ -3150,8 +3220,8 @@ "LambdaFunctionArn": { "base": null, "refs": { - "AnnotationConsolidationConfig$AnnotationConsolidationLambdaArn": "

The Amazon Resource Name (ARN) of a Lambda function implements the logic for annotation consolidation and to process output data.

This parameter is required for all labeling jobs. For built-in task types, use one of the following Amazon SageMaker Ground Truth Lambda function ARNs for AnnotationConsolidationLambdaArn. For custom labeling workflows, see Post-annotation Lambda.

Bounding box - Finds the most similar boxes from different workers based on the Jaccard index of the boxes.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-BoundingBox

    arn:aws:lambda:us-east-2:266458841044:function:ACS-BoundingBox

    arn:aws:lambda:us-west-2:081040173940:function:ACS-BoundingBox

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-BoundingBox

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-BoundingBox

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-BoundingBox

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-BoundingBox

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-BoundingBox

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-BoundingBox

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-BoundingBox

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-BoundingBox

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-BoundingBox

Image classification - Uses a variant of the Expectation Maximization approach to estimate the true class of an image based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-ImageMultiClass

    arn:aws:lambda:us-east-2:266458841044:function:ACS-ImageMultiClass

    arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClass

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-ImageMultiClass

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-ImageMultiClass

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-ImageMultiClass

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-ImageMultiClass

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-ImageMultiClass

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-ImageMultiClass

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-ImageMultiClass

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-ImageMultiClass

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-ImageMultiClass

Multi-label image classification - Uses a variant of the Expectation Maximization approach to estimate the true classes of an image based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:us-east-2:266458841044:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-ImageMultiClassMultiLabel

Semantic segmentation - Treats each pixel in an image as a multi-class classification and treats pixel annotations from workers as \"votes\" for the correct label.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-SemanticSegmentation

    arn:aws:lambda:us-east-2:266458841044:function:ACS-SemanticSegmentation

    arn:aws:lambda:us-west-2:081040173940:function:ACS-SemanticSegmentation

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-SemanticSegmentation

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-SemanticSegmentation

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-SemanticSegmentation

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-SemanticSegmentation

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-SemanticSegmentation

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-SemanticSegmentation

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-SemanticSegmentation

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-SemanticSegmentation

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-SemanticSegmentation

Text classification - Uses a variant of the Expectation Maximization approach to estimate the true class of text based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClass

    arn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClass

    arn:aws:lambda:us-west-2:081040173940:function:ACS-TextMultiClass

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-TextMultiClass

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-TextMultiClass

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-TextMultiClass

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-TextMultiClass

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-TextMultiClass

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-TextMultiClass

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-TextMultiClass

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-TextMultiClass

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-TextMultiClass

Multi-label text classification - Uses a variant of the Expectation Maximization approach to estimate the true classes of text based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:us-west-2:081040173940:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-TextMultiClassMultiLabel

Named entity recognition - Groups similar selections and calculates aggregate boundaries, resolving to most-assigned label.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-NamedEntityRecognition

    arn:aws:lambda:us-east-2:266458841044:function:ACS-NamedEntityRecognition

    arn:aws:lambda:us-west-2:081040173940:function:ACS-NamedEntityRecognition

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-NamedEntityRecognition

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-NamedEntityRecognition

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-NamedEntityRecognition

3D point cloud object detection - Use this task type when you want workers to classify objects in a 3D point cloud by drawing 3D cuboids around objects. For example, you can use this task type to ask workers to identify different types of objects in a point cloud, such as cars, bikes, and pedestrians.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudObjectDetection

3D point cloud object tracking - Use this task type when you want workers to draw 3D cuboids around objects that appear in a sequence of 3D point cloud frames. For example, you can use this task type to ask workers to track the movement of vehicles across multiple point cloud frames.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudObjectTracking

3D point cloud semantic segmentation - Use this task type when you want workers to create a point-level semantic segmentation masks by painting objects in a 3D point cloud using different colors where each color is assigned to one of the classes you specify.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudSemanticSegmentation

Use the following ARNs for Label Verification and Adjustment Jobs

Use label verification and adjustment jobs to review and adjust labels. To learn more, see Verify and Adjust Labels .

Semantic segmentation adjustment - Treats each pixel in an image as a multi-class classification and treats pixel adjusted annotations from workers as \"votes\" for the correct label.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentSemanticSegmentation

Semantic segmentation verification - Uses a variant of the Expectation Maximization approach to estimate the true class of verification judgment for semantic segmentation labels based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:us-east-2:266458841044:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:us-west-2:081040173940:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-VerificationSemanticSegmentation

Bounding box verification - Uses a variant of the Expectation Maximization approach to estimate the true class of verification judgement for bounding box labels based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-VerificationBoundingBox

    arn:aws:lambda:us-east-2:266458841044:function:ACS-VerificationBoundingBox

    arn:aws:lambda:us-west-2:081040173940:function:ACS-VerificationBoundingBox

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-VerificationBoundingBox

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VerificationBoundingBox

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VerificationBoundingBox

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-VerificationBoundingBox

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-VerificationBoundingBox

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VerificationBoundingBox

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-VerificationBoundingBox

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VerificationBoundingBox

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-VerificationBoundingBox

Bounding box adjustment - Finds the most similar boxes from different workers based on the Jaccard index of the adjusted annotations.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentBoundingBox

3D point cloud object detection adjustment - Use this task type when you want workers to adjust 3D cuboids around objects in a 3D point cloud.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudObjectDetection

3D point cloud object tracking adjustment - Use this task type when you want workers to adjust 3D cuboids around objects that appear in a sequence of 3D point cloud frames.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudObjectTracking

3D point cloud semantic segmentation adjustment - Use this task type when you want workers to adjust a point-level semantic segmentation masks using a paint tool.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudSemanticSegmentation

", - "HumanTaskConfig$PreHumanTaskLambdaArn": "

The Amazon Resource Name (ARN) of a Lambda function that is run before a data object is sent to a human worker. Use this function to provide input to a custom labeling job.

For built-in task types, use one of the following Amazon SageMaker Ground Truth Lambda function ARNs for PreHumanTaskLambdaArn. For custom labeling workflows, see Pre-annotation Lambda.

Bounding box - Finds the most similar boxes from different workers based on the Jaccard index of the boxes.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-BoundingBox

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-BoundingBox

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-BoundingBox

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-BoundingBox

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-BoundingBox

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-BoundingBox

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-BoundingBox

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-BoundingBox

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-BoundingBox

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-BoundingBox

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-BoundingBox

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-BoundingBox

Image classification - Uses a variant of the Expectation Maximization approach to estimate the true class of an image based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-ImageMultiClass

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-ImageMultiClass

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-ImageMultiClass

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-ImageMultiClass

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-ImageMultiClass

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-ImageMultiClass

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-ImageMultiClass

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-ImageMultiClass

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-ImageMultiClass

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-ImageMultiClass

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-ImageMultiClass

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-ImageMultiClass

Multi-label image classification - Uses a variant of the Expectation Maximization approach to estimate the true classes of an image based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-ImageMultiClassMultiLabel

Semantic segmentation - Treats each pixel in an image as a multi-class classification and treats pixel annotations from workers as \"votes\" for the correct label.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-SemanticSegmentation

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-SemanticSegmentation

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-SemanticSegmentation

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-SemanticSegmentation

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-SemanticSegmentation

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-SemanticSegmentation

Text classification - Uses a variant of the Expectation Maximization approach to estimate the true class of text based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-TextMultiClass

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-TextMultiClass

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-TextMultiClass

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-TextMultiClass

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-TextMultiClass

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-TextMultiClass

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-TextMultiClass

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-TextMultiClass

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-TextMultiClass

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-TextMultiClass

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-TextMultiClass

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-TextMultiClass

Multi-label text classification - Uses a variant of the Expectation Maximization approach to estimate the true classes of text based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-TextMultiClassMultiLabel

Named entity recognition - Groups similar selections and calculates aggregate boundaries, resolving to most-assigned label.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-NamedEntityRecognition

3D Point Cloud Modalities

Use the following pre-annotation lambdas for 3D point cloud labeling modality tasks. See 3D Point Cloud Task types to learn more.

3D Point Cloud Object Detection - Use this task type when you want workers to classify objects in a 3D point cloud by drawing 3D cuboids around objects. For example, you can use this task type to ask workers to identify different types of objects in a point cloud, such as cars, bikes, and pedestrians.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-3DPointCloudObjectDetection

3D Point Cloud Object Tracking - Use this task type when you want workers to draw 3D cuboids around objects that appear in a sequence of 3D point cloud frames. For example, you can use this task type to ask workers to track the movement of vehicles across multiple point cloud frames.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-3DPointCloudObjectTracking

3D Point Cloud Semantic Segmentation - Use this task type when you want workers to create a point-level semantic segmentation masks by painting objects in a 3D point cloud using different colors where each color is assigned to one of the classes you specify.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-3DPointCloudSemanticSegmentation

Use the following ARNs for Label Verification and Adjustment Jobs

Use label verification and adjustment jobs to review and adjust labels. To learn more, see Verify and Adjust Labels .

Bounding box verification - Uses a variant of the Expectation Maximization approach to estimate the true class of verification judgement for bounding box labels based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudObjectTracking

Bounding box adjustment - Finds the most similar boxes from different workers based on the Jaccard index of the adjusted annotations.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentBoundingBox

Semantic segmentation verification - Uses a variant of the Expectation Maximization approach to estimate the true class of verification judgment for semantic segmentation labels based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VerificationSemanticSegmentation

Semantic segmentation adjustment - Treats each pixel in an image as a multi-class classification and treats pixel adjusted annotations from workers as \"votes\" for the correct label.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentSemanticSegmentation

3D point cloud object detection adjustment - Adjust 3D cuboids in a point cloud frame.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudObjectDetection

3D point cloud object tracking adjustment - Adjust 3D cuboids across a sequence of point cloud frames.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudObjectTracking

3D point cloud semantic segmentation adjustment - Adjust semantic segmentation masks in a 3D point cloud.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudSemanticSegmentation

", + "AnnotationConsolidationConfig$AnnotationConsolidationLambdaArn": "

The Amazon Resource Name (ARN) of a Lambda function that implements the logic for annotation consolidation and processes output data.

This parameter is required for all labeling jobs. For built-in task types, use one of the following Amazon SageMaker Ground Truth Lambda function ARNs for AnnotationConsolidationLambdaArn. For custom labeling workflows, see Post-annotation Lambda.

Bounding box - Finds the most similar boxes from different workers based on the Jaccard index of the boxes.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-BoundingBox

    arn:aws:lambda:us-east-2:266458841044:function:ACS-BoundingBox

    arn:aws:lambda:us-west-2:081040173940:function:ACS-BoundingBox

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-BoundingBox

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-BoundingBox

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-BoundingBox

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-BoundingBox

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-BoundingBox

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-BoundingBox

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-BoundingBox

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-BoundingBox

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-BoundingBox

Image classification - Uses a variant of the Expectation Maximization approach to estimate the true class of an image based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-ImageMultiClass

    arn:aws:lambda:us-east-2:266458841044:function:ACS-ImageMultiClass

    arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClass

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-ImageMultiClass

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-ImageMultiClass

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-ImageMultiClass

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-ImageMultiClass

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-ImageMultiClass

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-ImageMultiClass

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-ImageMultiClass

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-ImageMultiClass

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-ImageMultiClass

Multi-label image classification - Uses a variant of the Expectation Maximization approach to estimate the true classes of an image based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:us-east-2:266458841044:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-ImageMultiClassMultiLabel

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-ImageMultiClassMultiLabel

Semantic segmentation - Treats each pixel in an image as a multi-class classification and treats pixel annotations from workers as \"votes\" for the correct label.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-SemanticSegmentation

    arn:aws:lambda:us-east-2:266458841044:function:ACS-SemanticSegmentation

    arn:aws:lambda:us-west-2:081040173940:function:ACS-SemanticSegmentation

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-SemanticSegmentation

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-SemanticSegmentation

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-SemanticSegmentation

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-SemanticSegmentation

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-SemanticSegmentation

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-SemanticSegmentation

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-SemanticSegmentation

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-SemanticSegmentation

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-SemanticSegmentation

Text classification - Uses a variant of the Expectation Maximization approach to estimate the true class of text based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClass

    arn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClass

    arn:aws:lambda:us-west-2:081040173940:function:ACS-TextMultiClass

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-TextMultiClass

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-TextMultiClass

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-TextMultiClass

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-TextMultiClass

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-TextMultiClass

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-TextMultiClass

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-TextMultiClass

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-TextMultiClass

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-TextMultiClass

Multi-label text classification - Uses a variant of the Expectation Maximization approach to estimate the true classes of text based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:us-west-2:081040173940:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-TextMultiClassMultiLabel

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-TextMultiClassMultiLabel

Named entity recognition - Groups similar selections and calculates aggregate boundaries, resolving to most-assigned label.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-NamedEntityRecognition

    arn:aws:lambda:us-east-2:266458841044:function:ACS-NamedEntityRecognition

    arn:aws:lambda:us-west-2:081040173940:function:ACS-NamedEntityRecognition

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-NamedEntityRecognition

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-NamedEntityRecognition

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-NamedEntityRecognition

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-NamedEntityRecognition

Video Classification - Use this task type when you need workers to classify videos using predefined labels that you specify. Workers are shown videos and are asked to choose one label for each video.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-VideoMultiClass

    arn:aws:lambda:us-east-2:266458841044:function:ACS-VideoMultiClass

    arn:aws:lambda:us-west-2:081040173940:function:ACS-VideoMultiClass

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-VideoMultiClass

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VideoMultiClass

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VideoMultiClass

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-VideoMultiClass

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-VideoMultiClass

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VideoMultiClass

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-VideoMultiClass

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VideoMultiClass

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-VideoMultiClass

Video Frame Object Detection - Use this task type to have workers identify and locate objects in a sequence of video frames (images extracted from a video) using bounding boxes. For example, you can use this task to ask workers to identify and localize various objects in a series of video frames, such as cars, bikes, and pedestrians.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-VideoObjectDetection

    arn:aws:lambda:us-east-2:266458841044:function:ACS-VideoObjectDetection

    arn:aws:lambda:us-west-2:081040173940:function:ACS-VideoObjectDetection

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-VideoObjectDetection

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VideoObjectDetection

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VideoObjectDetection

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-VideoObjectDetection

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-VideoObjectDetection

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VideoObjectDetection

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-VideoObjectDetection

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VideoObjectDetection

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-VideoObjectDetection

Video Frame Object Tracking - Use this task type to have workers track the movement of objects in a sequence of video frames (images extracted from a video) using bounding boxes. For example, you can use this task to ask workers to track the movement of objects, such as cars, bikes, and pedestrians.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-VideoObjectTracking

    arn:aws:lambda:us-east-2:266458841044:function:ACS-VideoObjectTracking

    arn:aws:lambda:us-west-2:081040173940:function:ACS-VideoObjectTracking

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-VideoObjectTracking

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VideoObjectTracking

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VideoObjectTracking

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-VideoObjectTracking

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-VideoObjectTracking

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VideoObjectTracking

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-VideoObjectTracking

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VideoObjectTracking

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-VideoObjectTracking

3D point cloud object detection - Use this task type when you want workers to classify objects in a 3D point cloud by drawing 3D cuboids around objects. For example, you can use this task type to ask workers to identify different types of objects in a point cloud, such as cars, bikes, and pedestrians.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudObjectDetection

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudObjectDetection

3D point cloud object tracking - Use this task type when you want workers to draw 3D cuboids around objects that appear in a sequence of 3D point cloud frames. For example, you can use this task type to ask workers to track the movement of vehicles across multiple point cloud frames.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudObjectTracking

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudObjectTracking

3D point cloud semantic segmentation - Use this task type when you want workers to create point-level semantic segmentation masks by painting objects in a 3D point cloud using different colors, where each color is assigned to one of the classes you specify.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudSemanticSegmentation

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudSemanticSegmentation

Use the following ARNs for Label Verification and Adjustment Jobs

Use label verification and adjustment jobs to review and adjust labels. To learn more, see Verify and Adjust Labels.

Semantic segmentation adjustment - Treats each pixel in an image as a multi-class classification and treats adjusted pixel annotations from workers as \"votes\" for the correct label.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentSemanticSegmentation

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentSemanticSegmentation

Semantic segmentation verification - Uses a variant of the Expectation Maximization approach to estimate the true class of verification judgment for semantic segmentation labels based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:us-east-2:266458841044:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:us-west-2:081040173940:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VerificationSemanticSegmentation

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-VerificationSemanticSegmentation

Bounding box verification - Uses a variant of the Expectation Maximization approach to estimate the true class of verification judgment for bounding box labels based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-VerificationBoundingBox

    arn:aws:lambda:us-east-2:266458841044:function:ACS-VerificationBoundingBox

    arn:aws:lambda:us-west-2:081040173940:function:ACS-VerificationBoundingBox

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-VerificationBoundingBox

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VerificationBoundingBox

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VerificationBoundingBox

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-VerificationBoundingBox

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-VerificationBoundingBox

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VerificationBoundingBox

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-VerificationBoundingBox

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VerificationBoundingBox

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-VerificationBoundingBox

Bounding box adjustment - Finds the most similar boxes from different workers based on the Jaccard index of the adjusted annotations.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentBoundingBox

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentBoundingBox

Video Frame Object Detection Adjustment - Use this task type when you want workers to adjust bounding boxes that workers have added to video frames to classify and localize objects in a sequence of video frames.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentVideoObjectDetection

    arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentVideoObjectDetection

    arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentVideoObjectDetection

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentVideoObjectDetection

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentVideoObjectDetection

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentVideoObjectDetection

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentVideoObjectDetection

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentVideoObjectDetection

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentVideoObjectDetection

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentVideoObjectDetection

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentVideoObjectDetection

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentVideoObjectDetection

Video Frame Object Tracking Adjustment - Use this task type when you want workers to adjust bounding boxes that workers have added to video frames to track object movement across a sequence of video frames.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentVideoObjectTracking

    arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentVideoObjectTracking

    arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentVideoObjectTracking

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentVideoObjectTracking

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentVideoObjectTracking

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentVideoObjectTracking

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentVideoObjectTracking

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentVideoObjectTracking

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentVideoObjectTracking

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentVideoObjectTracking

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentVideoObjectTracking

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentVideoObjectTracking

3D point cloud object detection adjustment - Use this task type when you want workers to adjust 3D cuboids around objects in a 3D point cloud.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudObjectDetection

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudObjectDetection

3D point cloud object tracking adjustment - Use this task type when you want workers to adjust 3D cuboids around objects that appear in a sequence of 3D point cloud frames.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudObjectTracking

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudObjectTracking

3D point cloud semantic segmentation adjustment - Use this task type when you want workers to adjust point-level semantic segmentation masks using a paint tool.

  • arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudSemanticSegmentation

    arn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudSemanticSegmentation
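
In the Go SDK, this value is supplied through sagemaker.AnnotationConsolidationConfig on the HumanTaskConfig of a CreateLabelingJob request. A minimal sketch follows, using the us-east-1 bounding box consolidation function from the list above; it is illustrative only.

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/sagemaker"
    )

    // consolidationConfig builds the annotation-consolidation settings for a
    // built-in bounding box labeling job, using the us-east-1 ACS function
    // from the list above.
    func consolidationConfig() *sagemaker.AnnotationConsolidationConfig {
        return &sagemaker.AnnotationConsolidationConfig{
            AnnotationConsolidationLambdaArn: aws.String(
                "arn:aws:lambda:us-east-1:432418664414:function:ACS-BoundingBox"),
        }
    }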

", + "HumanTaskConfig$PreHumanTaskLambdaArn": "

The Amazon Resource Name (ARN) of a Lambda function that is run before a data object is sent to a human worker. Use this function to provide input to a custom labeling job.

For built-in task types, use one of the following Amazon SageMaker Ground Truth Lambda function ARNs for PreHumanTaskLambdaArn. For custom labeling workflows, see Pre-annotation Lambda.

Bounding box - Finds the most similar boxes from different workers based on the Jaccard index of the boxes.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-BoundingBox

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-BoundingBox

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-BoundingBox

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-BoundingBox

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-BoundingBox

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-BoundingBox

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-BoundingBox

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-BoundingBox

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-BoundingBox

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-BoundingBox

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-BoundingBox

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-BoundingBox

Image classification - Uses a variant of the Expectation Maximization approach to estimate the true class of an image based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-ImageMultiClass

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-ImageMultiClass

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-ImageMultiClass

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-ImageMultiClass

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-ImageMultiClass

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-ImageMultiClass

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-ImageMultiClass

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-ImageMultiClass

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-ImageMultiClass

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-ImageMultiClass

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-ImageMultiClass

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-ImageMultiClass

Multi-label image classification - Uses a variant of the Expectation Maximization approach to estimate the true classes of an image based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-ImageMultiClassMultiLabel

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-ImageMultiClassMultiLabel

Semantic segmentation - Treats each pixel in an image as a multi-class classification and treats pixel annotations from workers as \"votes\" for the correct label.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-SemanticSegmentation

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-SemanticSegmentation

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-SemanticSegmentation

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-SemanticSegmentation

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-SemanticSegmentation

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-SemanticSegmentation

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-SemanticSegmentation

Text classification - Uses a variant of the Expectation Maximization approach to estimate the true class of text based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-TextMultiClass

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-TextMultiClass

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-TextMultiClass

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-TextMultiClass

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-TextMultiClass

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-TextMultiClass

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-TextMultiClass

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-TextMultiClass

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-TextMultiClass

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-TextMultiClass

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-TextMultiClass

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-TextMultiClass

Multi-label text classification - Uses a variant of the Expectation Maximization approach to estimate the true classes of text based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-TextMultiClassMultiLabel

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-TextMultiClassMultiLabel

Named entity recognition - Groups similar selections and calculates aggregate boundaries, resolving to most-assigned label.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-NamedEntityRecognition

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-NamedEntityRecognition

Video Classification - Use this task type when you need workers to classify videos using predefined labels that you specify. Workers are shown videos and are asked to choose one label for each video.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-VideoMultiClass

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-VideoMultiClass

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-VideoMultiClass

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-VideoMultiClass

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VideoMultiClass

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VideoMultiClass

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-VideoMultiClass

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-VideoMultiClass

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VideoMultiClass

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-VideoMultiClass

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VideoMultiClass

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-VideoMultiClass

Video Frame Object Detection - Use this task type to have workers identify and locate objects in a sequence of video frames (images extracted from a video) using bounding boxes. For example, you can use this task to ask workers to identify and localize various objects in a series of video frames, such as cars, bikes, and pedestrians.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-VideoObjectDetection

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-VideoObjectDetection

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-VideoObjectDetection

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-VideoObjectDetection

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VideoObjectDetection

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VideoObjectDetection

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-VideoObjectDetection

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-VideoObjectDetection

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VideoObjectDetection

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-VideoObjectDetection

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VideoObjectDetection

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-VideoObjectDetection

Video Frame Object Tracking - Use this task type to have workers track the movement of objects in a sequence of video frames (images extracted from a video) using bounding boxes. For example, you can use this task to ask workers to track the movement of objects, such as cars, bikes, and pedestrians.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-VideoObjectTracking

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-VideoObjectTracking

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-VideoObjectTracking

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-VideoObjectTracking

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VideoObjectTracking

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VideoObjectTracking

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-VideoObjectTracking

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-VideoObjectTracking

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VideoObjectTracking

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-VideoObjectTracking

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VideoObjectTracking

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-VideoObjectTracking

3D Point Cloud Modalities

Use the following pre-annotation Lambda functions for 3D point cloud labeling modality tasks. See 3D Point Cloud Task types to learn more.

3D Point Cloud Object Detection - Use this task type when you want workers to classify objects in a 3D point cloud by drawing 3D cuboids around objects. For example, you can use this task type to ask workers to identify different types of objects in a point cloud, such as cars, bikes, and pedestrians.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-3DPointCloudObjectDetection

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-3DPointCloudObjectDetection

3D Point Cloud Object Tracking - Use this task type when you want workers to draw 3D cuboids around objects that appear in a sequence of 3D point cloud frames. For example, you can use this task type to ask workers to track the movement of vehicles across multiple point cloud frames.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-3DPointCloudObjectTracking

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-3DPointCloudObjectTracking

3D Point Cloud Semantic Segmentation - Use this task type when you want workers to create point-level semantic segmentation masks by painting objects in a 3D point cloud using different colors, where each color is assigned to one of the classes you specify.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-3DPointCloudSemanticSegmentation

Use the following ARNs for Label Verification and Adjustment Jobs

Use label verification and adjustment jobs to review and adjust labels. To learn more, see Verify and Adjust Labels.

Bounding box verification - Uses a variant of the Expectation Maximization approach to estimate the true class of verification judgment for bounding box labels based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VerificationBoundingBox

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-VerificationBoundingBox

Bounding box adjustment - Finds the most similar boxes from different workers based on the Jaccard index of the adjusted annotations.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentBoundingBox

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentBoundingBox

Semantic segmentation verification - Uses a variant of the Expectation Maximization approach to estimate the true class of verification judgment for semantic segmentation labels based on annotations from individual workers.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VerificationSemanticSegmentation

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VerificationSemanticSegmentation

Semantic segmentation adjustment - Treats each pixel in an image as a multi-class classification and treats adjusted pixel annotations from workers as \"votes\" for the correct label.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentSemanticSegmentation

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentSemanticSegmentation

Video Frame Object Detection Adjustment - Use this task type when you want workers to adjust bounding boxes that workers have added to video frames to classify and localize objects in a sequence of video frames.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentVideoObjectDetection

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentVideoObjectDetection

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentVideoObjectDetection

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentVideoObjectDetection

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentVideoObjectDetection

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentVideoObjectDetection

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentVideoObjectDetection

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentVideoObjectDetection

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentVideoObjectDetection

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentVideoObjectDetection

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentVideoObjectDetection

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentVideoObjectDetection

Video Frame Object Tracking Adjustment - Use this task type when you want workers to adjust bounding boxes that workers have added to video frames to track object movement across a sequence of video frames.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentVideoObjectTracking

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentVideoObjectTracking

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentVideoObjectTracking

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentVideoObjectTracking

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentVideoObjectTracking

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentVideoObjectTracking

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentVideoObjectTracking

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentVideoObjectTracking

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentVideoObjectTracking

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentVideoObjectTracking

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentVideoObjectTracking

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentVideoObjectTracking

3D point cloud object detection adjustment - Adjust 3D cuboids in a point cloud frame.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudObjectDetection

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudObjectDetection

3D point cloud object tracking adjustment - Adjust 3D cuboids across a sequence of point cloud frames.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudObjectTracking

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudObjectTracking

3D point cloud semantic segmentation adjustment - Adjust semantic segmentation masks in a 3D point cloud.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudSemanticSegmentation

  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudSemanticSegmentation

", "LabelingJobSummary$PreHumanTaskLambdaArn": "

The Amazon Resource Name (ARN) of a Lambda function. The function is run before each data object is sent to a worker.

", "LabelingJobSummary$AnnotationConsolidationLambdaArn": "

The Amazon Resource Name (ARN) of the Lambda function used to consolidate the annotations from individual workers into a label for a data object. For more information, see Annotation Consolidation.

" } @@ -3505,6 +3575,22 @@ "refs": { } }, + "ListWorkforcesRequest": { + "base": null, + "refs": { + } + }, + "ListWorkforcesResponse": { + "base": null, + "refs": { + } + }, + "ListWorkforcesSortByOptions": { + "base": null, + "refs": { + "ListWorkforcesRequest$SortBy": "

Sort workforces using the workforce name or creation date.

" + } + }, "ListWorkteamsRequest": { "base": null, "refs": { @@ -3544,6 +3630,7 @@ "refs": { "CreateTransformJobRequest$MaxConcurrentTransforms": "

The maximum number of parallel requests that can be sent to each instance in a transform job. If MaxConcurrentTransforms is set to 0 or left unset, Amazon SageMaker checks the optional execution-parameters to determine the settings for your chosen algorithm. If the execution-parameters endpoint is not enabled, the default value is 1. For more information on execution-parameters, see How Containers Serve Requests. For built-in algorithms, you don't need to set a value for MaxConcurrentTransforms.

", "DescribeTransformJobResponse$MaxConcurrentTransforms": "

The maximum number of parallel requests on each instance node that can be launched in a transform job. The default value is 1.

", + "TransformJob$MaxConcurrentTransforms": "

The maximum number of parallel requests that can be sent to each instance in a transform job. If MaxConcurrentTransforms is set to 0 or left unset, SageMaker checks the optional execution-parameters to determine the settings for your chosen algorithm. If the execution-parameters endpoint is not enabled, the default value is 1. For built-in algorithms, you don't need to set a value for MaxConcurrentTransforms.

", "TransformJobDefinition$MaxConcurrentTransforms": "

The maximum number of parallel requests that can be sent to each instance in a transform job. The default value is 1.

" } }, @@ -3570,6 +3657,7 @@ "refs": { "CreateTransformJobRequest$MaxPayloadInMB": "

The maximum allowed size of the payload, in MB. A payload is the data portion of a record (without metadata). The value in MaxPayloadInMB must be greater than, or equal to, the size of a single record. To estimate the size of a record in MB, divide the size of your dataset by the number of records. To ensure that the records fit within the maximum payload size, we recommend using a slightly larger value. The default value is 6 MB.

For cases where the payload might be arbitrarily large and is transmitted using HTTP chunked encoding, set the value to 0. This feature works only in supported algorithms. Currently, Amazon SageMaker built-in algorithms do not support HTTP chunked encoding.

", "DescribeTransformJobResponse$MaxPayloadInMB": "

The maximum payload size, in MB, used in the transform job.

", + "TransformJob$MaxPayloadInMB": "

The maximum allowed size of the payload, in MB. A payload is the data portion of a record (without metadata). The value in MaxPayloadInMB must be greater than, or equal to, the size of a single record. To estimate the size of a record in MB, divide the size of your dataset by the number of records. To ensure that the records fit within the maximum payload size, we recommend using a slightly larger value. The default value is 6 MB. For cases where the payload might be arbitrarily large and is transmitted using HTTP chunked encoding, set the value to 0. This feature works only in supported algorithms. Currently, SageMaker built-in algorithms do not support HTTP chunked encoding.

", "TransformJobDefinition$MaxPayloadInMB": "

The maximum payload size allowed, in MB. A payload is the data portion of a record (without metadata). The default value is 6 MB.
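To illustrate how these limits are set in practice, here is a minimal Go sketch of a CreateTransformJobInput that pins MaxConcurrentTransforms and MaxPayloadInMB; the job name, model name, S3 URIs, and instance type are placeholder values, not part of this release.

```go
// Sketch: configure batch transform concurrency and payload limits.
// All names and S3 locations below are placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	input := &sagemaker.CreateTransformJobInput{
		TransformJobName:        aws.String("example-transform-job"),
		ModelName:               aws.String("example-model"),
		MaxConcurrentTransforms: aws.Int64(4), // parallel requests per instance
		MaxPayloadInMB:          aws.Int64(6), // per-record payload limit; 0 enables chunked encoding
		TransformInput: &sagemaker.TransformInput{
			DataSource: &sagemaker.TransformDataSource{
				S3DataSource: &sagemaker.TransformS3DataSource{
					S3DataType: aws.String("S3Prefix"),
					S3Uri:      aws.String("s3://bucket-name/input-name-prefix/"),
				},
			},
		},
		TransformOutput: &sagemaker.TransformOutput{
			S3OutputPath: aws.String("s3://bucket-name/output-name-prefix/"),
		},
		TransformResources: &sagemaker.TransformResources{
			InstanceCount: aws.Int64(1),
			InstanceType:  aws.String("ml.m4.xlarge"),
		},
	}
	fmt.Println(input) // pass to sagemaker.New(sess).CreateTransformJob(input) to submit
}
```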

" } }, @@ -3609,6 +3697,7 @@ "ListTrialComponentsRequest$MaxResults": "

The maximum number of components to return in the response. The default value is 10.

", "ListTrialsRequest$MaxResults": "

The maximum number of trials to return in the response. The default value is 10.

", "ListUserProfilesRequest$MaxResults": "

The maximum number of user profiles to return in the response.

", + "ListWorkforcesRequest$MaxResults": "

The maximum number of workforces returned in the response.

", "ListWorkteamsRequest$MaxResults": "

The maximum number of work teams to return in each page of the response.

", "SearchRequest$MaxResults": "

The maximum number of results to return.

" } @@ -3716,7 +3805,8 @@ "base": "

Configures the timeout and maximum number of retries for processing a transform job invocation.

", "refs": { "CreateTransformJobRequest$ModelClientConfig": "

Configures the timeout and maximum number of retries for processing a transform job invocation.

", - "DescribeTransformJobResponse$ModelClientConfig": "

The timeout and maximum number of retries for processing a transform job invocation.

" + "DescribeTransformJobResponse$ModelClientConfig": "

The timeout and maximum number of retries for processing a transform job invocation.

", + "TransformJob$ModelClientConfig": null } }, "ModelName": { @@ -3729,7 +3819,8 @@ "DescribeModelOutput$ModelName": "

Name of the Amazon SageMaker model.

", "DescribeTransformJobResponse$ModelName": "

The name of the model used in the transform job.

", "ModelSummary$ModelName": "

The name of the model that you want a summary for.

", - "ProductionVariant$ModelName": "

The name of the model that you want to host. This is the name that you specified when creating the model.

" + "ProductionVariant$ModelName": "

The name of the model that you want to host. This is the name that you specified when creating the model.

", + "TransformJob$ModelName": "

The name of the model associated with the transform job.

" } }, "ModelNameContains": { @@ -4115,6 +4206,8 @@ "ListTrialsResponse$NextToken": "

A token for getting the next set of trials, if there are any.

", "ListUserProfilesRequest$NextToken": "

If the previous response was truncated, you will receive this token. Use it in your next request to receive the next set of results.

", "ListUserProfilesResponse$NextToken": "

If the previous response was truncated, you will receive this token. Use it in your next request to receive the next set of results.

", + "ListWorkforcesRequest$NextToken": "

A token to resume pagination.

", + "ListWorkforcesResponse$NextToken": "

A token to resume pagination.

", "ListWorkteamsRequest$NextToken": "

If the result of the previous ListWorkteams request was truncated, the response includes a NextToken. To retrieve the next set of labeling jobs, use the token in the next request.

", "ListWorkteamsResponse$NextToken": "

If the response is truncated, Amazon SageMaker returns this token. To retrieve the next set of work teams, use it in the subsequent request.

", "SearchRequest$NextToken": "

If more than MaxResults resources match the specified SearchExpression, the response includes a NextToken. The NextToken can be passed to the next SearchRequest to continue retrieving results.

", @@ -4337,6 +4430,42 @@ "HyperParameterTuningJobSummary$ObjectiveStatusCounters": "

The ObjectiveStatusCounters object that specifies the numbers of training jobs, categorized by objective metric status, that this tuning job launched.

" } }, + "OidcConfig": { + "base": "

Use this parameter to configure your OIDC Identity Provider (IdP).

", + "refs": { + "CreateWorkforceRequest$OidcConfig": "

Use this parameter to configure a private workforce using your own OIDC Identity Provider. Do not use CognitoConfig if you specify values for OidcConfig.

", + "UpdateWorkforceRequest$OidcConfig": "

Use this parameter to update your OIDC Identity Provider (IdP) configuration for a workforce made using your own IdP.

" + } + }, + "OidcConfigForResponse": { + "base": "

Your OIDC Identity Provider (IdP) workforce configuration.

", + "refs": { + "Workforce$OidcConfig": "

The configuration of an OIDC Identity Provider (IdP) private workforce.

" + } + }, + "OidcEndpoint": { + "base": null, + "refs": { + "OidcConfig$Issuer": "

The OIDC IdP issuer used to configure your private workforce.

", + "OidcConfig$AuthorizationEndpoint": "

The OIDC IdP authorization endpoint used to configure your private workforce.

", + "OidcConfig$TokenEndpoint": "

The OIDC IdP token endpoint used to configure your private workforce.

", + "OidcConfig$UserInfoEndpoint": "

The OIDC IdP user information endpoint used to configure your private workforce.

", + "OidcConfig$LogoutEndpoint": "

The OIDC IdP logout endpoint used to configure your private workforce.

", + "OidcConfig$JwksUri": "

The OIDC IdP JSON Web Key Set (Jwks) URI used to configure your private workforce.

", + "OidcConfigForResponse$Issuer": "

The OIDC IdP issuer used to configure your private workforce.

", + "OidcConfigForResponse$AuthorizationEndpoint": "

The OIDC IdP authorization endpoint used to configure your private workforce.

", + "OidcConfigForResponse$TokenEndpoint": "

The OIDC IdP token endpoint used to configure your private workforce.

", + "OidcConfigForResponse$UserInfoEndpoint": "

The OIDC IdP user information endpoint used to configure your private workforce.

", + "OidcConfigForResponse$LogoutEndpoint": "

The OIDC IdP logout endpoint used to configure your private workforce.

", + "OidcConfigForResponse$JwksUri": "

The OIDC IdP JSON Web Key Set (Jwks) URI used to configure your private workforce.
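As a hedged illustration of how the endpoint fields above fit together, the following Go sketch builds a CreateWorkforce call against a hypothetical IdP; the workforce name and URLs are placeholders, and ClientId/ClientSecret are assumed to be the IdP client-credential fields on OidcConfig (they are not shown in the excerpt above).

```go
// Sketch: create a private workforce backed by your own OIDC IdP.
// All names and URLs below are placeholders for your IdP's values.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := sagemaker.New(sess)

	out, err := svc.CreateWorkforce(&sagemaker.CreateWorkforceInput{
		WorkforceName: aws.String("example-oidc-workforce"),
		OidcConfig: &sagemaker.OidcConfig{
			ClientId:              aws.String("example-client-id"),     // assumed IdP client credential field
			ClientSecret:          aws.String("example-client-secret"), // assumed IdP client credential field
			Issuer:                aws.String("https://idp.example.com"),
			AuthorizationEndpoint: aws.String("https://idp.example.com/authorize"),
			TokenEndpoint:         aws.String("https://idp.example.com/token"),
			UserInfoEndpoint:      aws.String("https://idp.example.com/userinfo"),
			LogoutEndpoint:        aws.String("https://idp.example.com/logout"),
			JwksUri:               aws.String("https://idp.example.com/.well-known/jwks.json"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.WorkforceArn))
}
```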

" + } + }, + "OidcMemberDefinition": { + "base": "

A list of user groups that exist in your OIDC Identity Provider (IdP). One to ten groups can be used to create a single private work team. When you add a user group to the list of Groups, you can add that user group to one or more private work teams. If you add a user group to a private work team, all workers in that user group are added to the work team.

", + "refs": { + "MemberDefinition$OidcMemberDefinition": "

A list of user groups that exist in your OIDC Identity Provider (IdP). One to ten groups can be used to create a single private work team. When you add a user group to the list of Groups, you can add that user group to one or more private work teams. If you add a user group to a private work team, all workers in that user group are added to the work team.
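A minimal Go sketch of attaching IdP user groups to a private work team through OidcMemberDefinition; the workforce, team, and group names are placeholders, and Groups is assumed to be the list field referred to above.

```go
// Sketch: build a CreateWorkteamInput whose members come from OIDC IdP user groups.
// Workforce, team, and group names are placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	input := &sagemaker.CreateWorkteamInput{
		WorkteamName:  aws.String("example-labeling-team"),
		WorkforceName: aws.String("example-oidc-workforce"),
		Description:   aws.String("Labelers drawn from two IdP groups"),
		MemberDefinitions: []*sagemaker.MemberDefinition{
			{
				OidcMemberDefinition: &sagemaker.OidcMemberDefinition{
					// One to ten IdP group names; every worker in these groups joins the team.
					Groups: []*string{aws.String("labelers"), aws.String("reviewers")},
				},
			},
		},
	}
	fmt.Println(input) // pass to sagemaker.New(sess).CreateWorkteam(input) to create the team
}
```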

" + } + }, "Operator": { "base": null, "refs": { @@ -4374,7 +4503,7 @@ } }, "OutputConfig": { - "base": "

Contains information about the output location for the compiled model and the device (target) that the model runs on.

", + "base": "

Contains information about the output location for the compiled model and the target device that the model runs on. TargetDevice and TargetPlatform are mutually exclusive, so you need to choose one of the two to specify your target device or platform. If you cannot find the device you want to use in the TargetDevice list, use TargetPlatform to describe the platform of your edge device, and use CompilerOptions if there are specific settings that are required or recommended for a particular TargetPlatform.

", "refs": { "CreateCompilationJobRequest$OutputConfig": "

Provides information about the output location for the compiled model and the target device the model runs on.

", "DescribeCompilationJobResponse$OutputConfig": "

Information about the output location for the compiled model and the target device that the model runs on.

" @@ -5025,7 +5154,7 @@ "MonitoringAppSpecification$PostAnalyticsProcessorSourceUri": "

An Amazon S3 URI to a script that is called after analysis has been performed. Applicable only for the built-in (first party) containers.

", "MonitoringConstraintsResource$S3Uri": "

The Amazon S3 URI for the constraints resource.

", "MonitoringStatisticsResource$S3Uri": "

The Amazon S3 URI for the statistics resource.

", - "OutputConfig$S3OutputLocation": "

Identifies the S3 path where you want Amazon SageMaker to store the model artifacts. For example, s3://bucket-name/key-name-prefix.

", + "OutputConfig$S3OutputLocation": "

Identifies the S3 bucket where you want Amazon SageMaker to store the model artifacts. For example, s3://bucket-name/key-name-prefix.

", "OutputDataConfig$S3OutputPath": "

Identifies the S3 path where you want Amazon SageMaker to store the model artifacts. For example, s3://bucket-name/key-name-prefix.

", "ProcessingS3Input$S3Uri": "

The URI for the Amazon S3 storage where you want Amazon SageMaker to download the artifacts needed to run a processing job.

", "ProcessingS3Output$S3Uri": "

A URI that identifies the Amazon S3 bucket where you want Amazon SageMaker to save the results of a processing job.

", @@ -5033,16 +5162,10 @@ "SharingSettings$S3OutputPath": "

When NotebookOutputOption is Allowed, the Amazon S3 bucket used to save the notebook cell output. If S3OutputPath isn't specified, a default bucket is used.

", "TensorBoardOutputConfig$S3OutputPath": "

Path to Amazon S3 storage location for TensorBoard output.

", "TransformOutput$S3OutputPath": "

The Amazon S3 path where you want Amazon SageMaker to store the results of the transform job. For example, s3://bucket-name/key-name-prefix.

For every S3 object used as input for the transform job, batch transform stores the transformed data with an .out suffix in a corresponding subfolder in the location in the output prefix. For example, for the input data stored at s3://bucket-name/input-name-prefix/dataset01/data.csv, batch transform stores the transformed data at s3://bucket-name/output-name-prefix/input-name-prefix/data.csv.out. Batch transform doesn't upload partially processed objects. For an input S3 object that contains multiple records, it creates an .out file only if the transform job succeeds on the entire file. When the input contains multiple S3 objects, the batch transform job processes the listed S3 objects and uploads only the output for successfully processed objects. If any object fails in the transform job, batch transform marks the job as failed to prompt investigation.

", - "TransformS3DataSource$S3Uri": "

Depending on the value specified for the S3DataType, identifies either a key name prefix or a manifest. For example:

  • A key name prefix might look like this: s3://bucketname/exampleprefix.

  • A manifest might look like this: s3://bucketname/example.manifest

    The manifest is an S3 object which is a JSON file with the following format:

    [ {\"prefix\": \"s3://customer_bucket/some/prefix/\"},

    \"relative/path/to/custdata-1\",

    \"relative/path/custdata-2\",

    ...

    \"relative/path/custdata-N\"

    ]

    The preceding JSON matches the following s3Uris:

    s3://customer_bucket/some/prefix/relative/path/to/custdata-1

    s3://customer_bucket/some/prefix/relative/path/custdata-2

    ...

    s3://customer_bucket/some/prefix/relative/path/custdata-N

    The complete set of S3Uris in this manifest constitutes the input data for the channel for this datasource. The object that each S3Uris points to must be readable by the IAM role that Amazon SageMaker uses to perform tasks on your behalf.

", + "TransformS3DataSource$S3Uri": "

Depending on the value specified for the S3DataType, identifies either a key name prefix or a manifest. For example:

  • A key name prefix might look like this: s3://bucketname/exampleprefix.

  • A manifest might look like this: s3://bucketname/example.manifest

    The manifest is an S3 object which is a JSON file with the following format:

    [ {\"prefix\": \"s3://customer_bucket/some/prefix/\"},

    \"relative/path/to/custdata-1\",

    \"relative/path/custdata-2\",

    ...

    \"relative/path/custdata-N\"

    ]

    The preceding JSON matches the following S3Uris:

    s3://customer_bucket/some/prefix/relative/path/to/custdata-1

    s3://customer_bucket/some/prefix/relative/path/custdata-2

    ...

    s3://customer_bucket/some/prefix/relative/path/custdata-N

    The complete set of S3Uris in this manifest constitutes the input data for the channel for this datasource. The object that each S3Uris points to must be readable by the IAM role that Amazon SageMaker uses to perform tasks on your behalf.

", "UiConfig$UiTemplateS3Uri": "

The Amazon S3 bucket location of the UI template, or worker task template. This is the template used to render the worker UI and tools for labeling job tasks. For more information about the contents of a UI template, see Creating Your Custom Labeling Task Template.

" } }, - "SageMakerImageArn": { - "base": null, - "refs": { - "ResourceSpec$SageMakerImageArn": "

The Amazon Resource Name (ARN) of the SageMaker image created on the instance.

" - } - }, "SamplingPercentage": { "base": null, "refs": { @@ -5223,6 +5346,7 @@ "ListTrialComponentsRequest$SortOrder": "

The sort order. The default value is Descending.

", "ListTrialsRequest$SortOrder": "

The sort order. The default value is Descending.

", "ListUserProfilesRequest$SortOrder": "

The sort order for the results. The default is Ascending.

", + "ListWorkforcesRequest$SortOrder": "

Sort workforces in ascending or descending order.

", "ListWorkteamsRequest$SortOrder": "

The sort order for results. The default is Ascending.

" } }, @@ -5260,6 +5384,7 @@ "SourceIpConfig": { "base": "

A list of IP address ranges (CIDRs). Used to create an allow list of IP addresses for a private workforce. For more information, see .

", "refs": { + "CreateWorkforceRequest$SourceIpConfig": null, "UpdateWorkforceRequest$SourceIpConfig": "

A list of one to ten worker IP address ranges (CIDRs) that can be used to access tasks assigned to this workforce.

Maximum: Ten CIDR values

", "Workforce$SourceIpConfig": "

A list of one to ten IP address ranges (CIDRs) to be added to the workforce allow list.
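A short Go sketch of restricting a workforce's allow list through UpdateWorkforce; the CIDR ranges are placeholders and Cidrs is assumed to be the list field on SourceIpConfig.

```go
// Sketch: restrict worker access to two CIDR ranges (placeholder values).
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	input := &sagemaker.UpdateWorkforceInput{
		WorkforceName: aws.String("default"),
		SourceIpConfig: &sagemaker.SourceIpConfig{
			// One to ten CIDR values; Cidrs is assumed to be the list field on SourceIpConfig.
			Cidrs: []*string{aws.String("203.0.113.0/24"), aws.String("198.51.100.0/24")},
		},
	}
	fmt.Println(input) // pass to sagemaker.New(sess).UpdateWorkforce(input) to apply
}
```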

" } @@ -5374,6 +5499,7 @@ "UserContext$UserProfileArn": "

The Amazon Resource Name (ARN) of the user's profile.

", "UserContext$UserProfileName": "

The name of the user's profile.

", "UserContext$DomainId": "

The domain associated with the user.

", + "Workforce$SubDomain": "

The subdomain for your OIDC Identity Provider.

", "Workteam$SubDomain": "

The URI of the labeling job's user interface. Workers open this URI to start labeling your data objects.

" } }, @@ -5494,12 +5620,14 @@ "CreateTrialComponentRequest$Tags": "

A list of tags to associate with the component. You can use Search API to search on the tags.

", "CreateTrialRequest$Tags": "

A list of tags to associate with the trial. You can use Search API to search on the tags.

", "CreateUserProfileRequest$Tags": "

Each tag consists of a key and an optional value. Tag keys must be unique per resource.

", + "CreateWorkforceRequest$Tags": "

An array of key-value pairs that contain metadata to help you categorize and organize your workforce. Each tag consists of a key and a value, both of which you define.

", "CreateWorkteamRequest$Tags": "

An array of key-value pairs.

For more information, see Resource Tag and Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

", "DescribeLabelingJobResponse$Tags": "

An array of key/value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

", "Experiment$Tags": "

The list of tags that are associated with the experiment. You can use Search API to search on the tags.

", "ListTagsOutput$Tags": "

An array of Tag objects, each with a tag key and a value.

", "ProcessingJob$Tags": "

An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

", "TrainingJob$Tags": "

An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

", + "TransformJob$Tags": "

A list of tags associated with the transform job.

", "Trial$Tags": "

The list of tags that are associated with the trial. You can use Search API to search on the tags.

", "TrialComponent$Tags": "

The list of tags that are associated with the component. You can use Search API to search on the tags.

" } @@ -5519,8 +5647,8 @@ "TargetDevice": { "base": null, "refs": { - "CompilationJobSummary$CompilationTargetDevice": "

The type of device that the model will run on after compilation has completed.

", - "OutputConfig$TargetDevice": "

Identifies the device that you want to run your model on after it has been compiled. For example: ml_c5.

" + "CompilationJobSummary$CompilationTargetDevice": "

The type of device that the model will run on after the compilation job has completed.

", + "OutputConfig$TargetDevice": "

Identifies the target device or the machine learning instance that you want to run your model on after the compilation has completed. Alternatively, you can specify the OS, architecture, and accelerator using the TargetPlatform fields instead of TargetDevice.

" } }, "TargetObjectiveMetricValue": { @@ -5529,6 +5657,33 @@ "TuningJobCompletionCriteria$TargetObjectiveMetricValue": "

The objective metric's value.

" } }, + "TargetPlatform": { + "base": "

Contains information about a target platform that you want your model to run on, such as OS, architecture, and accelerators. It is an alternative to TargetDevice.

", + "refs": { + "OutputConfig$TargetPlatform": "

Contains information about a target platform that you want your model to run on, such as OS, architecture, and accelerators. It is an alternative to TargetDevice.

The following examples show how to configure the TargetPlatform and CompilerOptions JSON strings for popular target platforms:

  • Raspberry Pi 3 Model B+

    \"TargetPlatform\": {\"Os\": \"LINUX\", \"Arch\": \"ARM_EABIHF\"},

    \"CompilerOptions\": {'mattr': ['+neon']}

  • Jetson TX2

    \"TargetPlatform\": {\"Os\": \"LINUX\", \"Arch\": \"ARM64\", \"Accelerator\": \"NVIDIA\"},

    \"CompilerOptions\": {'gpu-code': 'sm_62', 'trt-ver': '6.0.1', 'cuda-ver': '10.0'}

  • EC2 m5.2xlarge instance OS

    \"TargetPlatform\": {\"Os\": \"LINUX\", \"Arch\": \"X86_64\", \"Accelerator\": \"NVIDIA\"},

    \"CompilerOptions\": {'mcpu': 'skylake-avx512'}

  • RK3399

    \"TargetPlatform\": {\"Os\": \"LINUX\", \"Arch\": \"ARM64\", \"Accelerator\": \"MALI\"}

  • ARMv7 phone (CPU)

    \"TargetPlatform\": {\"Os\": \"ANDROID\", \"Arch\": \"ARM_EABI\"},

    \"CompilerOptions\": {'ANDROID_PLATFORM': 25, 'mattr': ['+neon']}

  • ARMv8 phone (CPU)

    \"TargetPlatform\": {\"Os\": \"ANDROID\", \"Arch\": \"ARM64\"},

    \"CompilerOptions\": {'ANDROID_PLATFORM': 29}

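To make one of the rows above concrete, the following Go sketch builds an OutputConfig for a Jetson TX2-class target (Linux, ARM64, NVIDIA accelerator); the S3 location is a placeholder and CompilerOptions is assumed to be the JSON-string field added in this release.

```go
// Sketch: OutputConfig for compiling a model for a Jetson TX2-class target,
// mirroring the TargetPlatform/CompilerOptions example values above.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	out := &sagemaker.OutputConfig{
		S3OutputLocation: aws.String("s3://bucket-name/compiled-models/"), // placeholder
		TargetPlatform: &sagemaker.TargetPlatform{
			Os:          aws.String("LINUX"),
			Arch:        aws.String("ARM64"),
			Accelerator: aws.String("NVIDIA"),
		},
		// Assumed JSON-string field carrying the extra compiler settings shown above.
		CompilerOptions: aws.String(`{"gpu-code": "sm_62", "trt-ver": "6.0.1", "cuda-ver": "10.0"}`),
	}
	fmt.Println(out) // set as CreateCompilationJobInput.OutputConfig when starting the job
}
```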
" + } + }, + "TargetPlatformAccelerator": { + "base": null, + "refs": { + "CompilationJobSummary$CompilationTargetPlatformAccelerator": "

The type of accelerator that the model will run on after the compilation job has completed.

", + "TargetPlatform$Accelerator": "

Specifies a target platform accelerator (optional).

  • NVIDIA: Nvidia graphics processing unit. It also requires the gpu-code, trt-ver, and cuda-ver compiler options

  • MALI: ARM Mali graphics processor

  • INTEL_GRAPHICS: Integrated Intel graphics

" + } + }, + "TargetPlatformArch": { + "base": null, + "refs": { + "CompilationJobSummary$CompilationTargetPlatformArch": "

The type of architecture that the model will run on after the compilation job has completed.

", + "TargetPlatform$Arch": "

Specifies a target platform architecture.

  • X86_64: 64-bit version of the x86 instruction set.

  • X86: 32-bit version of the x86 instruction set.

  • ARM64: ARMv8 64-bit CPU.

  • ARM_EABIHF: ARMv7 32-bit, Hard Float.

  • ARM_EABI: ARMv7 32-bit, Soft Float. Used by Android 32-bit ARM platform.

" + } + }, + "TargetPlatformOs": { + "base": null, + "refs": { + "CompilationJobSummary$CompilationTargetPlatformOs": "

The type of OS that the model will run on after the compilation job has completed.

", + "TargetPlatform$Os": "

Specifies a target platform OS.

  • LINUX: Linux-based operating systems.

  • ANDROID: Android operating systems. Android API level can be specified using the ANDROID_PLATFORM compiler option. For example, \"CompilerOptions\": {'ANDROID_PLATFORM': 28}

" + } + }, "TaskAvailabilityLifetimeInSeconds": { "base": null, "refs": { @@ -5769,6 +5924,9 @@ "TrainingJobSummary$CreationTime": "

A timestamp that shows when the training job was created.

", "TrainingJobSummary$TrainingEndTime": "

A timestamp that shows when the training job ended. This field is set only if the training job has one of the terminal statuses (Completed, Failed, or Stopped).

", "TrainingJobSummary$LastModifiedTime": "

Timestamp when the training job was last modified.

", + "TransformJob$CreationTime": "

A timestamp that shows when the transform job was created.

", + "TransformJob$TransformStartTime": "

Indicates when the transform job starts on ML instances. You are billed for the time interval between this time and the value of TransformEndTime.

", + "TransformJob$TransformEndTime": "

Indicates when the transform job has been completed, or has stopped or failed. You are billed for the time interval between this time and the value of TransformStartTime.

", "TransformJobSummary$CreationTime": "

A timestamp that shows when the transform job was created.

", "TransformJobSummary$TransformEndTime": "

Indicates when the transform job ends on compute instances. For successful jobs and stopped jobs, this is the exact time recorded after the results are uploaded. For failed jobs, this is when Amazon SageMaker detected that the job failed.

", "TransformJobSummary$LastModifiedTime": "

Indicates when the transform job was last modified.

", @@ -5789,6 +5947,7 @@ "UpdateTrialComponentRequest$StartTime": "

When the component started.

", "UpdateTrialComponentRequest$EndTime": "

When the component ended.

", "Workforce$LastUpdatedDate": "

The most recent date that was used to successfully add one or more IP address ranges (CIDRs) to a private workforce's allow list.

", + "Workforce$CreateDate": "

The date that the workforce was created.

", "Workteam$CreateDate": "

The date and time that the work team was created (timestamp).

", "Workteam$LastUpdatedDate": "

The date and time that the work team was last updated (timestamp).

" } @@ -5942,6 +6101,7 @@ "refs": { "CreateTransformJobRequest$Environment": "

The environment variables to set in the Docker container. We support up to 16 key and values entries in the map.

", "DescribeTransformJobResponse$Environment": "

The environment variables to set in the Docker container. We support up to 16 key and values entries in the map.

", + "TransformJob$Environment": "

The environment variables to set in the Docker container. We support up to 16 key and values entries in the map.

", "TransformJobDefinition$Environment": "

The environment variables to set in the Docker container. We support up to 16 key and values entries in the map.

" } }, @@ -5956,6 +6116,7 @@ "refs": { "CreateTransformJobRequest$TransformInput": "

Describes the input source and the way the transform job consumes it.

", "DescribeTransformJobResponse$TransformInput": "

Describes the dataset to be transformed and the Amazon S3 location where it is stored.

", + "TransformJob$TransformInput": null, "TransformJobDefinition$TransformInput": "

A description of the input source and the way the transform job consumes it.

" } }, @@ -5978,11 +6139,18 @@ "InferenceSpecification$SupportedTransformInstanceTypes": "

A list of the instance types on which a transformation job can be run or on which an endpoint can be deployed.

" } }, + "TransformJob": { + "base": "

A batch transform job. For information about SageMaker batch transform, see Use Batch Transform.

", + "refs": { + "TrialComponentSourceDetail$TransformJob": "

Information about a transform job that's the source of the trial component.

" + } + }, "TransformJobArn": { "base": null, "refs": { "CreateTransformJobResponse$TransformJobArn": "

The Amazon Resource Name (ARN) of the transform job.

", "DescribeTransformJobResponse$TransformJobArn": "

The Amazon Resource Name (ARN) of the transform job.

", + "TransformJob$TransformJobArn": "

The Amazon Resource Name (ARN) of the transform job.

", "TransformJobSummary$TransformJobArn": "

The Amazon Resource Name (ARN) of the transform job.

" } }, @@ -6000,6 +6168,7 @@ "DescribeTransformJobRequest$TransformJobName": "

The name of the transform job that you want to view details of.

", "DescribeTransformJobResponse$TransformJobName": "

The name of the transform job.

", "StopTransformJobRequest$TransformJobName": "

The name of the transform job to stop.

", + "TransformJob$TransformJobName": "

The name of the transform job.

", "TransformJobSummary$TransformJobName": "

The name of the transform job.

" } }, @@ -6008,6 +6177,7 @@ "refs": { "DescribeTransformJobResponse$TransformJobStatus": "

The status of the transform job. If the transform job failed, the reason is returned in the FailureReason field.

", "ListTransformJobsRequest$StatusEquals": "

A filter that retrieves only transform jobs with a specific status.

", + "TransformJob$TransformJobStatus": "

The status of the transform job.

Transform job statuses are:

  • InProgress - The job is in progress.

  • Completed - The job has completed.

  • Failed - The transform job has failed. To see the reason for the failure, see the FailureReason field in the response to a DescribeTransformJob call.

  • Stopping - The transform job is stopping.

  • Stopped - The transform job has stopped.

", "TransformJobSummary$TransformJobStatus": "

The status of the transform job.

" } }, @@ -6028,6 +6198,7 @@ "refs": { "CreateTransformJobRequest$TransformOutput": "

Describes the results of the transform job.

", "DescribeTransformJobResponse$TransformOutput": "

Identifies the Amazon S3 location where you want Amazon SageMaker to save the results from the transform job.

", + "TransformJob$TransformOutput": null, "TransformJobDefinition$TransformOutput": "

Identifies the Amazon S3 location where you want Amazon SageMaker to save the results from the transform job.

" } }, @@ -6036,6 +6207,7 @@ "refs": { "CreateTransformJobRequest$TransformResources": "

Describes the resources, including ML instance types and ML instance count, to use for the transform job.

", "DescribeTransformJobResponse$TransformResources": "

Describes the resources, including ML instance types and ML instance count, to use for the transform job.

", + "TransformJob$TransformResources": null, "TransformJobDefinition$TransformResources": "

Identifies the ML compute instances for the transform job.

" } }, @@ -6569,21 +6741,34 @@ "base": "

A single private workforce, which is automatically created when you create your first private work team. You can create one private workforce in each AWS Region. By default, any workforce-related API operation used in a specific region will apply to the workforce created in that region. To learn how to create a private workforce, see Create a Private Workforce.

", "refs": { "DescribeWorkforceResponse$Workforce": "

A single private workforce, which is automatically created when you create your first private work team. You can create one private workforce in each AWS Region. By default, any workforce-related API operation used in a specific region will apply to the workforce created in that region. To learn how to create a private workforce, see Create a Private Workforce.

", - "UpdateWorkforceResponse$Workforce": "

A single private workforce, which is automatically created when you create your first private work team. You can create one private workforce in each AWS Region. By default, any workforce-related API operation used in a specific region will apply to the workforce created in that region. To learn how to create a private workforce, see Create a Private Workforce.

" + "UpdateWorkforceResponse$Workforce": "

A single private workforce, which is automatically created when you create your first private work team. You can create one private workforce in each AWS Region. By default, any workforce-related API operation used in a specific region will apply to the workforce created in that region. To learn how to create a private workforce, see Create a Private Workforce.

", + "Workforces$member": null } }, "WorkforceArn": { "base": null, "refs": { - "Workforce$WorkforceArn": "

The Amazon Resource Name (ARN) of the private workforce.

" + "CreateWorkforceResponse$WorkforceArn": "

The Amazon Resource Name (ARN) of the workforce.

", + "Workforce$WorkforceArn": "

The Amazon Resource Name (ARN) of the private workforce.

", + "Workteam$WorkforceArn": "

The Amazon Resource Name (ARN) of the workforce.

" } }, "WorkforceName": { "base": null, "refs": { + "CreateWorkforceRequest$WorkforceName": "

The name of the private workforce.

", + "CreateWorkteamRequest$WorkforceName": "

The name of the workforce.

", + "DeleteWorkforceRequest$WorkforceName": "

The name of the workforce.

", "DescribeWorkforceRequest$WorkforceName": "

The name of the private workforce whose access you want to restrict. WorkforceName is automatically set to default when a workforce is created and cannot be modified.

", + "ListWorkforcesRequest$NameContains": "

A filter you can use to search for workforces using part of the workforce name.

", "UpdateWorkforceRequest$WorkforceName": "

The name of the private workforce whose access you want to restrict. WorkforceName is automatically set to default when a workforce is created and cannot be modified.

", - "Workforce$WorkforceName": "

The name of the private workforce whose access you want to restrict. WorkforceName is automatically set to default when a workforce is created and cannot be modified.

" + "Workforce$WorkforceName": "

The name of the private workforce.

" + } + }, + "Workforces": { + "base": null, + "refs": { + "ListWorkforcesResponse$Workforces": "

A list containing information about your workforce.
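Because ListWorkforces is paginated with NextToken and MaxResults (a paginator entry is also added below), the following Go sketch pages through workforces with the generated ListWorkforcesPages helper; the region and name filter are placeholder values.

```go
// Sketch: page through workforces whose names contain "oidc" (placeholder filter).
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := sagemaker.New(sess)

	input := &sagemaker.ListWorkforcesInput{
		NameContains: aws.String("oidc"),
		MaxResults:   aws.Int64(10),
	}
	err := svc.ListWorkforcesPages(input, func(page *sagemaker.ListWorkforcesOutput, lastPage bool) bool {
		for _, wf := range page.Workforces {
			fmt.Println(aws.StringValue(wf.WorkforceName), aws.StringValue(wf.WorkforceArn))
		}
		return true // keep paging until NextToken is exhausted
	})
	if err != nil {
		log.Fatal(err)
	}
}
```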

" } }, "Workteam": { diff --git a/models/apis/sagemaker/2017-07-24/paginators-1.json b/models/apis/sagemaker/2017-07-24/paginators-1.json index 0d5eb880392..7f87ca77493 100644 --- a/models/apis/sagemaker/2017-07-24/paginators-1.json +++ b/models/apis/sagemaker/2017-07-24/paginators-1.json @@ -180,6 +180,12 @@ "limit_key": "MaxResults", "result_key": "UserProfiles" }, + "ListWorkforces": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Workforces" + }, "ListWorkteams": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/service/cloudwatch/api.go b/service/cloudwatch/api.go index 635d88c5b8e..5d7f1bb2a4f 100644 --- a/service/cloudwatch/api.go +++ b/service/cloudwatch/api.go @@ -246,7 +246,7 @@ func (c *CloudWatch) DeleteDashboardsRequest(input *DeleteDashboardsInput) (req // DeleteDashboards API operation for Amazon CloudWatch. // -// Deletes all dashboards that you specify. You may specify up to 100 dashboards +// Deletes all dashboards that you specify. You can specify up to 100 dashboards // to delete. If there is an error during this call, no dashboards are deleted. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -335,8 +335,7 @@ func (c *CloudWatch) DeleteInsightRulesRequest(input *DeleteInsightRulesInput) ( // Permanently deletes the specified Contributor Insights rules. // // If you create a rule, delete it, and then re-create it with the same name, -// historical data from the first time the rule was created may or may not be -// available. +// historical data from the first time the rule was created might not be available. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1420,7 +1419,7 @@ func (c *CloudWatch) GetInsightRuleReportRequest(input *GetInsightRuleReportInpu // point. // // * MaxContributorValue -- the value of the top contributor for each data -// point. The identity of the contributor may change for each data point +// point. The identity of the contributor might change for each data point // in the graph. If this rule aggregates by COUNT, the top contributor for // each data point is the contributor with the most occurrences in that period. // If the rule aggregates by SUM, the top contributor is the contributor @@ -1569,9 +1568,9 @@ func (c *CloudWatch) GetMetricDataRequest(input *GetMetricDataInput) (req *reque // If you omit Unit in your request, all data that was collected with any unit // is returned, along with the corresponding units that were specified when // the data was reported to CloudWatch. If you specify a unit, the operation -// returns only data data that was collected with that unit specified. If you -// specify a unit that does not match the data collected, the results of the -// operation are null. CloudWatch does not perform unit conversions. +// returns only data that was collected with that unit specified. If you specify +// a unit that does not match the data collected, the results of the operation +// are null. CloudWatch does not perform unit conversions. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2091,9 +2090,13 @@ func (c *CloudWatch) ListMetricsRequest(input *ListMetricsInput) (req *request.R // Up to 500 results are returned for any one call. 
To retrieve additional results, // use the returned token with subsequent calls. // -// After you create a metric, allow up to fifteen minutes before the metric -// appears. Statistics about the metric, however, are available sooner using -// GetMetricData (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html) +// After you create a metric, allow up to 15 minutes before the metric appears. +// You can see statistics about the metric sooner by using GetMetricData (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html) +// or GetMetricStatistics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricStatistics.html). +// +// ListMetrics doesn't return information about metrics if those metrics haven't +// reported data in the past two weeks. To retrieve those metrics, use GetMetricData +// (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html) // or GetMetricStatistics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricStatistics.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2630,8 +2633,7 @@ func (c *CloudWatch) PutInsightRuleRequest(input *PutInsightRuleInput) (req *req // Analyze High-Cardinality Data (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContributorInsights.html). // // If you create a rule, delete it, and then re-create it with the same name, -// historical data from the first time the rule was created may or may not be -// available. +// historical data from the first time the rule was created might not be available. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2982,8 +2984,8 @@ func (c *CloudWatch) SetAlarmStateRequest(input *SetAlarmStateInput) (req *reque // DescribeAlarmHistory (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_DescribeAlarmHistory.html). // // If you use SetAlarmState on a composite alarm, the composite alarm is not -// guaranteed to return to its actual state. It will return to its actual state -// only once any of its children alarms change state. It is also re-evaluated +// guaranteed to return to its actual state. It returns to its actual state +// only once any of its children alarms change state. It is also reevaluated // if you update its configuration. // // If an alarm triggers EC2 Auto Scaling policies or application Auto Scaling @@ -3076,7 +3078,7 @@ func (c *CloudWatch) TagResourceRequest(input *TagResourceInput) (req *request.R // Contributor Insights rules. // // Tags can help you organize and categorize your resources. You can also use -// them to scope user permissions, by granting a user permission to access or +// them to scope user permissions by granting a user permission to access or // change only resources with certain tag values. // // Tags don't have any semantic meaning to AWS and are interpreted strictly @@ -4321,8 +4323,8 @@ type DescribeAlarmsInput struct { // is not returned. // // If you specify ChildrenOfAlarmName, you cannot specify any other parameters - // in the request except for MaxRecords and NextToken. If you do so, you will - // receive a validation error. + // in the request except for MaxRecords and NextToken. If you do so, you receive + // a validation error. 
// // Only the Alarm Name, ARN, StateValue (OK/ALARM/INSUFFICIENT_DATA), and StateUpdatedTimestamp // information are returned by this operation when you use this parameter. To @@ -4344,8 +4346,8 @@ type DescribeAlarmsInput struct { // alarm that you specify in ParentsOfAlarmName is not returned. // // If you specify ParentsOfAlarmName, you cannot specify any other parameters - // in the request except for MaxRecords and NextToken. If you do so, you will - // receive a validation error. + // in the request except for MaxRecords and NextToken. If you do so, you receive + // a validation error. // // Only the Alarm Name and ARN are returned by this operation when you use this // parameter. To get complete information about these alarms, perform another @@ -4624,7 +4626,7 @@ type DescribeInsightRulesInput struct { _ struct{} `type:"structure"` // This parameter is not currently used. Reserved for future use. If it is used - // in the future, the maximum value may be different. + // in the future, the maximum value might be different. MaxResults *int64 `min:"1" type:"integer"` // Reserved for future use. @@ -4698,16 +4700,20 @@ func (s *DescribeInsightRulesOutput) SetNextToken(v string) *DescribeInsightRule return s } -// Expands the identity of a metric. +// A dimension is a name/value pair that is part of the identity of a metric. +// You can assign up to 10 dimensions to a metric. Because dimensions are part +// of the unique identifier for a metric, whenever you add a unique name/value +// pair to one of your metrics, you are creating a new variation of that metric. type Dimension struct { _ struct{} `type:"structure"` - // The name of the dimension. + // The name of the dimension. Dimension names cannot contain blank spaces or + // non-ASCII characters. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` - // The value representing the dimension measurement. + // The value of the dimension. // // Value is a required field Value *string `min:"1" type:"string" required:"true"` @@ -5142,7 +5148,7 @@ type GetInsightRuleReportInput struct { // point. // // * MaxContributorValue -- the value of the top contributor for each data - // point. The identity of the contributor may change for each data point + // point. The identity of the contributor might change for each data point // in the graph. If this rule aggregates by COUNT, the top contributor for // each data point is the contributor with the most occurrences in that period. // If the rule aggregates by SUM, the top contributor is the contributor @@ -5496,7 +5502,7 @@ type GetMetricDataOutput struct { _ struct{} `type:"structure"` // Contains a message about this GetMetricData operation, if the operation results - // in such a message. An example of a message that may be returned is Maximum + // in such a message. An example of a message that might be returned is Maximum // number of allowed metrics exceeded. If there is a message, as much of the // operation as possible is still executed. // @@ -5640,9 +5646,9 @@ type GetMetricStatisticsInput struct { // The unit for a given metric. If you omit Unit, all data that was collected // with any unit is returned, along with the corresponding units that were specified // when the data was reported to CloudWatch. If you specify a unit, the operation - // returns only data data that was collected with that unit specified. If you - // specify a unit that does not match the data collected, the results of the - // operation are null. 
CloudWatch does not perform unit conversions. + // returns only data that was collected with that unit specified. If you specify + // a unit that does not match the data collected, the results of the operation + // are null. CloudWatch does not perform unit conversions. Unit *string `type:"string" enum:"StandardUnit"` } @@ -5880,7 +5886,7 @@ func (s *GetMetricWidgetImageInput) SetOutputFormat(v string) *GetMetricWidgetIm type GetMetricWidgetImageOutput struct { _ struct{} `type:"structure"` - // The image of the graph, in the output format specified. + // The image of the graph, in the output format specified. The output is base64-encoded. // // MetricWidgetImage is automatically base64 encoded/decoded by the SDK. MetricWidgetImage []byte `type:"blob"` @@ -6263,6 +6269,15 @@ type ListMetricsInput struct { // The token returned by a previous call to indicate that there is more data // available. NextToken *string `type:"string"` + + // To filter the results to show only metrics that have had data points published + // in the past three hours, specify this parameter with a value of PT3H. This + // is the only valid value for this parameter. + // + // The results that are returned are an approximation of the value you specify. + // There is a low probability that the returned results include metrics with + // last published data as much as 40 minutes more than the specified time interval. + RecentlyActive *string `type:"string" enum:"RecentlyActive"` } // String returns the string representation @@ -6325,10 +6340,16 @@ func (s *ListMetricsInput) SetNextToken(v string) *ListMetricsInput { return s } +// SetRecentlyActive sets the RecentlyActive field's value. +func (s *ListMetricsInput) SetRecentlyActive(v string) *ListMetricsInput { + s.RecentlyActive = &v + return s +} + type ListMetricsOutput struct { _ struct{} `type:"structure"` - // The metrics. + // The metrics that match your request. Metrics []*Metric `type:"list"` // The token that marks the start of the next batch of returned results. @@ -6366,7 +6387,7 @@ type ListTagsForResourceInput struct { // // The ARN format of a Contributor Insights rule is arn:aws:cloudwatch:Region:account-id:insight-rule:insight-rule-name // - // For more information on ARN format, see Resource Types Defined by Amazon + // For more information about ARN format, see Resource Types Defined by Amazon // CloudWatch (https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazoncloudwatch.html#amazoncloudwatch-resources-for-iam-policies) // in the Amazon Web Services General Reference. // @@ -7251,9 +7272,9 @@ type MetricStat struct { // In a Get operation, if you omit Unit then all data that was collected with // any unit is returned, along with the corresponding units that were specified // when the data was reported to CloudWatch. If you specify a unit, the operation - // returns only data data that was collected with that unit specified. If you - // specify a unit that does not match the data collected, the results of the - // operation are null. CloudWatch does not perform unit conversions. + // returns only data that was collected with that unit specified. If you specify + // a unit that does not match the data collected, the results of the operation + // are null. CloudWatch does not perform unit conversions. Unit *string `type:"string" enum:"StandardUnit"` } @@ -7512,8 +7533,7 @@ type PutCompositeAlarmInput struct { // The description for the composite alarm. AlarmDescription *string `type:"string"` - // The name for the composite alarm. 
This name must be unique within your AWS - // account. + // The name for the composite alarm. This name must be unique within the Region. // // AlarmName is a required field AlarmName *string `min:"1" type:"string" required:"true"` @@ -7767,7 +7787,7 @@ type PutDashboardOutput struct { // // If this result includes only warning messages, then the input was valid enough // for the dashboard to be created or modified, but some elements of the dashboard - // may not render. + // might not render. // // If this result includes error messages, the input was not valid and the operation // failed. @@ -7928,7 +7948,7 @@ type PutMetricAlarmInput struct { // The description for the alarm. AlarmDescription *string `type:"string"` - // The name for the alarm. This name must be unique within your AWS account. + // The name for the alarm. This name must be unique within the Region. // // AlarmName is a required field AlarmName *string `min:"1" type:"string" required:"true"` @@ -8045,7 +8065,7 @@ type PutMetricAlarmInput struct { // a metric that does not have sub-minute resolution, the alarm still attempts // to gather data at the period rate that you specify. In this case, it does // not receive data for the attempts that do not correspond to a one-minute - // data resolution, and the alarm may often lapse into INSUFFICENT_DATA status. + // data resolution, and the alarm might often lapse into INSUFFICENT_DATA status. // Specifying 10 or 30 also sets this alarm as a high-resolution alarm, which // has a higher charge than other alarms. For more information about pricing, // see Amazon CloudWatch Pricing (https://aws.amazon.com/cloudwatch/pricing/). @@ -8064,7 +8084,7 @@ type PutMetricAlarmInput struct { // as many as 50 tags with an alarm. // // Tags can help you organize and categorize your resources. You can also use - // them to scope user permissions, by granting a user permission to access or + // them to scope user permissions by granting a user permission to access or // change only resources with certain tag values. Tags []*Tag `type:"list"` @@ -8098,12 +8118,12 @@ type PutMetricAlarmInput struct { // Percent, are aggregated separately. // // If you don't specify Unit, CloudWatch retrieves all unit types that have - // been published for the metric and attempts to evaluate the alarm. Usually - // metrics are published with only one unit, so the alarm will work as intended. + // been published for the metric and attempts to evaluate the alarm. Usually, + // metrics are published with only one unit, so the alarm works as intended. // // However, if the metric is published with multiple types of units and you - // don't specify a unit, the alarm's behavior is not defined and will behave - // un-predictably. + // don't specify a unit, the alarm's behavior is not defined and it behaves + // predictably. // // We recommend omitting Unit so that you don't inadvertently specify an incorrect // unit that is not published for this metric. Doing so causes the alarm to @@ -8485,8 +8505,7 @@ func (s *Range) SetStartTime(v time.Time) *Range { type SetAlarmStateInput struct { _ struct{} `type:"structure"` - // The name for the alarm. This name must be unique within the AWS account. - // The maximum length is 255 characters. + // The name of the alarm. 
// // AlarmName is a required field AlarmName *string `min:"1" type:"string" required:"true"` @@ -8726,7 +8745,7 @@ type TagResourceInput struct { // // The ARN format of a Contributor Insights rule is arn:aws:cloudwatch:Region:account-id:insight-rule:insight-rule-name // - // For more information on ARN format, see Resource Types Defined by Amazon + // For more information about ARN format, see Resource Types Defined by Amazon // CloudWatch (https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazoncloudwatch.html#amazoncloudwatch-resources-for-iam-policies) // in the Amazon Web Services General Reference. // @@ -8813,7 +8832,7 @@ type UntagResourceInput struct { // // The ARN format of a Contributor Insights rule is arn:aws:cloudwatch:Region:account-id:insight-rule:insight-rule-name // - // For more information on ARN format, see Resource Types Defined by Amazon + // For more information about ARN format, see Resource Types Defined by Amazon // CloudWatch (https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazoncloudwatch.html#amazoncloudwatch-resources-for-iam-policies) // in the Amazon Web Services General Reference. // @@ -8934,6 +8953,11 @@ const ( HistoryItemTypeAction = "Action" ) +const ( + // RecentlyActivePt3h is a RecentlyActive enum value + RecentlyActivePt3h = "PT3H" +) + const ( // ScanByTimestampDescending is a ScanBy enum value ScanByTimestampDescending = "TimestampDescending" diff --git a/service/frauddetector/api.go b/service/frauddetector/api.go index 7aaf9b86594..3452251e417 100644 --- a/service/frauddetector/api.go +++ b/service/frauddetector/api.go @@ -2746,100 +2746,6 @@ func (c *FraudDetector) GetOutcomesPagesWithContext(ctx aws.Context, input *GetO return p.Err() } -const opGetPrediction = "GetPrediction" - -// GetPredictionRequest generates a "aws/request.Request" representing the -// client's request for the GetPrediction operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetPrediction for more information on using the GetPrediction -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetPredictionRequest method. -// req, resp := client.GetPredictionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/frauddetector-2019-11-15/GetPrediction -func (c *FraudDetector) GetPredictionRequest(input *GetPredictionInput) (req *request.Request, output *GetPredictionOutput) { - op := &request.Operation{ - Name: opGetPrediction, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetPredictionInput{} - } - - output = &GetPredictionOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetPrediction API operation for Amazon Fraud Detector. -// -// Evaluates an event against a detector version. If a version ID is not provided, -// the detector’s (ACTIVE) version is used. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for Amazon Fraud Detector's -// API operation GetPrediction for usage and error information. -// -// Returned Error Types: -// * ValidationException -// An exception indicating a specified value is not allowed. -// -// * ResourceNotFoundException -// An exception indicating the specified resource was not found. -// -// * InternalServerException -// An exception indicating an internal server error. -// -// * ThrottlingException -// An exception indicating a throttling error. -// -// * AccessDeniedException -// An exception indicating Amazon Fraud Detector does not have the needed permissions. -// This can occur if you submit a request, such as PutExternalModel, that specifies -// a role that is not in your account. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/frauddetector-2019-11-15/GetPrediction -func (c *FraudDetector) GetPrediction(input *GetPredictionInput) (*GetPredictionOutput, error) { - req, out := c.GetPredictionRequest(input) - return out, req.Send() -} - -// GetPredictionWithContext is the same as GetPrediction with the addition of -// the ability to pass a context and additional request options. -// -// See GetPrediction for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *FraudDetector) GetPredictionWithContext(ctx aws.Context, input *GetPredictionInput, opts ...request.Option) (*GetPredictionOutput, error) { - req, out := c.GetPredictionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - const opGetRules = "GetRules" // GetRulesRequest generates a "aws/request.Request" representing the @@ -5995,7 +5901,7 @@ type CreateVariableInput struct { // A collection of key and value pairs. Tags []*Tag `locationName:"tags" type:"list"` - // The variable type. + // The variable type. For more information see Variable types (https://docs.aws.amazon.com/frauddetector/latest/ug/create-a-variable.html#variable-types). // // Valid Values: AUTH_CODE | AVS | BILLING_ADDRESS_L1 | BILLING_ADDRESS_L2 | // BILLING_CITY | BILLING_COUNTRY | BILLING_NAME | BILLING_PHONE | BILLING_STATE @@ -6205,7 +6111,7 @@ type DeleteDetectorVersionInput struct { // The ID of the detector version to delete. // // DetectorVersionId is a required field - DetectorVersionId *string `locationName:"detectorVersionId" type:"string" required:"true"` + DetectorVersionId *string `locationName:"detectorVersionId" min:"1" type:"string" required:"true"` } // String returns the string representation @@ -6230,6 +6136,9 @@ func (s *DeleteDetectorVersionInput) Validate() error { if s.DetectorVersionId == nil { invalidParams.Add(request.NewErrParamRequired("DetectorVersionId")) } + if s.DetectorVersionId != nil && len(*s.DetectorVersionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DetectorVersionId", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -6511,7 +6420,7 @@ type DescribeModelVersionsInput struct { ModelType *string `locationName:"modelType" type:"string" enum:"ModelTypeEnum"` // The model version number. - ModelVersionNumber *string `locationName:"modelVersionNumber" type:"string"` + ModelVersionNumber *string `locationName:"modelVersionNumber" min:"3" type:"string"` // The next token from the previous results. 
NextToken *string `locationName:"nextToken" type:"string"` @@ -6536,6 +6445,9 @@ func (s *DescribeModelVersionsInput) Validate() error { if s.ModelId != nil && len(*s.ModelId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ModelId", 1)) } + if s.ModelVersionNumber != nil && len(*s.ModelVersionNumber) < 3 { + invalidParams.Add(request.NewErrParamMinLen("ModelVersionNumber", 3)) + } if invalidParams.Len() > 0 { return invalidParams @@ -6995,6 +6907,9 @@ type ExternalModel struct { // The input configuration. InputConfiguration *ModelInputConfiguration `locationName:"inputConfiguration" type:"structure"` + // The role used to invoke the model. + InvokeModelEndpointRoleArn *string `locationName:"invokeModelEndpointRoleArn" type:"string"` + // Timestamp of when the model was last updated. LastUpdatedTime *string `locationName:"lastUpdatedTime" type:"string"` @@ -7009,9 +6924,6 @@ type ExternalModel struct { // The output configuration. OutputConfiguration *ModelOutputConfiguration `locationName:"outputConfiguration" type:"structure"` - - // The role used to invoke the model. - Role *Role `locationName:"role" type:"structure"` } // String returns the string representation @@ -7048,6 +6960,12 @@ func (s *ExternalModel) SetInputConfiguration(v *ModelInputConfiguration) *Exter return s } +// SetInvokeModelEndpointRoleArn sets the InvokeModelEndpointRoleArn field's value. +func (s *ExternalModel) SetInvokeModelEndpointRoleArn(v string) *ExternalModel { + s.InvokeModelEndpointRoleArn = &v + return s +} + // SetLastUpdatedTime sets the LastUpdatedTime field's value. func (s *ExternalModel) SetLastUpdatedTime(v string) *ExternalModel { s.LastUpdatedTime = &v @@ -7078,12 +6996,6 @@ func (s *ExternalModel) SetOutputConfiguration(v *ModelOutputConfiguration) *Ext return s } -// SetRole sets the Role field's value. -func (s *ExternalModel) SetRole(v *Role) *ExternalModel { - s.Role = v - return s -} - // The message details. type FieldValidationMessage struct { _ struct{} `type:"structure"` @@ -7197,7 +7109,7 @@ type GetDetectorVersionInput struct { // The detector version ID. // // DetectorVersionId is a required field - DetectorVersionId *string `locationName:"detectorVersionId" type:"string" required:"true"` + DetectorVersionId *string `locationName:"detectorVersionId" min:"1" type:"string" required:"true"` } // String returns the string representation @@ -7222,6 +7134,9 @@ func (s *GetDetectorVersionInput) Validate() error { if s.DetectorVersionId == nil { invalidParams.Add(request.NewErrParamRequired("DetectorVersionId")) } + if s.DetectorVersionId != nil && len(*s.DetectorVersionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DetectorVersionId", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -7257,7 +7172,7 @@ type GetDetectorVersionOutput struct { DetectorId *string `locationName:"detectorId" min:"1" type:"string"` // The detector version ID. - DetectorVersionId *string `locationName:"detectorVersionId" type:"string"` + DetectorVersionId *string `locationName:"detectorVersionId" min:"1" type:"string"` // The Amazon SageMaker model endpoints included in the detector version. ExternalModelEndpoints []*string `locationName:"externalModelEndpoints" type:"list"` @@ -7549,7 +7464,7 @@ type GetEventPredictionInput struct { DetectorId *string `locationName:"detectorId" type:"string" required:"true"` // The detector version ID. 
- DetectorVersionId *string `locationName:"detectorVersionId" type:"string"` + DetectorVersionId *string `locationName:"detectorVersionId" min:"1" type:"string"` // The entity type (associated with the detector's event type) and specific // entity ID representing who performed the event. If an entity id is not available, @@ -7600,6 +7515,9 @@ func (s *GetEventPredictionInput) Validate() error { if s.DetectorId == nil { invalidParams.Add(request.NewErrParamRequired("DetectorId")) } + if s.DetectorVersionId != nil && len(*s.DetectorVersionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DetectorVersionId", 1)) + } if s.Entities == nil { invalidParams.Add(request.NewErrParamRequired("Entities")) } @@ -8046,7 +7964,7 @@ type GetModelVersionInput struct { // The model version number. // // ModelVersionNumber is a required field - ModelVersionNumber *string `locationName:"modelVersionNumber" type:"string" required:"true"` + ModelVersionNumber *string `locationName:"modelVersionNumber" min:"3" type:"string" required:"true"` } // String returns the string representation @@ -8074,6 +7992,9 @@ func (s *GetModelVersionInput) Validate() error { if s.ModelVersionNumber == nil { invalidParams.Add(request.NewErrParamRequired("ModelVersionNumber")) } + if s.ModelVersionNumber != nil && len(*s.ModelVersionNumber) < 3 { + invalidParams.Add(request.NewErrParamMinLen("ModelVersionNumber", 3)) + } if invalidParams.Len() > 0 { return invalidParams @@ -8115,7 +8036,7 @@ type GetModelVersionOutput struct { ModelType *string `locationName:"modelType" type:"string" enum:"ModelTypeEnum"` // The model version number. - ModelVersionNumber *string `locationName:"modelVersionNumber" type:"string"` + ModelVersionNumber *string `locationName:"modelVersionNumber" min:"3" type:"string"` // The model version status. Status *string `locationName:"status" type:"string"` @@ -8372,138 +8293,6 @@ func (s *GetOutcomesOutput) SetOutcomes(v []*Outcome) *GetOutcomesOutput { return s } -type GetPredictionInput struct { - _ struct{} `type:"structure"` - - // The detector ID. - // - // DetectorId is a required field - DetectorId *string `locationName:"detectorId" type:"string" required:"true"` - - // The detector version ID. - DetectorVersionId *string `locationName:"detectorVersionId" type:"string"` - - // Names of variables you defined in Amazon Fraud Detector to represent event - // data elements and their corresponding values for the event you are sending - // for evaluation. - EventAttributes map[string]*string `locationName:"eventAttributes" type:"map"` - - // The unique ID used to identify the event. - // - // EventId is a required field - EventId *string `locationName:"eventId" type:"string" required:"true"` - - // The Amazon SageMaker model endpoint input data blobs. - ExternalModelEndpointDataBlobs map[string]*ModelEndpointDataBlob `locationName:"externalModelEndpointDataBlobs" type:"map" sensitive:"true"` -} - -// String returns the string representation -func (s GetPredictionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetPredictionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetPredictionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetPredictionInput"} - if s.DetectorId == nil { - invalidParams.Add(request.NewErrParamRequired("DetectorId")) - } - if s.EventId == nil { - invalidParams.Add(request.NewErrParamRequired("EventId")) - } - if s.ExternalModelEndpointDataBlobs != nil { - for i, v := range s.ExternalModelEndpointDataBlobs { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ExternalModelEndpointDataBlobs", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDetectorId sets the DetectorId field's value. -func (s *GetPredictionInput) SetDetectorId(v string) *GetPredictionInput { - s.DetectorId = &v - return s -} - -// SetDetectorVersionId sets the DetectorVersionId field's value. -func (s *GetPredictionInput) SetDetectorVersionId(v string) *GetPredictionInput { - s.DetectorVersionId = &v - return s -} - -// SetEventAttributes sets the EventAttributes field's value. -func (s *GetPredictionInput) SetEventAttributes(v map[string]*string) *GetPredictionInput { - s.EventAttributes = v - return s -} - -// SetEventId sets the EventId field's value. -func (s *GetPredictionInput) SetEventId(v string) *GetPredictionInput { - s.EventId = &v - return s -} - -// SetExternalModelEndpointDataBlobs sets the ExternalModelEndpointDataBlobs field's value. -func (s *GetPredictionInput) SetExternalModelEndpointDataBlobs(v map[string]*ModelEndpointDataBlob) *GetPredictionInput { - s.ExternalModelEndpointDataBlobs = v - return s -} - -type GetPredictionOutput struct { - _ struct{} `type:"structure"` - - // The model scores for models used in the detector version. - ModelScores []*ModelScores `locationName:"modelScores" type:"list"` - - // The prediction outcomes. - Outcomes []*string `locationName:"outcomes" type:"list"` - - // The rule results in the prediction. - RuleResults []*RuleResult `locationName:"ruleResults" type:"list"` -} - -// String returns the string representation -func (s GetPredictionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetPredictionOutput) GoString() string { - return s.String() -} - -// SetModelScores sets the ModelScores field's value. -func (s *GetPredictionOutput) SetModelScores(v []*ModelScores) *GetPredictionOutput { - s.ModelScores = v - return s -} - -// SetOutcomes sets the Outcomes field's value. -func (s *GetPredictionOutput) SetOutcomes(v []*string) *GetPredictionOutput { - s.Outcomes = v - return s -} - -// SetRuleResults sets the RuleResults field's value. -func (s *GetPredictionOutput) SetRuleResults(v []*RuleResult) *GetPredictionOutput { - s.RuleResults = v - return s -} - type GetRulesInput struct { _ struct{} `type:"structure"` @@ -8522,7 +8311,7 @@ type GetRulesInput struct { RuleId *string `locationName:"ruleId" min:"1" type:"string"` // The rule version. 
- RuleVersion *string `locationName:"ruleVersion" type:"string"` + RuleVersion *string `locationName:"ruleVersion" min:"1" type:"string"` } // String returns the string representation @@ -8550,6 +8339,9 @@ func (s *GetRulesInput) Validate() error { if s.RuleId != nil && len(*s.RuleId) < 1 { invalidParams.Add(request.NewErrParamMinLen("RuleId", 1)) } + if s.RuleVersion != nil && len(*s.RuleVersion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RuleVersion", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -9439,7 +9231,7 @@ type ModelVersionDetail struct { ModelType *string `locationName:"modelType" type:"string" enum:"ModelTypeEnum"` // The model version number. - ModelVersionNumber *string `locationName:"modelVersionNumber" type:"string"` + ModelVersionNumber *string `locationName:"modelVersionNumber" min:"3" type:"string"` // The status of the model version. Status *string `locationName:"status" type:"string"` @@ -9920,6 +9712,11 @@ type PutExternalModelInput struct { // InputConfiguration is a required field InputConfiguration *ModelInputConfiguration `locationName:"inputConfiguration" type:"structure" required:"true"` + // The IAM role used to invoke the model endpoint. + // + // InvokeModelEndpointRoleArn is a required field + InvokeModelEndpointRoleArn *string `locationName:"invokeModelEndpointRoleArn" type:"string" required:"true"` + // The model endpoints name. // // ModelEndpoint is a required field @@ -9940,11 +9737,6 @@ type PutExternalModelInput struct { // OutputConfiguration is a required field OutputConfiguration *ModelOutputConfiguration `locationName:"outputConfiguration" type:"structure" required:"true"` - // The IAM role used to invoke the model endpoint. - // - // Role is a required field - Role *Role `locationName:"role" type:"structure" required:"true"` - // A collection of key and value pairs. Tags []*Tag `locationName:"tags" type:"list"` } @@ -9968,6 +9760,9 @@ func (s *PutExternalModelInput) Validate() error { if s.InputConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("InputConfiguration")) } + if s.InvokeModelEndpointRoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("InvokeModelEndpointRoleArn")) + } if s.ModelEndpoint == nil { invalidParams.Add(request.NewErrParamRequired("ModelEndpoint")) } @@ -9983,9 +9778,6 @@ func (s *PutExternalModelInput) Validate() error { if s.OutputConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("OutputConfiguration")) } - if s.Role == nil { - invalidParams.Add(request.NewErrParamRequired("Role")) - } if s.InputConfiguration != nil { if err := s.InputConfiguration.Validate(); err != nil { invalidParams.AddNested("InputConfiguration", err.(request.ErrInvalidParams)) @@ -9996,11 +9788,6 @@ func (s *PutExternalModelInput) Validate() error { invalidParams.AddNested("OutputConfiguration", err.(request.ErrInvalidParams)) } } - if s.Role != nil { - if err := s.Role.Validate(); err != nil { - invalidParams.AddNested("Role", err.(request.ErrInvalidParams)) - } - } if s.Tags != nil { for i, v := range s.Tags { if v == nil { @@ -10030,6 +9817,12 @@ func (s *PutExternalModelInput) SetInputConfiguration(v *ModelInputConfiguration return s } +// SetInvokeModelEndpointRoleArn sets the InvokeModelEndpointRoleArn field's value. +func (s *PutExternalModelInput) SetInvokeModelEndpointRoleArn(v string) *PutExternalModelInput { + s.InvokeModelEndpointRoleArn = &v + return s +} + // SetModelEndpoint sets the ModelEndpoint field's value. 
func (s *PutExternalModelInput) SetModelEndpoint(v string) *PutExternalModelInput { s.ModelEndpoint = &v @@ -10054,12 +9847,6 @@ func (s *PutExternalModelInput) SetOutputConfiguration(v *ModelOutputConfigurati return s } -// SetRole sets the Role field's value. -func (s *PutExternalModelInput) SetRole(v *Role) *PutExternalModelInput { - s.Role = v - return s -} - // SetTags sets the Tags field's value. func (s *PutExternalModelInput) SetTags(v []*Tag) *PutExternalModelInput { s.Tags = v @@ -10362,59 +10149,6 @@ func (s *ResourceNotFoundException) RequestID() string { return s.RespMetadata.RequestID } -// The role used to invoke external model endpoints. -type Role struct { - _ struct{} `type:"structure"` - - // The role ARN. - // - // Arn is a required field - Arn *string `locationName:"arn" type:"string" required:"true"` - - // The role name. - // - // Name is a required field - Name *string `locationName:"name" type:"string" required:"true"` -} - -// String returns the string representation -func (s Role) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Role) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Role) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Role"} - if s.Arn == nil { - invalidParams.Add(request.NewErrParamRequired("Arn")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetArn sets the Arn field's value. -func (s *Role) SetArn(v string) *Role { - s.Arn = &v - return s -} - -// SetName sets the Name field's value. -func (s *Role) SetName(v string) *Role { - s.Name = &v - return s -} - // A rule. type Rule struct { _ struct{} `type:"structure"` @@ -10432,7 +10166,7 @@ type Rule struct { // The rule version. // // RuleVersion is a required field - RuleVersion *string `locationName:"ruleVersion" type:"string" required:"true"` + RuleVersion *string `locationName:"ruleVersion" min:"1" type:"string" required:"true"` } // String returns the string representation @@ -10463,6 +10197,9 @@ func (s *Rule) Validate() error { if s.RuleVersion == nil { invalidParams.Add(request.NewErrParamRequired("RuleVersion")) } + if s.RuleVersion != nil && len(*s.RuleVersion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RuleVersion", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -10520,7 +10257,7 @@ type RuleDetail struct { RuleId *string `locationName:"ruleId" min:"1" type:"string"` // The rule version. - RuleVersion *string `locationName:"ruleVersion" type:"string"` + RuleVersion *string `locationName:"ruleVersion" min:"1" type:"string"` } // String returns the string representation @@ -11027,7 +10764,7 @@ type UpdateDetectorVersionInput struct { // The detector version ID. // // DetectorVersionId is a required field - DetectorVersionId *string `locationName:"detectorVersionId" type:"string" required:"true"` + DetectorVersionId *string `locationName:"detectorVersionId" min:"1" type:"string" required:"true"` // The Amazon SageMaker model endpoints to include in the detector version. 
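The frauddetector hunks above replace the nested Role structure on ExternalModel and PutExternalModelInput with a single InvokeModelEndpointRoleArn string (and drop the deprecated GetPrediction operation in favor of GetEventPrediction). A hedged sketch of the new request shape; the endpoint name, role ARN, and the "SAGEMAKER"/"ASSOCIATED" strings are placeholders, and the still-required InputConfiguration/OutputConfiguration fields are elided to keep the focus on the changed field:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/frauddetector"
)

func main() {
	svc := frauddetector.New(session.Must(session.NewSession()))

	input := &frauddetector.PutExternalModelInput{
		ModelEndpoint:       aws.String("my-sagemaker-endpoint"),
		ModelSource:         aws.String("SAGEMAKER"),
		ModelEndpointStatus: aws.String("ASSOCIATED"),
		// The invocation role is now a plain ARN instead of the removed Role struct.
		InvokeModelEndpointRoleArn: aws.String("arn:aws:iam::123456789012:role/fraud-detector-invoke"),
		// InputConfiguration and OutputConfiguration are still required by Validate();
		// they are omitted from this sketch.
	}

	if _, err := svc.PutExternalModel(input); err != nil {
		log.Println(err)
	}
}
```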
// @@ -11081,6 +10818,9 @@ func (s *UpdateDetectorVersionInput) Validate() error { if s.DetectorVersionId == nil { invalidParams.Add(request.NewErrParamRequired("DetectorVersionId")) } + if s.DetectorVersionId != nil && len(*s.DetectorVersionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DetectorVersionId", 1)) + } if s.ExternalModelEndpoints == nil { invalidParams.Add(request.NewErrParamRequired("ExternalModelEndpoints")) } @@ -11172,7 +10912,7 @@ type UpdateDetectorVersionMetadataInput struct { // The detector version ID. // // DetectorVersionId is a required field - DetectorVersionId *string `locationName:"detectorVersionId" type:"string" required:"true"` + DetectorVersionId *string `locationName:"detectorVersionId" min:"1" type:"string" required:"true"` } // String returns the string representation @@ -11203,6 +10943,9 @@ func (s *UpdateDetectorVersionMetadataInput) Validate() error { if s.DetectorVersionId == nil { invalidParams.Add(request.NewErrParamRequired("DetectorVersionId")) } + if s.DetectorVersionId != nil && len(*s.DetectorVersionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DetectorVersionId", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -11267,7 +11010,7 @@ type UpdateDetectorVersionStatusInput struct { // The detector version ID. // // DetectorVersionId is a required field - DetectorVersionId *string `locationName:"detectorVersionId" type:"string" required:"true"` + DetectorVersionId *string `locationName:"detectorVersionId" min:"1" type:"string" required:"true"` // The new status. // @@ -11297,6 +11040,9 @@ func (s *UpdateDetectorVersionStatusInput) Validate() error { if s.DetectorVersionId == nil { invalidParams.Add(request.NewErrParamRequired("DetectorVersionId")) } + if s.DetectorVersionId != nil && len(*s.DetectorVersionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DetectorVersionId", 1)) + } if s.Status == nil { invalidParams.Add(request.NewErrParamRequired("Status")) } @@ -11429,7 +11175,7 @@ type UpdateModelVersionInput struct { // The major version number. // // MajorVersionNumber is a required field - MajorVersionNumber *string `locationName:"majorVersionNumber" type:"string" required:"true"` + MajorVersionNumber *string `locationName:"majorVersionNumber" min:"1" type:"string" required:"true"` // The model ID. // @@ -11461,6 +11207,9 @@ func (s *UpdateModelVersionInput) Validate() error { if s.MajorVersionNumber == nil { invalidParams.Add(request.NewErrParamRequired("MajorVersionNumber")) } + if s.MajorVersionNumber != nil && len(*s.MajorVersionNumber) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MajorVersionNumber", 1)) + } if s.ModelId == nil { invalidParams.Add(request.NewErrParamRequired("ModelId")) } @@ -11532,7 +11281,7 @@ type UpdateModelVersionOutput struct { ModelType *string `locationName:"modelType" type:"string" enum:"ModelTypeEnum"` // The model version number of the model version updated. - ModelVersionNumber *string `locationName:"modelVersionNumber" type:"string"` + ModelVersionNumber *string `locationName:"modelVersionNumber" min:"3" type:"string"` // The status of the updated model version. Status *string `locationName:"status" type:"string"` @@ -11588,7 +11337,7 @@ type UpdateModelVersionStatusInput struct { // The model version number. 
// // ModelVersionNumber is a required field - ModelVersionNumber *string `locationName:"modelVersionNumber" type:"string" required:"true"` + ModelVersionNumber *string `locationName:"modelVersionNumber" min:"3" type:"string" required:"true"` // The model version status. // @@ -11621,6 +11370,9 @@ func (s *UpdateModelVersionStatusInput) Validate() error { if s.ModelVersionNumber == nil { invalidParams.Add(request.NewErrParamRequired("ModelVersionNumber")) } + if s.ModelVersionNumber != nil && len(*s.ModelVersionNumber) < 3 { + invalidParams.Add(request.NewErrParamMinLen("ModelVersionNumber", 3)) + } if s.Status == nil { invalidParams.Add(request.NewErrParamRequired("Status")) } @@ -11902,7 +11654,7 @@ type UpdateVariableInput struct { // Name is a required field Name *string `locationName:"name" type:"string" required:"true"` - // The variable type. + // The variable type. For more information see Variable types (https://docs.aws.amazon.com/frauddetector/latest/ug/create-a-variable.html#variable-types). VariableType *string `locationName:"variableType" type:"string"` } @@ -12036,7 +11788,7 @@ type Variable struct { // The data source of the variable. DataSource *string `locationName:"dataSource" type:"string" enum:"DataSource"` - // The data type of the variable. + // The data type of the variable. For more information see Variable types (https://docs.aws.amazon.com/frauddetector/latest/ug/create-a-variable.html#variable-types). DataType *string `locationName:"dataType" type:"string" enum:"DataType"` // The default value of the variable. @@ -12147,7 +11899,7 @@ type VariableEntry struct { // The name of the variable. Name *string `locationName:"name" type:"string"` - // The type of the variable. + // The type of the variable. For more information see Variable types (https://docs.aws.amazon.com/frauddetector/latest/ug/create-a-variable.html#variable-types). 
// // Valid Values: AUTH_CODE | AVS | BILLING_ADDRESS_L1 | BILLING_ADDRESS_L2 | // BILLING_CITY | BILLING_COUNTRY | BILLING_NAME | BILLING_PHONE | BILLING_STATE @@ -12282,35 +12034,11 @@ const ( ) const ( - // ModelVersionStatusTrainingInProgress is a ModelVersionStatus enum value - ModelVersionStatusTrainingInProgress = "TRAINING_IN_PROGRESS" - - // ModelVersionStatusTrainingComplete is a ModelVersionStatus enum value - ModelVersionStatusTrainingComplete = "TRAINING_COMPLETE" - - // ModelVersionStatusActivateRequested is a ModelVersionStatus enum value - ModelVersionStatusActivateRequested = "ACTIVATE_REQUESTED" - - // ModelVersionStatusActivateInProgress is a ModelVersionStatus enum value - ModelVersionStatusActivateInProgress = "ACTIVATE_IN_PROGRESS" - // ModelVersionStatusActive is a ModelVersionStatus enum value ModelVersionStatusActive = "ACTIVE" - // ModelVersionStatusInactivateInProgress is a ModelVersionStatus enum value - ModelVersionStatusInactivateInProgress = "INACTIVATE_IN_PROGRESS" - // ModelVersionStatusInactive is a ModelVersionStatus enum value ModelVersionStatusInactive = "INACTIVE" - - // ModelVersionStatusDeleteRequested is a ModelVersionStatus enum value - ModelVersionStatusDeleteRequested = "DELETE_REQUESTED" - - // ModelVersionStatusDeleteInProgress is a ModelVersionStatus enum value - ModelVersionStatusDeleteInProgress = "DELETE_IN_PROGRESS" - - // ModelVersionStatusError is a ModelVersionStatus enum value - ModelVersionStatusError = "ERROR" ) const ( diff --git a/service/frauddetector/frauddetectoriface/interface.go b/service/frauddetector/frauddetectoriface/interface.go index 461cfb74fa5..fc196ea1575 100644 --- a/service/frauddetector/frauddetectoriface/interface.go +++ b/service/frauddetector/frauddetectoriface/interface.go @@ -180,10 +180,6 @@ type FraudDetectorAPI interface { GetOutcomesPages(*frauddetector.GetOutcomesInput, func(*frauddetector.GetOutcomesOutput, bool) bool) error GetOutcomesPagesWithContext(aws.Context, *frauddetector.GetOutcomesInput, func(*frauddetector.GetOutcomesOutput, bool) bool, ...request.Option) error - GetPrediction(*frauddetector.GetPredictionInput) (*frauddetector.GetPredictionOutput, error) - GetPredictionWithContext(aws.Context, *frauddetector.GetPredictionInput, ...request.Option) (*frauddetector.GetPredictionOutput, error) - GetPredictionRequest(*frauddetector.GetPredictionInput) (*request.Request, *frauddetector.GetPredictionOutput) - GetRules(*frauddetector.GetRulesInput) (*frauddetector.GetRulesOutput, error) GetRulesWithContext(aws.Context, *frauddetector.GetRulesInput, ...request.Option) (*frauddetector.GetRulesOutput, error) GetRulesRequest(*frauddetector.GetRulesInput) (*request.Request, *frauddetector.GetRulesOutput) diff --git a/service/fsx/api.go b/service/fsx/api.go index 5519d4c1790..8cb64359c91 100644 --- a/service/fsx/api.go +++ b/service/fsx/api.go @@ -169,8 +169,11 @@ func (c *FSx) CreateBackupRequest(input *CreateBackupInput) (req *request.Reques // // * is not linked to an Amazon S3 data respository. // -// For more information, see https://docs.aws.amazon.com/fsx/latest/LustreGuide/lustre-backups.html -// (https://docs.aws.amazon.com/fsx/latest/LustreGuide/lustre-backups.html). +// For more information about backing up Amazon FSx for Lustre file systems, +// see Working with FSx for Lustre backups (https://docs.aws.amazon.com/fsx/latest/LustreGuide/using-backups-fsx.html). 
+// +// For more information about backing up Amazon FSx for Lustre file systems, +// see Working with FSx for Windows backups (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/using-backups.html). // // If a backup with the specified client request token exists, and the parameters // match, this operation returns the description of the existing backup. If @@ -1694,9 +1697,10 @@ func (c *FSx) UpdateFileSystemRequest(input *UpdateFileSystemInput) (req *reques // UpdateFileSystem API operation for Amazon FSx. // // Use this operation to update the configuration of an existing Amazon FSx -// file system. For an Amazon FSx for Lustre file system, you can update only -// the WeeklyMaintenanceStartTime. For an Amazon for Windows File Server file -// system, you can update the following properties: +// file system. You can update multiple properties in a single request. +// +// For Amazon FSx for Windows File Server file systems, you can update the following +// properties: // // * AutomaticBackupRetentionDays // @@ -1710,7 +1714,15 @@ func (c *FSx) UpdateFileSystemRequest(input *UpdateFileSystemInput) (req *reques // // * WeeklyMaintenanceStartTime // -// You can update multiple properties in a single request. +// For Amazon FSx for Lustre file systems, you can update the following properties: +// +// * AutoImportPolicy +// +// * AutomaticBackupRetentionDays +// +// * DailyAutomaticBackupStartTime +// +// * WeeklyMaintenanceStartTime // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2033,7 +2045,7 @@ type Backup struct { // Tags associated with a particular file system. Tags []*Tag `min:"1" type:"list"` - // The type of the backup. + // The type of the file system backup. // // Type is a required field Type *string `type:"string" required:"true" enum:"BackupType"` @@ -3200,25 +3212,29 @@ func (s *CreateFileSystemInput) SetWindowsConfiguration(v *CreateFileSystemWindo type CreateFileSystemLustreConfiguration struct { _ struct{} `type:"structure"` - // Use this property to turn the Autoimport feature on and off. AutoImport enables - // your FSx for Lustre file system to automatically update its contents with - // changes that have been made to its linked Amazon S3 data repository. You - // can set the policy to have one the following values: + // (Optional) Use this property to configure the AutoImport feature on the file + // system's linked Amazon S3 data repository. You use AutoImport to update the + // contents of your FSx for Lustre file system automatically with changes that + // occur in the linked S3 data repository. AutoImportPolicy can have the following + // values: // - // * NONE - (Default) Autoimport is turned off. Changes to your S3 repository - // will not be reflected on the FSx file system. + // * NONE - (Default) AutoImport is off. Changes in the linked data repository + // are not reflected on the FSx file system. // - // * NEW - Autoimport is turned on; only new files in the linked S3 repository - // will be imported to the FSx file system. Updates to existing files and - // deleted files will not be imported to the FSx file system. + // * NEW - AutoImport is on. New files in the linked data repository that + // do not currently exist in the FSx file system are automatically imported. + // Updates to existing FSx files are not imported to the FSx file system. 
+ // Files deleted from the linked data repository are not deleted from the + // FSx file system. // - // * NEW_CHANGED - Autoimport is turned on; new files and changes to existing - // files in the linked S3 repository will be imported to the FSx file system. - // Files deleted in S3 are not deleted in the FSx file system. + // * NEW_CHANGED - AutoImport is on. New files in the linked S3 data repository + // that do not currently exist in the FSx file system are automatically imported. + // Changes to existing FSx files in the linked repository are also automatically + // imported to the FSx file system. Files deleted from the linked data repository + // are not deleted from the FSx file system. // - // * NEW_CHANGED_DELETED - Autoimport is turned on; new files, changes to - // existing files, and deleted files in the linked S3 repository will be - // imported to the FSx file system. + // For more information, see Automatically import updates from your S3 bucket + // (https://docs.aws.amazon.com/fsx/latest/LustreGuide/autoimport-data-repo.html). AutoImportPolicy *string `type:"string" enum:"AutoImportPolicyType"` // The number of days to retain automatic backups. Setting this to 0 disables @@ -3233,6 +3249,8 @@ type CreateFileSystemLustreConfiguration struct { // or more tags, only the specified tags are copied to backups. If you specify // one or more tags when creating a user-initiated backup, no tags are copied // from the file system, regardless of this value. + // + // For more information, see Working with backups (https://docs.aws.amazon.com/fsx/latest/LustreGuide/using-backups-fsx.html). CopyTagsToBackups *bool `type:"boolean"` // A recurring daily time, in the format HH:MM. HH is the zero-padded hour of @@ -3244,8 +3262,6 @@ type CreateFileSystemLustreConfiguration struct { // and shorter-term processing of data. The SCRATCH_2 deployment type provides // in-transit encryption of data and higher burst throughput capacity than SCRATCH_1. // - // This option can only be set for for PERSISTENT_1 deployments types. - // // Choose PERSISTENT_1 deployment type for longer-term storage and workloads // and encryption of data in transit. To learn more about deployment types, // see FSx for Lustre Deployment Options (https://docs.aws.amazon.com/fsx/latest/LustreGuide/lustre-deployment-types.html). @@ -3603,25 +3619,28 @@ func (s *CreateFileSystemWindowsConfiguration) SetWeeklyMaintenanceStartTime(v s type DataRepositoryConfiguration struct { _ struct{} `type:"structure"` - // Describes the data repository's AutoImportPolicy. AutoImport enables your - // FSx for Lustre file system to automatically update its contents with changes - // that have been made to its linked Amazon S3 data repository. The policy can - // have the following values: + // Describes the file system's linked S3 data repository's AutoImportPolicy. + // The AutoImportPolicy configures how your FSx for Lustre file system automatically + // updates its contents with changes that occur in the linked S3 data repository. + // AutoImportPolicy can have the following values: // - // * NONE - (Default) Autoimport is turned off, Changes to your S3 repository - // will not be reflected on the FSx file system. + // * NONE - (Default) AutoImport is off. Changes in the linked data repository + // are not reflected on the FSx file system. // - // * NEW - Autoimport is turned on; only new files in the linked S3 repository - // will be imported to the FSx file system. 
Updates to existing files and - // deleted files will not be imported to the FSx file system. + // * NEW - AutoImport is on. New files in the linked data repository that + // do not currently exist in the FSx file system are automatically imported. + // Updates to existing FSx files are not imported to the FSx file system. + // Files deleted from the linked data repository are not deleted from the + // FSx file system. // - // * NEW_CHANGED - Autoimport is turned on; new files and changes to existing - // files in the linked S3 repository will be imported to the FSx file system. - // Files deleted in S3 are not deleted in the FSx file system. + // * NEW_CHANGED - AutoImport is on. New files in the linked S3 data repository + // that do not currently exist in the FSx file system are automatically imported. + // Changes to existing FSx files in the linked repository are also automatically + // imported to the FSx file system. Files deleted from the linked data repository + // are not deleted from the FSx file system. // - // * NEW_CHANGED_DELETED - Autoimport is turned on; new files, changes to - // existing files, and deleted files in the linked S3 repository will be - // imported to the FSx file system. + // For more information, see Automatically import updates from your S3 bucket + // (https://docs.aws.amazon.com/fsx/latest/LustreGuide/autoimport-data-repo.html). AutoImportPolicy *string `type:"string" enum:"AutoImportPolicyType"` // The export path to the Amazon S3 bucket (and prefix) that you are using to @@ -3652,13 +3671,19 @@ type DataRepositoryConfiguration struct { // is configured with an S3 repository. The lifecycle can have the following // values: // - // * CREATING - Amazon FSx is creating the new data repository. + // * CREATING - The data repository configuration between the FSx file system + // and the linked S3 data repository is being created. The data repository + // is unavailable. // // * AVAILABLE - The data repository is available for use. // - // * MISCONFIGURED - The data repository is in a failed but recoverable state. + // * MISCONFIGURED - Amazon FSx cannot automatically import updates from + // the S3 bucket until the data repository configuration is corrected. For + // more information, see Troubleshooting a Misconfigured linked S3 bucket + // (https://docs.aws.amazon.com/fsx/latest/LustreGuide/troubleshooting.html#troubleshooting-misconfigured-data-repository). // - // * UPDATING - The data repository is undergoing a customer initiated update. + // * UPDATING - The data repository is undergoing a customer initiated update + // and availability may be impacted. Lifecycle *string `type:"string" enum:"DataRepositoryLifecycle"` } @@ -6852,25 +6877,29 @@ func (s *UpdateFileSystemInput) SetWindowsConfiguration(v *UpdateFileSystemWindo type UpdateFileSystemLustreConfiguration struct { _ struct{} `type:"structure"` - // Use this property to turn the Autoimport feature on and off. AutoImport enables - // your FSx for Lustre file system to automatically update its contents with - // changes that have been made to its linked Amazon S3 data repository. You - // can set the policy to have one the following values: + // (Optional) Use this property to configure the AutoImport feature on the file + // system's linked Amazon S3 data repository. You use AutoImport to update the + // contents of your FSx for Lustre file system automatically with changes that + // occur in the linked S3 data repository. 
AutoImportPolicy can have the following + // values: // - // * NONE - (Default) Autoimport is turned off. Changes to your S3 repository - // will not be reflected on the FSx file system. + // * NONE - (Default) AutoImport is off. Changes in the linked data repository + // are not reflected on the FSx file system. // - // * NEW - Autoimport is turned on; only new files in the linked S3 repository - // will be imported to the FSx file system. Updates to existing files and - // deleted files will not be imported to the FSx file system. + // * NEW - AutoImport is on. New files in the linked data repository that + // do not currently exist in the FSx file system are automatically imported. + // Updates to existing FSx files are not imported to the FSx file system. + // Files deleted from the linked data repository are not deleted from the + // FSx file system. // - // * NEW_CHANGED - Autoimport is turned on; new files and changes to existing - // files in the linked S3 repository will be imported to the FSx file system. - // Files deleted in S3 are not deleted in the FSx file system. + // * NEW_CHANGED - AutoImport is on. New files in the linked S3 data repository + // that do not currently exist in the FSx file system are automatically imported. + // Changes to existing FSx files in the linked repository are also automatically + // imported to the FSx file system. Files deleted from the linked data repository + // are not deleted from the FSx file system. // - // * NEW_CHANGED_DELETED - Autoimport is turned on; new files, changes to - // existing files, and deleted files in the linked S3 repository will be - // imported to the FSx file system. + // For more information, see Automatically import updates from your S3 bucket + // (https://docs.aws.amazon.com/fsx/latest/LustreGuide/autoimport-data-repo.html). AutoImportPolicy *string `type:"string" enum:"AutoImportPolicyType"` // The number of days to retain automatic backups. Setting this to 0 disables diff --git a/service/kendra/api.go b/service/kendra/api.go index aca13689f5d..7d86e93410a 100644 --- a/service/kendra/api.go +++ b/service/kendra/api.go @@ -4502,6 +4502,10 @@ type DatabaseConfiguration struct { // DatabaseEngineType is a required field DatabaseEngineType *string `type:"string" required:"true" enum:"DatabaseEngineType"` + // Provides information about how Amazon Kendra uses quote marks around SQL + // identifiers when querying a database data source. + SqlConfiguration *SqlConfiguration `type:"structure"` + // Provides information for connecting to an Amazon VPC. VpcConfiguration *DataSourceVpcConfiguration `type:"structure"` } @@ -4579,6 +4583,12 @@ func (s *DatabaseConfiguration) SetDatabaseEngineType(v string) *DatabaseConfigu return s } +// SetSqlConfiguration sets the SqlConfiguration field's value. +func (s *DatabaseConfiguration) SetSqlConfiguration(v *SqlConfiguration) *DatabaseConfiguration { + s.SqlConfiguration = v + return s +} + // SetVpcConfiguration sets the VpcConfiguration field's value. func (s *DatabaseConfiguration) SetVpcConfiguration(v *DataSourceVpcConfiguration) *DatabaseConfiguration { s.VpcConfiguration = v @@ -5512,7 +5522,7 @@ func (s *DocumentAttribute) SetValue(v *DocumentAttributeValue) *DocumentAttribu type DocumentAttributeValue struct { _ struct{} `type:"structure"` - // A date value expressed as seconds from the Unix epoch. + // A date expressed as an ISO 8601 string. DateValue *time.Time `type:"timestamp"` // A long integer value. 
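The FSx hunks above rewrite the AutoImportPolicy documentation on CreateFileSystemLustreConfiguration, DataRepositoryConfiguration, and UpdateFileSystemLustreConfiguration. A minimal sketch of switching an existing Lustre file system to the NEW_CHANGED policy; the file system ID is a placeholder:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/fsx"
)

func main() {
	svc := fsx.New(session.Must(session.NewSession()))

	// AutoImportPolicy is one of NONE (default), NEW, or NEW_CHANGED.
	_, err := svc.UpdateFileSystem(&fsx.UpdateFileSystemInput{
		FileSystemId: aws.String("fs-0123456789abcdef0"),
		LustreConfiguration: &fsx.UpdateFileSystemLustreConfiguration{
			AutoImportPolicy: aws.String("NEW_CHANGED"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```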
@@ -6901,6 +6911,15 @@ type QueryInput struct { // attributes are included in the response. By default all document attributes // are included in the response. RequestedDocumentAttributes []*string `min:"1" type:"list"` + + // Provides information that determines how the results of the query are sorted. + // You can set the field that Amazon Kendra should sort the results on, and + // specify whether the results should be sorted in ascending or descending order. + // In the case of ties in sorting the results, the results are sorted by relevance. + // + // If you don't provide sorting configuration, the results are sorted by the + // relevance that Amazon Kendra determines for the result. + SortingConfiguration *SortingConfiguration `type:"structure"` } // String returns the string representation @@ -6946,6 +6965,11 @@ func (s *QueryInput) Validate() error { } } } + if s.SortingConfiguration != nil { + if err := s.SortingConfiguration.Validate(); err != nil { + invalidParams.AddNested("SortingConfiguration", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -7001,6 +7025,12 @@ func (s *QueryInput) SetRequestedDocumentAttributes(v []*string) *QueryInput { return s } +// SetSortingConfiguration sets the SortingConfiguration field's value. +func (s *QueryInput) SetSortingConfiguration(v *SortingConfiguration) *QueryInput { + s.SortingConfiguration = v + return s +} + type QueryOutput struct { _ struct{} `type:"structure"` @@ -8399,6 +8429,11 @@ type Search struct { // weights the field in the search. The default is true for string fields and // false for number and date fields. Searchable *bool `type:"boolean"` + + // Determines whether the field can be used to sort the results of a query. + // If you specify sorting on a field that does not have Sortable set to true, + // Amazon Kendra returns an exception. The default is false. + Sortable *bool `type:"boolean"` } // String returns the string representation @@ -8429,6 +8464,12 @@ func (s *Search) SetSearchable(v bool) *Search { return s } +// SetSortable sets the Sortable field's value. +func (s *Search) SetSortable(v bool) *Search { + s.Sortable = &v + return s +} + // Provides the identifier of the AWS KMS customer master key (CMK) used to // encrypt data indexed by Amazon Kendra. Amazon Kendra doesn't support asymmetric // CMKs. @@ -8862,7 +8903,7 @@ type SharePointConfiguration struct { // The Microsoft SharePoint attribute field that contains the title of the document. DocumentTitleFieldName *string `min:"1" type:"string"` - // A list of regulary expression patterns. Documents that match the patterns + // A list of regular expression patterns. Documents that match the patterns // are excluded from the index. Documents that don't match the patterns are // included in the index. If a document matches both an exclusion pattern and // an inclusion pattern, the document is not included in the index. @@ -9032,6 +9073,115 @@ func (s *SharePointConfiguration) SetVpcConfiguration(v *DataSourceVpcConfigurat return s } +// Specifies the document attribute to use to sort the response to a Amazon +// Kendra query. You can specify a single attribute for sorting. The attribute +// must have the Sortable flag set to true, otherwise Amazon Kendra returns +// an exception. +type SortingConfiguration struct { + _ struct{} `type:"structure"` + + // The name of the document attribute used to sort the response. You can use + // any field that has the Sortable flag set to true. 
+ // + // You can also sort by any of the following built-in attributes: + // + // * _category + // + // * _created_at + // + // * _last_updated_at + // + // * _version + // + // * _view_count + // + // DocumentAttributeKey is a required field + DocumentAttributeKey *string `min:"1" type:"string" required:"true"` + + // The order that the results should be returned in. In case of ties, the relevance + // assigned to the result by Amazon Kendra is used as the tie-breaker. + // + // SortOrder is a required field + SortOrder *string `type:"string" required:"true" enum:"SortOrder"` +} + +// String returns the string representation +func (s SortingConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SortingConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SortingConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SortingConfiguration"} + if s.DocumentAttributeKey == nil { + invalidParams.Add(request.NewErrParamRequired("DocumentAttributeKey")) + } + if s.DocumentAttributeKey != nil && len(*s.DocumentAttributeKey) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DocumentAttributeKey", 1)) + } + if s.SortOrder == nil { + invalidParams.Add(request.NewErrParamRequired("SortOrder")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDocumentAttributeKey sets the DocumentAttributeKey field's value. +func (s *SortingConfiguration) SetDocumentAttributeKey(v string) *SortingConfiguration { + s.DocumentAttributeKey = &v + return s +} + +// SetSortOrder sets the SortOrder field's value. +func (s *SortingConfiguration) SetSortOrder(v string) *SortingConfiguration { + s.SortOrder = &v + return s +} + +// Provides information that configures Amazon Kendra to use a SQL database. +type SqlConfiguration struct { + _ struct{} `type:"structure"` + + // Determines whether Amazon Kendra encloses SQL identifiers in double quotes + // (") when making a database query. + // + // By default, Amazon Kendra passes SQL identifiers the way that they are entered + // into the data source configuration. It does not change the case of identifiers + // or enclose them in quotes. + // + // PostgreSQL internally converts uppercase characters to lower case characters + // in identifiers unless they are quoted. Choosing this option encloses identifiers + // in quotes so that PostgreSQL does not convert the character's case. + // + // For MySQL databases, you must enable the ansi_quotes option when you choose + // this option. + QueryIdentifiersEnclosingOption *string `type:"string" enum:"QueryIdentifiersEnclosingOption"` +} + +// String returns the string representation +func (s SqlConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SqlConfiguration) GoString() string { + return s.String() +} + +// SetQueryIdentifiersEnclosingOption sets the QueryIdentifiersEnclosingOption field's value. 
+func (s *SqlConfiguration) SetQueryIdentifiersEnclosingOption(v string) *SqlConfiguration { + s.QueryIdentifiersEnclosingOption = &v + return s +} + type StartDataSourceSyncJobInput struct { _ struct{} `type:"structure"` @@ -10165,6 +10315,14 @@ const ( PrincipalTypeGroup = "GROUP" ) +const ( + // QueryIdentifiersEnclosingOptionDoubleQuotes is a QueryIdentifiersEnclosingOption enum value + QueryIdentifiersEnclosingOptionDoubleQuotes = "DOUBLE_QUOTES" + + // QueryIdentifiersEnclosingOptionNone is a QueryIdentifiersEnclosingOption enum value + QueryIdentifiersEnclosingOptionNone = "NONE" +) + const ( // QueryResultTypeDocument is a QueryResultType enum value QueryResultTypeDocument = "DOCUMENT" @@ -10276,3 +10434,11 @@ const ( // SharePointVersionSharepointOnline is a SharePointVersion enum value SharePointVersionSharepointOnline = "SHAREPOINT_ONLINE" ) + +const ( + // SortOrderDesc is a SortOrder enum value + SortOrderDesc = "DESC" + + // SortOrderAsc is a SortOrder enum value + SortOrderAsc = "ASC" +) diff --git a/service/macie2/api.go b/service/macie2/api.go index d526cfc501b..ca6d5ed661b 100644 --- a/service/macie2/api.go +++ b/service/macie2/api.go @@ -6647,9 +6647,9 @@ type BucketMetadata struct { ObjectCount *int64 `locationName:"objectCount" type:"long"` - // The total number of objects that are in the bucket, grouped by server-side - // encryption type. This includes a grouping that reports the total number of - // objects that aren't encrypted. + // Provides information about the number of objects that are in an S3 bucket + // and use certain types of server-side encryption, use client-side encryption, + // or aren't encrypted. ObjectCountByEncryptionType *ObjectCountByEncryptionType `locationName:"objectCountByEncryptionType" type:"structure"` // Provides information about permissions settings that determine whether an @@ -11726,9 +11726,9 @@ func (s *MonthlySchedule) SetDayOfMonth(v int64) *MonthlySchedule { return s } -// The total number of objects that are in the bucket, grouped by server-side -// encryption type. This includes a grouping that reports the total number of -// objects that aren't encrypted. +// Provides information about the number of objects that are in an S3 bucket +// and use certain types of server-side encryption, use client-side encryption, +// or aren't encrypted. type ObjectCountByEncryptionType struct { _ struct{} `type:"structure"` @@ -13702,13 +13702,17 @@ func (s *UsageRecord) SetUsage(v []*UsageByAccount) *UsageRecord { return s } -// Specifies criteria for filtering the results of a query for account quotas +// Specifies a condition for filtering the results of a query for account quotas // and usage data. type UsageStatisticsFilter struct { _ struct{} `type:"structure"` - // The field to use to filter the results of a query for account quotas and - // usage data: + // The operator to use in a condition that filters the results of a query for + // account quotas and usage data. Valid values are: + Comparator *string `locationName:"comparator" type:"string" enum:"UsageStatisticsFilterComparator"` + + // The field to use in a condition that filters the results of a query for account + // quotas and usage data. Valid values are: Key *string `locationName:"key" type:"string" enum:"UsageStatisticsFilterKey"` Values []*string `locationName:"values" type:"list"` @@ -13724,6 +13728,12 @@ func (s UsageStatisticsFilter) GoString() string { return s.String() } +// SetComparator sets the Comparator field's value. 
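The Kendra hunks above add SortingConfiguration to QueryInput, the SortOrder and QueryIdentifiersEnclosingOption enums, and the SqlConfiguration structure for database data sources. A minimal sketch of a sorted query; the index ID and query text are placeholders, and "_last_updated_at" is one of the built-in sortable attributes listed above:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kendra"
)

func main() {
	svc := kendra.New(session.Must(session.NewSession()))

	out, err := svc.Query(&kendra.QueryInput{
		IndexId:   aws.String("11111111-2222-3333-4444-555555555555"),
		QueryText: aws.String("quarterly report"),
		SortingConfiguration: &kendra.SortingConfiguration{
			DocumentAttributeKey: aws.String("_last_updated_at"),
			SortOrder:            aws.String(kendra.SortOrderDesc),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(out.ResultItems), "results")
}
```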
+func (s *UsageStatisticsFilter) SetComparator(v string) *UsageStatisticsFilter { + s.Comparator = &v + return s +} + // SetKey sets the Key field's value. func (s *UsageStatisticsFilter) SetKey(v string) *UsageStatisticsFilter { s.Key = &v @@ -14422,11 +14432,45 @@ const ( UnitTerabytes = "TERABYTES" ) -// The field to use to filter the results of a query for account quotas and -// usage data: +// The operator to use in a condition that filters the results of a query for +// account quotas and usage data. Valid values are: +const ( + // UsageStatisticsFilterComparatorGt is a UsageStatisticsFilterComparator enum value + UsageStatisticsFilterComparatorGt = "GT" + + // UsageStatisticsFilterComparatorGte is a UsageStatisticsFilterComparator enum value + UsageStatisticsFilterComparatorGte = "GTE" + + // UsageStatisticsFilterComparatorLt is a UsageStatisticsFilterComparator enum value + UsageStatisticsFilterComparatorLt = "LT" + + // UsageStatisticsFilterComparatorLte is a UsageStatisticsFilterComparator enum value + UsageStatisticsFilterComparatorLte = "LTE" + + // UsageStatisticsFilterComparatorEq is a UsageStatisticsFilterComparator enum value + UsageStatisticsFilterComparatorEq = "EQ" + + // UsageStatisticsFilterComparatorNe is a UsageStatisticsFilterComparator enum value + UsageStatisticsFilterComparatorNe = "NE" + + // UsageStatisticsFilterComparatorContains is a UsageStatisticsFilterComparator enum value + UsageStatisticsFilterComparatorContains = "CONTAINS" +) + +// The field to use in a condition that filters the results of a query for account +// quotas and usage data. Valid values are: const ( // UsageStatisticsFilterKeyAccountId is a UsageStatisticsFilterKey enum value UsageStatisticsFilterKeyAccountId = "accountId" + + // UsageStatisticsFilterKeyServiceLimit is a UsageStatisticsFilterKey enum value + UsageStatisticsFilterKeyServiceLimit = "serviceLimit" + + // UsageStatisticsFilterKeyFreeTrialStartDate is a UsageStatisticsFilterKey enum value + UsageStatisticsFilterKeyFreeTrialStartDate = "freeTrialStartDate" + + // UsageStatisticsFilterKeyTotal is a UsageStatisticsFilterKey enum value + UsageStatisticsFilterKeyTotal = "total" ) // The field to use to sort the results of a query for account quotas and usage @@ -14437,6 +14481,12 @@ const ( // UsageStatisticsSortKeyTotal is a UsageStatisticsSortKey enum value UsageStatisticsSortKeyTotal = "total" + + // UsageStatisticsSortKeyServiceLimitValue is a UsageStatisticsSortKey enum value + UsageStatisticsSortKeyServiceLimitValue = "serviceLimitValue" + + // UsageStatisticsSortKeyFreeTrialStartDate is a UsageStatisticsSortKey enum value + UsageStatisticsSortKeyFreeTrialStartDate = "freeTrialStartDate" ) // The name of a usage metric for an account. Possible values are: diff --git a/service/mediaconnect/api.go b/service/mediaconnect/api.go index 789b705c351..80bb7525c5f 100644 --- a/service/mediaconnect/api.go +++ b/service/mediaconnect/api.go @@ -3508,6 +3508,9 @@ type Entitlement struct { // EntitlementArn is a required field EntitlementArn *string `locationName:"entitlementArn" type:"string" required:"true"` + // An indication of whether the entitlement is enabled. + EntitlementStatus *string `locationName:"entitlementStatus" type:"string" enum:"EntitlementStatus"` + // The name of the entitlement. // // Name is a required field @@ -3555,6 +3558,12 @@ func (s *Entitlement) SetEntitlementArn(v string) *Entitlement { return s } +// SetEntitlementStatus sets the EntitlementStatus field's value. 
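The macie2 hunks above add a Comparator to UsageStatisticsFilter along with new filter and sort keys. A hedged sketch of filtering usage statistics by account ID; the FilterBy field on GetUsageStatisticsInput and the Records field on its output are assumptions not shown in this diff:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/macie2"
)

func main() {
	svc := macie2.New(session.Must(session.NewSession()))

	out, err := svc.GetUsageStatistics(&macie2.GetUsageStatisticsInput{
		FilterBy: []*macie2.UsageStatisticsFilter{{
			Comparator: aws.String(macie2.UsageStatisticsFilterComparatorEq),
			Key:        aws.String(macie2.UsageStatisticsFilterKeyAccountId),
			Values:     []*string{aws.String("123456789012")},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(out.Records), "usage records")
}
```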
+func (s *Entitlement) SetEntitlementStatus(v string) *Entitlement { + s.EntitlementStatus = &v + return s +} + // SetName sets the Name field's value. func (s *Entitlement) SetName(v string) *Entitlement { s.Name = &v @@ -3812,6 +3821,11 @@ type GrantEntitlementRequest struct { // with this entitlement. Encryption *Encryption `locationName:"encryption" type:"structure"` + // An indication of whether the new entitlement should be enabled or disabled + // as soon as it is created. If you don’t specify the entitlementStatus field + // in your request, MediaConnect sets it to ENABLED. + EntitlementStatus *string `locationName:"entitlementStatus" type:"string" enum:"EntitlementStatus"` + // The name of the entitlement. This value must be unique within the current // flow. Name *string `locationName:"name" type:"string"` @@ -3870,6 +3884,12 @@ func (s *GrantEntitlementRequest) SetEncryption(v *Encryption) *GrantEntitlement return s } +// SetEntitlementStatus sets the EntitlementStatus field's value. +func (s *GrantEntitlementRequest) SetEntitlementStatus(v string) *GrantEntitlementRequest { + s.EntitlementStatus = &v + return s +} + // SetName sets the Name field's value. func (s *GrantEntitlementRequest) SetName(v string) *GrantEntitlementRequest { s.Name = &v @@ -5918,6 +5938,12 @@ type UpdateFlowEntitlementInput struct { // EntitlementArn is a required field EntitlementArn *string `location:"uri" locationName:"entitlementArn" type:"string" required:"true"` + // An indication of whether you want to enable the entitlement to allow access, + // or disable it to stop streaming content to the subscriber’s flow temporarily. + // If you don’t specify the entitlementStatus field in your request, MediaConnect + // leaves the value unchanged. + EntitlementStatus *string `locationName:"entitlementStatus" type:"string" enum:"EntitlementStatus"` + // FlowArn is a required field FlowArn *string `location:"uri" locationName:"flowArn" type:"string" required:"true"` @@ -5977,6 +6003,12 @@ func (s *UpdateFlowEntitlementInput) SetEntitlementArn(v string) *UpdateFlowEnti return s } +// SetEntitlementStatus sets the EntitlementStatus field's value. +func (s *UpdateFlowEntitlementInput) SetEntitlementStatus(v string) *UpdateFlowEntitlementInput { + s.EntitlementStatus = &v + return s +} + // SetFlowArn sets the FlowArn field's value. func (s *UpdateFlowEntitlementInput) SetFlowArn(v string) *UpdateFlowEntitlementInput { s.FlowArn = &v @@ -6662,6 +6694,14 @@ const ( AlgorithmAes256 = "aes256" ) +const ( + // EntitlementStatusEnabled is a EntitlementStatus enum value + EntitlementStatusEnabled = "ENABLED" + + // EntitlementStatusDisabled is a EntitlementStatus enum value + EntitlementStatusDisabled = "DISABLED" +) + const ( // KeyTypeSpeke is a KeyType enum value KeyTypeSpeke = "speke" diff --git a/service/mediapackage/api.go b/service/mediapackage/api.go index 23f6d0237c8..ed1dba2ef82 100644 --- a/service/mediapackage/api.go +++ b/service/mediapackage/api.go @@ -3681,7 +3681,10 @@ type HlsManifest struct { // will omit all SCTE-35 ad markers from the output."PASSTHROUGH" causes the // manifest to contain a copy of the SCTE-35 admarkers (comments) taken directly // from the input HTTP Live Streaming (HLS) manifest."SCTE35_ENHANCED" generates - // ad markers and blackout tags based on SCTE-35messages in the input source. 
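The mediaconnect hunks above add EntitlementStatus to Entitlement, GrantEntitlementRequest, and UpdateFlowEntitlementInput. A minimal sketch of temporarily disabling an entitlement; the flow and entitlement ARNs are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/mediaconnect"
)

func main() {
	svc := mediaconnect.New(session.Must(session.NewSession()))

	// Stop streaming to the subscriber's flow without revoking the entitlement.
	_, err := svc.UpdateFlowEntitlement(&mediaconnect.UpdateFlowEntitlementInput{
		FlowArn:           aws.String("arn:aws:mediaconnect:us-east-1:111122223333:flow:1-abcd:MyFlow"),
		EntitlementArn:    aws.String("arn:aws:mediaconnect:us-east-1:111122223333:entitlement:1-efgh:MyEntitlement"),
		EntitlementStatus: aws.String(mediaconnect.EntitlementStatusDisabled),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```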
+ // ad markers and blackout tags based on SCTE-35messages in the input source."DATERANGE" + // inserts EXT-X-DATERANGE tags to signal ad and program transition events in + // HLS and CMAF manifests. For this option, you must set a programDateTimeIntervalSeconds + // value that is greater than 0. AdMarkers *string `locationName:"adMarkers" type:"string" enum:"AdMarkers"` // The ID of the manifest. The ID must be unique within the OriginEndpoint and @@ -3785,7 +3788,10 @@ type HlsManifestCreateOrUpdateParameters struct { // will omit all SCTE-35 ad markers from the output."PASSTHROUGH" causes the // manifest to contain a copy of the SCTE-35 admarkers (comments) taken directly // from the input HTTP Live Streaming (HLS) manifest."SCTE35_ENHANCED" generates - // ad markers and blackout tags based on SCTE-35messages in the input source. + // ad markers and blackout tags based on SCTE-35messages in the input source."DATERANGE" + // inserts EXT-X-DATERANGE tags to signal ad and program transition events in + // HLS and CMAF manifests. For this option, you must set a programDateTimeIntervalSeconds + // value that is greater than 0. AdMarkers *string `locationName:"adMarkers" type:"string" enum:"AdMarkers"` // A list of SCTE-35 message types that are treated as ad markers in the output. @@ -3922,7 +3928,10 @@ type HlsPackage struct { // will omit all SCTE-35 ad markers from the output."PASSTHROUGH" causes the // manifest to contain a copy of the SCTE-35 admarkers (comments) taken directly // from the input HTTP Live Streaming (HLS) manifest."SCTE35_ENHANCED" generates - // ad markers and blackout tags based on SCTE-35messages in the input source. + // ad markers and blackout tags based on SCTE-35messages in the input source."DATERANGE" + // inserts EXT-X-DATERANGE tags to signal ad and program transition events in + // HLS and CMAF manifests. For this option, you must set a programDateTimeIntervalSeconds + // value that is greater than 0. AdMarkers *string `locationName:"adMarkers" type:"string" enum:"AdMarkers"` // A list of SCTE-35 message types that are treated as ad markers in the output. @@ -5933,6 +5942,9 @@ const ( // AdMarkersPassthrough is a AdMarkers enum value AdMarkersPassthrough = "PASSTHROUGH" + + // AdMarkersDaterange is a AdMarkers enum value + AdMarkersDaterange = "DATERANGE" ) // This setting allows the delivery restriction flags on SCTE-35 segmentation diff --git a/service/mq/api.go b/service/mq/api.go index 21df17b378d..62205ac5cca 100644 --- a/service/mq/api.go +++ b/service/mq/api.go @@ -1193,6 +1193,12 @@ func (c *MQ) ListBrokersRequest(input *ListBrokersInput) (req *request.Request, Name: opListBrokers, HTTPMethod: "GET", HTTPPath: "/v1/brokers", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -1247,6 +1253,58 @@ func (c *MQ) ListBrokersWithContext(ctx aws.Context, input *ListBrokersInput, op return out, req.Send() } +// ListBrokersPages iterates over the pages of a ListBrokers operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListBrokers method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListBrokers operation. 
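The mediapackage hunks above add the DATERANGE ad marker, which requires a programDateTimeIntervalSeconds greater than 0. A hedged sketch of creating an HLS origin endpoint with the new marker; the channel and endpoint IDs are placeholders and the remaining HlsPackage fields are left at their defaults:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/mediapackage"
)

func main() {
	svc := mediapackage.New(session.Must(session.NewSession()))

	_, err := svc.CreateOriginEndpoint(&mediapackage.CreateOriginEndpointInput{
		ChannelId: aws.String("my-channel"),
		Id:        aws.String("my-hls-endpoint"),
		HlsPackage: &mediapackage.HlsPackage{
			// DATERANGE requires a non-zero program date/time interval.
			AdMarkers:                      aws.String(mediapackage.AdMarkersDaterange),
			ProgramDateTimeIntervalSeconds: aws.Int64(60),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```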
+// pageNum := 0 +// err := client.ListBrokersPages(params, +// func(page *mq.ListBrokersResponse, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *MQ) ListBrokersPages(input *ListBrokersInput, fn func(*ListBrokersResponse, bool) bool) error { + return c.ListBrokersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListBrokersPagesWithContext same as ListBrokersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MQ) ListBrokersPagesWithContext(ctx aws.Context, input *ListBrokersInput, fn func(*ListBrokersResponse, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListBrokersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListBrokersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListBrokersResponse), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListConfigurationRevisions = "ListConfigurationRevisions" // ListConfigurationRevisionsRequest generates a "aws/request.Request" representing the @@ -2273,6 +2331,9 @@ type Configuration struct { // Required. The ARN of the configuration. Arn *string `locationName:"arn" type:"string"` + // The authentication strategy associated with the configuration. + AuthenticationStrategy *string `locationName:"authenticationStrategy" type:"string" enum:"AuthenticationStrategy"` + // Required. The date and time of the configuration revision. Created *time.Time `locationName:"created" type:"timestamp" timestampFormat:"iso8601"` @@ -2318,6 +2379,12 @@ func (s *Configuration) SetArn(v string) *Configuration { return s } +// SetAuthenticationStrategy sets the AuthenticationStrategy field's value. +func (s *Configuration) SetAuthenticationStrategy(v string) *Configuration { + s.AuthenticationStrategy = &v + return s +} + // SetCreated sets the Created field's value. func (s *Configuration) SetCreated(v time.Time) *Configuration { s.Created = &v @@ -2544,6 +2611,9 @@ func (s *ConflictException) RequestID() string { type CreateBrokerRequest struct { _ struct{} `type:"structure"` + // The authentication strategy used to secure the broker. + AuthenticationStrategy *string `locationName:"authenticationStrategy" type:"string" enum:"AuthenticationStrategy"` + AutoMinorVersionUpgrade *bool `locationName:"autoMinorVersionUpgrade" type:"boolean"` BrokerName *string `locationName:"brokerName" type:"string"` @@ -2566,6 +2636,10 @@ type CreateBrokerRequest struct { HostInstanceType *string `locationName:"hostInstanceType" type:"string"` + // The metadata of the LDAP server used to authenticate and authorize connections + // to the broker. + LdapServerMetadata *LdapServerMetadataInput `locationName:"ldapServerMetadata" type:"structure"` + // The list of information about logs to be enabled for the specified broker. Logs *Logs `locationName:"logs" type:"structure"` @@ -2612,6 +2686,12 @@ func (s *CreateBrokerRequest) Validate() error { return nil } +// SetAuthenticationStrategy sets the AuthenticationStrategy field's value. 
+func (s *CreateBrokerRequest) SetAuthenticationStrategy(v string) *CreateBrokerRequest { + s.AuthenticationStrategy = &v + return s +} + // SetAutoMinorVersionUpgrade sets the AutoMinorVersionUpgrade field's value. func (s *CreateBrokerRequest) SetAutoMinorVersionUpgrade(v bool) *CreateBrokerRequest { s.AutoMinorVersionUpgrade = &v @@ -2666,6 +2746,12 @@ func (s *CreateBrokerRequest) SetHostInstanceType(v string) *CreateBrokerRequest return s } +// SetLdapServerMetadata sets the LdapServerMetadata field's value. +func (s *CreateBrokerRequest) SetLdapServerMetadata(v *LdapServerMetadataInput) *CreateBrokerRequest { + s.LdapServerMetadata = v + return s +} + // SetLogs sets the Logs field's value. func (s *CreateBrokerRequest) SetLogs(v *Logs) *CreateBrokerRequest { s.Logs = v @@ -2747,6 +2833,9 @@ func (s *CreateBrokerResponse) SetBrokerId(v string) *CreateBrokerResponse { type CreateConfigurationRequest struct { _ struct{} `type:"structure"` + // The authentication strategy used to secure the broker. + AuthenticationStrategy *string `locationName:"authenticationStrategy" type:"string" enum:"AuthenticationStrategy"` + // The type of broker engine. Note: Currently, Amazon MQ supports only ActiveMQ. EngineType *string `locationName:"engineType" type:"string" enum:"EngineType"` @@ -2767,6 +2856,12 @@ func (s CreateConfigurationRequest) GoString() string { return s.String() } +// SetAuthenticationStrategy sets the AuthenticationStrategy field's value. +func (s *CreateConfigurationRequest) SetAuthenticationStrategy(v string) *CreateConfigurationRequest { + s.AuthenticationStrategy = &v + return s +} + // SetEngineType sets the EngineType field's value. func (s *CreateConfigurationRequest) SetEngineType(v string) *CreateConfigurationRequest { s.EngineType = &v @@ -2796,6 +2891,9 @@ type CreateConfigurationResponse struct { Arn *string `locationName:"arn" type:"string"` + // The authentication strategy used to secure the broker. + AuthenticationStrategy *string `locationName:"authenticationStrategy" type:"string" enum:"AuthenticationStrategy"` + Created *time.Time `locationName:"created" type:"timestamp" timestampFormat:"iso8601"` Id *string `locationName:"id" type:"string"` @@ -2822,6 +2920,12 @@ func (s *CreateConfigurationResponse) SetArn(v string) *CreateConfigurationRespo return s } +// SetAuthenticationStrategy sets the AuthenticationStrategy field's value. +func (s *CreateConfigurationResponse) SetAuthenticationStrategy(v string) *CreateConfigurationResponse { + s.AuthenticationStrategy = &v + return s +} + // SetCreated sets the Created field's value. func (s *CreateConfigurationResponse) SetCreated(v time.Time) *CreateConfigurationResponse { s.Created = &v @@ -3429,6 +3533,9 @@ func (s *DescribeBrokerInstanceOptionsOutput) SetNextToken(v string) *DescribeBr type DescribeBrokerResponse struct { _ struct{} `type:"structure"` + // The authentication strategy used to secure the broker. + AuthenticationStrategy *string `locationName:"authenticationStrategy" type:"string" enum:"AuthenticationStrategy"` + AutoMinorVersionUpgrade *bool `locationName:"autoMinorVersionUpgrade" type:"boolean"` BrokerArn *string `locationName:"brokerArn" type:"string"` @@ -3460,6 +3567,10 @@ type DescribeBrokerResponse struct { HostInstanceType *string `locationName:"hostInstanceType" type:"string"` + // The metadata of the LDAP server used to authenticate and authorize connections + // to the broker. 
+ LdapServerMetadata *LdapServerMetadataOutput `locationName:"ldapServerMetadata" type:"structure"` + // The list of information about logs currently enabled and pending to be deployed // for the specified broker. Logs *LogsSummary `locationName:"logs" type:"structure"` @@ -3468,10 +3579,17 @@ type DescribeBrokerResponse struct { // apply pending updates or patches to the broker. MaintenanceWindowStartTime *WeeklyStartTime `locationName:"maintenanceWindowStartTime" type:"structure"` + // The authentication strategy used to secure the broker. + PendingAuthenticationStrategy *string `locationName:"pendingAuthenticationStrategy" type:"string" enum:"AuthenticationStrategy"` + PendingEngineVersion *string `locationName:"pendingEngineVersion" type:"string"` PendingHostInstanceType *string `locationName:"pendingHostInstanceType" type:"string"` + // The metadata of the LDAP server used to authenticate and authorize connections + // to the broker. + PendingLdapServerMetadata *LdapServerMetadataOutput `locationName:"pendingLdapServerMetadata" type:"structure"` + PendingSecurityGroups []*string `locationName:"pendingSecurityGroups" type:"list"` PubliclyAccessible *bool `locationName:"publiclyAccessible" type:"boolean"` @@ -3498,6 +3616,12 @@ func (s DescribeBrokerResponse) GoString() string { return s.String() } +// SetAuthenticationStrategy sets the AuthenticationStrategy field's value. +func (s *DescribeBrokerResponse) SetAuthenticationStrategy(v string) *DescribeBrokerResponse { + s.AuthenticationStrategy = &v + return s +} + // SetAutoMinorVersionUpgrade sets the AutoMinorVersionUpgrade field's value. func (s *DescribeBrokerResponse) SetAutoMinorVersionUpgrade(v bool) *DescribeBrokerResponse { s.AutoMinorVersionUpgrade = &v @@ -3576,6 +3700,12 @@ func (s *DescribeBrokerResponse) SetHostInstanceType(v string) *DescribeBrokerRe return s } +// SetLdapServerMetadata sets the LdapServerMetadata field's value. +func (s *DescribeBrokerResponse) SetLdapServerMetadata(v *LdapServerMetadataOutput) *DescribeBrokerResponse { + s.LdapServerMetadata = v + return s +} + // SetLogs sets the Logs field's value. func (s *DescribeBrokerResponse) SetLogs(v *LogsSummary) *DescribeBrokerResponse { s.Logs = v @@ -3588,6 +3718,12 @@ func (s *DescribeBrokerResponse) SetMaintenanceWindowStartTime(v *WeeklyStartTim return s } +// SetPendingAuthenticationStrategy sets the PendingAuthenticationStrategy field's value. +func (s *DescribeBrokerResponse) SetPendingAuthenticationStrategy(v string) *DescribeBrokerResponse { + s.PendingAuthenticationStrategy = &v + return s +} + // SetPendingEngineVersion sets the PendingEngineVersion field's value. func (s *DescribeBrokerResponse) SetPendingEngineVersion(v string) *DescribeBrokerResponse { s.PendingEngineVersion = &v @@ -3600,6 +3736,12 @@ func (s *DescribeBrokerResponse) SetPendingHostInstanceType(v string) *DescribeB return s } +// SetPendingLdapServerMetadata sets the PendingLdapServerMetadata field's value. +func (s *DescribeBrokerResponse) SetPendingLdapServerMetadata(v *LdapServerMetadataOutput) *DescribeBrokerResponse { + s.PendingLdapServerMetadata = v + return s +} + // SetPendingSecurityGroups sets the PendingSecurityGroups field's value. func (s *DescribeBrokerResponse) SetPendingSecurityGroups(v []*string) *DescribeBrokerResponse { s.PendingSecurityGroups = v @@ -3686,6 +3828,9 @@ type DescribeConfigurationOutput struct { Arn *string `locationName:"arn" type:"string"` + // The authentication strategy used to secure the broker. 
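Hedged sketch of reading the new pending authentication fields on DescribeBrokerResponse after requesting a strategy change; it assumes the pre-existing DescribeBroker operation and its BrokerId field, and the broker ID is a placeholder.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/mq"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := mq.New(sess)

	// Broker ID is a placeholder. Pending* fields describe changes that take
	// effect after the broker is next rebooted.
	out, err := svc.DescribeBroker(&mq.DescribeBrokerInput{
		BrokerId: aws.String("b-1234a5b6-78cd-901e-2fgh-3i45j6k178l9"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("current strategy:", aws.StringValue(out.AuthenticationStrategy))
	fmt.Println("pending strategy:", aws.StringValue(out.PendingAuthenticationStrategy))
	if out.PendingLdapServerMetadata != nil {
		fmt.Println("pending LDAP hosts:", aws.StringValueSlice(out.PendingLdapServerMetadata.Hosts))
	}
}
```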
+ AuthenticationStrategy *string `locationName:"authenticationStrategy" type:"string" enum:"AuthenticationStrategy"` + Created *time.Time `locationName:"created" type:"timestamp" timestampFormat:"iso8601"` Description *string `locationName:"description" type:"string"` @@ -3721,6 +3866,12 @@ func (s *DescribeConfigurationOutput) SetArn(v string) *DescribeConfigurationOut return s } +// SetAuthenticationStrategy sets the AuthenticationStrategy field's value. +func (s *DescribeConfigurationOutput) SetAuthenticationStrategy(v string) *DescribeConfigurationOutput { + s.AuthenticationStrategy = &v + return s +} + // SetCreated sets the Created field's value. func (s *DescribeConfigurationOutput) SetCreated(v time.Time) *DescribeConfigurationOutput { s.Created = &v @@ -3983,9 +4134,9 @@ func (s *DescribeUserResponse) SetUsername(v string) *DescribeUserResponse { type EncryptionOptions struct { _ struct{} `type:"structure"` - // The customer master key (CMK) to use for the AWS Key Management Service (KMS). - // This key is used to encrypt your data at rest. If not provided, Amazon MQ - // will use a default CMK to encrypt your data. + // The symmetric customer master key (CMK) to use for the AWS Key Management + // Service (KMS). This key is used to encrypt your data at rest. If not provided, + // Amazon MQ will use a default CMK to encrypt your data. KmsKeyId *string `locationName:"kmsKeyId" type:"string"` // Enables the use of an AWS owned CMK using AWS Key Management Service (KMS). @@ -4169,6 +4320,233 @@ func (s *InternalServerErrorException) RequestID() string { return s.RespMetadata.RequestID } +// The metadata of the LDAP server used to authenticate and authorize connections +// to the broker. +type LdapServerMetadataInput struct { + _ struct{} `type:"structure"` + + // Fully qualified domain name of the LDAP server. Optional failover server. + Hosts []*string `locationName:"hosts" type:"list"` + + // Fully qualified name of the directory to search for a user’s groups. + RoleBase *string `locationName:"roleBase" type:"string"` + + // Specifies the LDAP attribute that identifies the group name attribute in + // the object returned from the group membership query. + RoleName *string `locationName:"roleName" type:"string"` + + // The search criteria for groups. + RoleSearchMatching *string `locationName:"roleSearchMatching" type:"string"` + + // The directory search scope for the role. If set to true, scope is to search + // the entire sub-tree. + RoleSearchSubtree *bool `locationName:"roleSearchSubtree" type:"boolean"` + + // Service account password. + ServiceAccountPassword *string `locationName:"serviceAccountPassword" type:"string"` + + // Service account username. + ServiceAccountUsername *string `locationName:"serviceAccountUsername" type:"string"` + + // Fully qualified name of the directory where you want to search for users. + UserBase *string `locationName:"userBase" type:"string"` + + // Specifies the name of the LDAP attribute for the user group membership. + UserRoleName *string `locationName:"userRoleName" type:"string"` + + // The search criteria for users. + UserSearchMatching *string `locationName:"userSearchMatching" type:"string"` + + // The directory search scope for the user. If set to true, scope is to search + // the entire sub-tree. 
+ UserSearchSubtree *bool `locationName:"userSearchSubtree" type:"boolean"` +} + +// String returns the string representation +func (s LdapServerMetadataInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LdapServerMetadataInput) GoString() string { + return s.String() +} + +// SetHosts sets the Hosts field's value. +func (s *LdapServerMetadataInput) SetHosts(v []*string) *LdapServerMetadataInput { + s.Hosts = v + return s +} + +// SetRoleBase sets the RoleBase field's value. +func (s *LdapServerMetadataInput) SetRoleBase(v string) *LdapServerMetadataInput { + s.RoleBase = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *LdapServerMetadataInput) SetRoleName(v string) *LdapServerMetadataInput { + s.RoleName = &v + return s +} + +// SetRoleSearchMatching sets the RoleSearchMatching field's value. +func (s *LdapServerMetadataInput) SetRoleSearchMatching(v string) *LdapServerMetadataInput { + s.RoleSearchMatching = &v + return s +} + +// SetRoleSearchSubtree sets the RoleSearchSubtree field's value. +func (s *LdapServerMetadataInput) SetRoleSearchSubtree(v bool) *LdapServerMetadataInput { + s.RoleSearchSubtree = &v + return s +} + +// SetServiceAccountPassword sets the ServiceAccountPassword field's value. +func (s *LdapServerMetadataInput) SetServiceAccountPassword(v string) *LdapServerMetadataInput { + s.ServiceAccountPassword = &v + return s +} + +// SetServiceAccountUsername sets the ServiceAccountUsername field's value. +func (s *LdapServerMetadataInput) SetServiceAccountUsername(v string) *LdapServerMetadataInput { + s.ServiceAccountUsername = &v + return s +} + +// SetUserBase sets the UserBase field's value. +func (s *LdapServerMetadataInput) SetUserBase(v string) *LdapServerMetadataInput { + s.UserBase = &v + return s +} + +// SetUserRoleName sets the UserRoleName field's value. +func (s *LdapServerMetadataInput) SetUserRoleName(v string) *LdapServerMetadataInput { + s.UserRoleName = &v + return s +} + +// SetUserSearchMatching sets the UserSearchMatching field's value. +func (s *LdapServerMetadataInput) SetUserSearchMatching(v string) *LdapServerMetadataInput { + s.UserSearchMatching = &v + return s +} + +// SetUserSearchSubtree sets the UserSearchSubtree field's value. +func (s *LdapServerMetadataInput) SetUserSearchSubtree(v bool) *LdapServerMetadataInput { + s.UserSearchSubtree = &v + return s +} + +// The metadata of the LDAP server used to authenticate and authorize connections +// to the broker. +type LdapServerMetadataOutput struct { + _ struct{} `type:"structure"` + + // Fully qualified domain name of the LDAP server. Optional failover server. + Hosts []*string `locationName:"hosts" type:"list"` + + // Fully qualified name of the directory to search for a user’s groups. + RoleBase *string `locationName:"roleBase" type:"string"` + + // Specifies the LDAP attribute that identifies the group name attribute in + // the object returned from the group membership query. + RoleName *string `locationName:"roleName" type:"string"` + + // The search criteria for groups. + RoleSearchMatching *string `locationName:"roleSearchMatching" type:"string"` + + // The directory search scope for the role. If set to true, scope is to search + // the entire sub-tree. + RoleSearchSubtree *bool `locationName:"roleSearchSubtree" type:"boolean"` + + // Service account username. 
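Illustrative sketch of the new LDAP support (not part of the generated sources): it builds an LdapServerMetadataInput and passes it to CreateBroker with the LDAP authentication strategy. The directory host, distinguished names, service account, and broker name are placeholders, and the remaining required broker settings (engine, instance type, users, networking) are omitted for brevity.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/mq"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := mq.New(sess)

	// LDAP directory settings used to authenticate and authorize broker users.
	// Host name, distinguished names, and the service account are placeholders.
	ldap := &mq.LdapServerMetadataInput{
		Hosts:                  aws.StringSlice([]string{"ldap.corp.example.com"}),
		ServiceAccountUsername: aws.String("cn=mqbind,ou=svc,dc=example,dc=com"),
		ServiceAccountPassword: aws.String("example-password"),
		UserBase:               aws.String("ou=users,dc=example,dc=com"),
		UserSearchMatching:     aws.String("uid={0}"),
		RoleBase:               aws.String("ou=groups,dc=example,dc=com"),
		RoleName:               aws.String("cn"),
		RoleSearchMatching:     aws.String("(member=uid={1})"),
	}

	// Only the LDAP-related settings are shown; the other required broker
	// configuration (engine version, instance type, subnets, and so on) is omitted.
	out, err := svc.CreateBroker(&mq.CreateBrokerRequest{
		BrokerName:             aws.String("my-ldap-broker"),
		AuthenticationStrategy: aws.String(mq.AuthenticationStrategyLdap),
		LdapServerMetadata:     ldap,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.BrokerId))
}
```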
+ ServiceAccountUsername *string `locationName:"serviceAccountUsername" type:"string"` + + // Fully qualified name of the directory where you want to search for users. + UserBase *string `locationName:"userBase" type:"string"` + + // Specifies the name of the LDAP attribute for the user group membership. + UserRoleName *string `locationName:"userRoleName" type:"string"` + + // The search criteria for users. + UserSearchMatching *string `locationName:"userSearchMatching" type:"string"` + + // The directory search scope for the user. If set to true, scope is to search + // the entire sub-tree. + UserSearchSubtree *bool `locationName:"userSearchSubtree" type:"boolean"` +} + +// String returns the string representation +func (s LdapServerMetadataOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LdapServerMetadataOutput) GoString() string { + return s.String() +} + +// SetHosts sets the Hosts field's value. +func (s *LdapServerMetadataOutput) SetHosts(v []*string) *LdapServerMetadataOutput { + s.Hosts = v + return s +} + +// SetRoleBase sets the RoleBase field's value. +func (s *LdapServerMetadataOutput) SetRoleBase(v string) *LdapServerMetadataOutput { + s.RoleBase = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *LdapServerMetadataOutput) SetRoleName(v string) *LdapServerMetadataOutput { + s.RoleName = &v + return s +} + +// SetRoleSearchMatching sets the RoleSearchMatching field's value. +func (s *LdapServerMetadataOutput) SetRoleSearchMatching(v string) *LdapServerMetadataOutput { + s.RoleSearchMatching = &v + return s +} + +// SetRoleSearchSubtree sets the RoleSearchSubtree field's value. +func (s *LdapServerMetadataOutput) SetRoleSearchSubtree(v bool) *LdapServerMetadataOutput { + s.RoleSearchSubtree = &v + return s +} + +// SetServiceAccountUsername sets the ServiceAccountUsername field's value. +func (s *LdapServerMetadataOutput) SetServiceAccountUsername(v string) *LdapServerMetadataOutput { + s.ServiceAccountUsername = &v + return s +} + +// SetUserBase sets the UserBase field's value. +func (s *LdapServerMetadataOutput) SetUserBase(v string) *LdapServerMetadataOutput { + s.UserBase = &v + return s +} + +// SetUserRoleName sets the UserRoleName field's value. +func (s *LdapServerMetadataOutput) SetUserRoleName(v string) *LdapServerMetadataOutput { + s.UserRoleName = &v + return s +} + +// SetUserSearchMatching sets the UserSearchMatching field's value. +func (s *LdapServerMetadataOutput) SetUserSearchMatching(v string) *LdapServerMetadataOutput { + s.UserSearchMatching = &v + return s +} + +// SetUserSearchSubtree sets the UserSearchSubtree field's value. +func (s *LdapServerMetadataOutput) SetUserSearchSubtree(v bool) *LdapServerMetadataOutput { + s.UserSearchSubtree = &v + return s +} + type ListBrokersInput struct { _ struct{} `type:"structure"` @@ -4938,6 +5316,9 @@ func (s *UnauthorizedException) RequestID() string { type UpdateBrokerRequest struct { _ struct{} `type:"structure"` + // The authentication strategy used to secure the broker. 
+ AuthenticationStrategy *string `locationName:"authenticationStrategy" type:"string" enum:"AuthenticationStrategy"` + AutoMinorVersionUpgrade *bool `locationName:"autoMinorVersionUpgrade" type:"boolean"` // BrokerId is a required field @@ -4950,6 +5331,10 @@ type UpdateBrokerRequest struct { HostInstanceType *string `locationName:"hostInstanceType" type:"string"` + // The metadata of the LDAP server used to authenticate and authorize connections + // to the broker. + LdapServerMetadata *LdapServerMetadataInput `locationName:"ldapServerMetadata" type:"structure"` + // The list of information about logs to be enabled for the specified broker. Logs *Logs `locationName:"logs" type:"structure"` @@ -4982,6 +5367,12 @@ func (s *UpdateBrokerRequest) Validate() error { return nil } +// SetAuthenticationStrategy sets the AuthenticationStrategy field's value. +func (s *UpdateBrokerRequest) SetAuthenticationStrategy(v string) *UpdateBrokerRequest { + s.AuthenticationStrategy = &v + return s +} + // SetAutoMinorVersionUpgrade sets the AutoMinorVersionUpgrade field's value. func (s *UpdateBrokerRequest) SetAutoMinorVersionUpgrade(v bool) *UpdateBrokerRequest { s.AutoMinorVersionUpgrade = &v @@ -5012,6 +5403,12 @@ func (s *UpdateBrokerRequest) SetHostInstanceType(v string) *UpdateBrokerRequest return s } +// SetLdapServerMetadata sets the LdapServerMetadata field's value. +func (s *UpdateBrokerRequest) SetLdapServerMetadata(v *LdapServerMetadataInput) *UpdateBrokerRequest { + s.LdapServerMetadata = v + return s +} + // SetLogs sets the Logs field's value. func (s *UpdateBrokerRequest) SetLogs(v *Logs) *UpdateBrokerRequest { s.Logs = v @@ -5027,6 +5424,9 @@ func (s *UpdateBrokerRequest) SetSecurityGroups(v []*string) *UpdateBrokerReques type UpdateBrokerResponse struct { _ struct{} `type:"structure"` + // The authentication strategy used to secure the broker. + AuthenticationStrategy *string `locationName:"authenticationStrategy" type:"string" enum:"AuthenticationStrategy"` + AutoMinorVersionUpgrade *bool `locationName:"autoMinorVersionUpgrade" type:"boolean"` BrokerId *string `locationName:"brokerId" type:"string"` @@ -5038,6 +5438,10 @@ type UpdateBrokerResponse struct { HostInstanceType *string `locationName:"hostInstanceType" type:"string"` + // The metadata of the LDAP server used to authenticate and authorize connections + // to the broker. + LdapServerMetadata *LdapServerMetadataOutput `locationName:"ldapServerMetadata" type:"structure"` + // The list of information about logs to be enabled for the specified broker. Logs *Logs `locationName:"logs" type:"structure"` @@ -5054,6 +5458,12 @@ func (s UpdateBrokerResponse) GoString() string { return s.String() } +// SetAuthenticationStrategy sets the AuthenticationStrategy field's value. +func (s *UpdateBrokerResponse) SetAuthenticationStrategy(v string) *UpdateBrokerResponse { + s.AuthenticationStrategy = &v + return s +} + // SetAutoMinorVersionUpgrade sets the AutoMinorVersionUpgrade field's value. func (s *UpdateBrokerResponse) SetAutoMinorVersionUpgrade(v bool) *UpdateBrokerResponse { s.AutoMinorVersionUpgrade = &v @@ -5084,6 +5494,12 @@ func (s *UpdateBrokerResponse) SetHostInstanceType(v string) *UpdateBrokerRespon return s } +// SetLdapServerMetadata sets the LdapServerMetadata field's value. +func (s *UpdateBrokerResponse) SetLdapServerMetadata(v *LdapServerMetadataOutput) *UpdateBrokerResponse { + s.LdapServerMetadata = v + return s +} + // SetLogs sets the Logs field's value. 
func (s *UpdateBrokerResponse) SetLogs(v *Logs) *UpdateBrokerResponse { s.Logs = v @@ -5487,6 +5903,15 @@ func (s *WeeklyStartTime) SetTimeZone(v string) *WeeklyStartTime { return s } +// The authentication strategy used to secure the broker. +const ( + // AuthenticationStrategySimple is a AuthenticationStrategy enum value + AuthenticationStrategySimple = "SIMPLE" + + // AuthenticationStrategyLdap is a AuthenticationStrategy enum value + AuthenticationStrategyLdap = "LDAP" +) + // The status of the broker. const ( // BrokerStateCreationInProgress is a BrokerState enum value diff --git a/service/mq/mqiface/interface.go b/service/mq/mqiface/interface.go index c0e31751083..d32434814b6 100644 --- a/service/mq/mqiface/interface.go +++ b/service/mq/mqiface/interface.go @@ -116,6 +116,9 @@ type MQAPI interface { ListBrokersWithContext(aws.Context, *mq.ListBrokersInput, ...request.Option) (*mq.ListBrokersResponse, error) ListBrokersRequest(*mq.ListBrokersInput) (*request.Request, *mq.ListBrokersResponse) + ListBrokersPages(*mq.ListBrokersInput, func(*mq.ListBrokersResponse, bool) bool) error + ListBrokersPagesWithContext(aws.Context, *mq.ListBrokersInput, func(*mq.ListBrokersResponse, bool) bool, ...request.Option) error + ListConfigurationRevisions(*mq.ListConfigurationRevisionsInput) (*mq.ListConfigurationRevisionsResponse, error) ListConfigurationRevisionsWithContext(aws.Context, *mq.ListConfigurationRevisionsInput, ...request.Option) (*mq.ListConfigurationRevisionsResponse, error) ListConfigurationRevisionsRequest(*mq.ListConfigurationRevisionsInput) (*request.Request, *mq.ListConfigurationRevisionsResponse) diff --git a/service/sagemaker/api.go b/service/sagemaker/api.go index ef9d7ee7931..d31b9e382bb 100644 --- a/service/sagemaker/api.go +++ b/service/sagemaker/api.go @@ -583,7 +583,7 @@ func (c *SageMaker) CreateCompilationJobRequest(input *CreateCompilationJobInput // the model runs on // // * The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker -// assumes to perform the model compilation job +// assumes to perform the model compilation job. // // You can also provide a Tag to track the model compilation job's resource // use and costs. The response body contains the CompilationJobArn for the compiled @@ -1007,9 +1007,9 @@ func (c *SageMaker) CreateExperimentRequest(input *CreateExperimentInput) (req * // CreateExperiment API operation for Amazon SageMaker Service. // -// Creates an Amazon SageMaker experiment. An experiment is a collection of -// trials that are observed, compared and evaluated as a group. A trial is a -// set of steps, called trial components, that produce a machine learning model. +// Creates an SageMaker experiment. An experiment is a collection of trials +// that are observed, compared and evaluated as a group. A trial is a set of +// steps, called trial components, that produce a machine learning model. // // The goal of an experiment is to determine the components that produce the // best model. Multiple trials are performed, each one isolating and measuring @@ -2708,6 +2708,96 @@ func (c *SageMaker) CreateUserProfileWithContext(ctx aws.Context, input *CreateU return out, req.Send() } +const opCreateWorkforce = "CreateWorkforce" + +// CreateWorkforceRequest generates a "aws/request.Request" representing the +// client's request for the CreateWorkforce operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateWorkforce for more information on using the CreateWorkforce +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateWorkforceRequest method. +// req, resp := client.CreateWorkforceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateWorkforce +func (c *SageMaker) CreateWorkforceRequest(input *CreateWorkforceInput) (req *request.Request, output *CreateWorkforceOutput) { + op := &request.Operation{ + Name: opCreateWorkforce, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateWorkforceInput{} + } + + output = &CreateWorkforceOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateWorkforce API operation for Amazon SageMaker Service. +// +// Use this operation to create a workforce. This operation will return an error +// if a workforce already exists in the AWS Region that you specify. You can +// only create one workforce in each AWS Region. +// +// If you want to create a new workforce in an AWS Region where the a workforce +// already exists, use the API operation to delete the existing workforce and +// then use this operation to create a new workforce. +// +// To create a private workforce using Amazon Cognito, you must specify a Cognito +// user pool in CognitoConfig. You can also create an Amazon Cognito workforce +// using the Amazon SageMaker console. For more information, see Create a Private +// Workforce (Amazon Cognito) (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-create-private.html). +// +// To create a private workforce using your own OIDC Identity Provider (IdP), +// specify your IdP configuration in OidcConfig. You must create a OIDC IdP +// workforce using this API operation. For more information, see Create a Private +// Workforce (OIDC IdP) (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-create-private-oidc.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation CreateWorkforce for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateWorkforce +func (c *SageMaker) CreateWorkforce(input *CreateWorkforceInput) (*CreateWorkforceOutput, error) { + req, out := c.CreateWorkforceRequest(input) + return out, req.Send() +} + +// CreateWorkforceWithContext is the same as CreateWorkforce with the addition of +// the ability to pass a context and additional request options. +// +// See CreateWorkforce for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SageMaker) CreateWorkforceWithContext(ctx aws.Context, input *CreateWorkforceInput, opts ...request.Option) (*CreateWorkforceOutput, error) { + req, out := c.CreateWorkforceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateWorkteam = "CreateWorkteam" // CreateWorkteamRequest generates a "aws/request.Request" representing the @@ -3414,6 +3504,9 @@ func (c *SageMaker) DeleteFlowDefinitionRequest(input *DeleteFlowDefinitionInput // API operation DeleteFlowDefinition for usage and error information. // // Returned Error Types: +// * ResourceInUse +// Resource being accessed is in use. +// // * ResourceNotFound // Resource being access is not found. // @@ -3484,7 +3577,7 @@ func (c *SageMaker) DeleteHumanTaskUiRequest(input *DeleteHumanTaskUiInput) (req // DeleteHumanTaskUi API operation for Amazon SageMaker Service. // -// Use this operation to delete a worker task template (HumanTaskUi). +// Use this operation to delete a human task user interface (worker task template). // // To see a list of human task user interfaces (work task templates) in your // account, use . When you delete a worker task template, it no longer appears @@ -4244,6 +4337,85 @@ func (c *SageMaker) DeleteUserProfileWithContext(ctx aws.Context, input *DeleteU return out, req.Send() } +const opDeleteWorkforce = "DeleteWorkforce" + +// DeleteWorkforceRequest generates a "aws/request.Request" representing the +// client's request for the DeleteWorkforce operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteWorkforce for more information on using the DeleteWorkforce +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteWorkforceRequest method. +// req, resp := client.DeleteWorkforceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteWorkforce +func (c *SageMaker) DeleteWorkforceRequest(input *DeleteWorkforceInput) (req *request.Request, output *DeleteWorkforceOutput) { + op := &request.Operation{ + Name: opDeleteWorkforce, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteWorkforceInput{} + } + + output = &DeleteWorkforceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteWorkforce API operation for Amazon SageMaker Service. +// +// Use this operation to delete a workforce. +// +// If you want to create a new workforce in an AWS Region where the a workforce +// already exists, use this operation to delete the existing workforce and then +// use to create a new workforce. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation DeleteWorkforce for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteWorkforce +func (c *SageMaker) DeleteWorkforce(input *DeleteWorkforceInput) (*DeleteWorkforceOutput, error) { + req, out := c.DeleteWorkforceRequest(input) + return out, req.Send() +} + +// DeleteWorkforceWithContext is the same as DeleteWorkforce with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteWorkforce for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) DeleteWorkforceWithContext(ctx aws.Context, input *DeleteWorkforceInput, opts ...request.Option) (*DeleteWorkforceOutput, error) { + req, out := c.DeleteWorkforceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteWorkteam = "DeleteWorkteam" // DeleteWorkteamRequest generates a "aws/request.Request" representing the @@ -10595,6 +10767,139 @@ func (c *SageMaker) ListUserProfilesPagesWithContext(ctx aws.Context, input *Lis return p.Err() } +const opListWorkforces = "ListWorkforces" + +// ListWorkforcesRequest generates a "aws/request.Request" representing the +// client's request for the ListWorkforces operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListWorkforces for more information on using the ListWorkforces +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListWorkforcesRequest method. +// req, resp := client.ListWorkforcesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListWorkforces +func (c *SageMaker) ListWorkforcesRequest(input *ListWorkforcesInput) (req *request.Request, output *ListWorkforcesOutput) { + op := &request.Operation{ + Name: opListWorkforces, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListWorkforcesInput{} + } + + output = &ListWorkforcesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListWorkforces API operation for Amazon SageMaker Service. +// +// Use this operation to list all private and vendor workforces in an AWS Region. +// Note that you can only have one private workforce per AWS Region. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation ListWorkforces for usage and error information. 
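Minimal sketch of the new DeleteWorkforce operation (not part of the generated sources); the workforce name is a placeholder. Per the documentation above, the existing workforce in a Region must be deleted before a new one can be created there with CreateWorkforce.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := sagemaker.New(sess)

	// Delete the existing workforce in this Region (placeholder name) so a
	// new one can be created with CreateWorkforce.
	if _, err := svc.DeleteWorkforce(&sagemaker.DeleteWorkforceInput{
		WorkforceName: aws.String("existing-workforce"),
	}); err != nil {
		log.Fatal(err)
	}
}
```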
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListWorkforces +func (c *SageMaker) ListWorkforces(input *ListWorkforcesInput) (*ListWorkforcesOutput, error) { + req, out := c.ListWorkforcesRequest(input) + return out, req.Send() +} + +// ListWorkforcesWithContext is the same as ListWorkforces with the addition of +// the ability to pass a context and additional request options. +// +// See ListWorkforces for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) ListWorkforcesWithContext(ctx aws.Context, input *ListWorkforcesInput, opts ...request.Option) (*ListWorkforcesOutput, error) { + req, out := c.ListWorkforcesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListWorkforcesPages iterates over the pages of a ListWorkforces operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListWorkforces method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListWorkforces operation. +// pageNum := 0 +// err := client.ListWorkforcesPages(params, +// func(page *sagemaker.ListWorkforcesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SageMaker) ListWorkforcesPages(input *ListWorkforcesInput, fn func(*ListWorkforcesOutput, bool) bool) error { + return c.ListWorkforcesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListWorkforcesPagesWithContext same as ListWorkforcesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) ListWorkforcesPagesWithContext(ctx aws.Context, input *ListWorkforcesInput, fn func(*ListWorkforcesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListWorkforcesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListWorkforcesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListWorkforcesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListWorkteams = "ListWorkteams" // ListWorkteamsRequest generates a "aws/request.Request" representing the @@ -10781,6 +11086,11 @@ func (c *SageMaker) RenderUiTemplateRequest(input *RenderUiTemplateInput) (req * // // See the AWS API reference guide for Amazon SageMaker Service's // API operation RenderUiTemplate for usage and error information. +// +// Returned Error Types: +// * ResourceNotFound +// Resource being access is not found. 
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/RenderUiTemplate func (c *SageMaker) RenderUiTemplate(input *RenderUiTemplateInput) (*RenderUiTemplateOutput, error) { req, out := c.RenderUiTemplateRequest(input) @@ -13583,6 +13893,72 @@ type AnnotationConsolidationConfig struct { // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-NamedEntityRecognition // arn:aws:lambda:ca-central-1:918755190332:function:ACS-NamedEntityRecognition // + // Named entity recognition - Groups similar selections and calculates aggregate + // boundaries, resolving to most-assigned label. + // + // * arn:aws:lambda:us-east-1:432418664414:function:ACS-NamedEntityRecognition + // arn:aws:lambda:us-east-2:266458841044:function:ACS-NamedEntityRecognition + // arn:aws:lambda:us-west-2:081040173940:function:ACS-NamedEntityRecognition + // arn:aws:lambda:eu-west-1:568282634449:function:ACS-NamedEntityRecognition + // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-NamedEntityRecognition + // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-NamedEntityRecognition + // arn:aws:lambda:ap-south-1:565803892007:function:ACS-NamedEntityRecognition + // arn:aws:lambda:eu-central-1:203001061592:function:ACS-NamedEntityRecognition + // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-NamedEntityRecognition + // arn:aws:lambda:eu-west-2:487402164563:function:ACS-NamedEntityRecognition + // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-NamedEntityRecognition + // arn:aws:lambda:ca-central-1:918755190332:function:ACS-NamedEntityRecognition + // + // Video Classification - Use this task type when you need workers to classify + // videos using predefined labels that you specify. Workers are shown videos + // and are asked to choose one label for each video. + // + // * arn:aws:lambda:us-east-1:432418664414:function:ACS-VideoMultiClass arn:aws:lambda:us-east-2:266458841044:function:ACS-VideoMultiClass + // arn:aws:lambda:us-west-2:081040173940:function:ACS-VideoMultiClass arn:aws:lambda:eu-west-1:568282634449:function:ACS-VideoMultiClass + // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VideoMultiClass + // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VideoMultiClass + // arn:aws:lambda:ap-south-1:565803892007:function:ACS-VideoMultiClass arn:aws:lambda:eu-central-1:203001061592:function:ACS-VideoMultiClass + // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VideoMultiClass + // arn:aws:lambda:eu-west-2:487402164563:function:ACS-VideoMultiClass arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VideoMultiClass + // arn:aws:lambda:ca-central-1:918755190332:function:ACS-VideoMultiClass + // + // Video Frame Object Detection - Use this task type to have workers identify + // and locate objects in a sequence of video frames (images extracted from a + // video) using bounding boxes. For example, you can use this task to ask workers + // to identify and localize various objects in a series of video frames, such + // as cars, bikes, and pedestrians. 
+ // + // * arn:aws:lambda:us-east-1:432418664414:function:ACS-VideoObjectDetection + // arn:aws:lambda:us-east-2:266458841044:function:ACS-VideoObjectDetection + // arn:aws:lambda:us-west-2:081040173940:function:ACS-VideoObjectDetection + // arn:aws:lambda:eu-west-1:568282634449:function:ACS-VideoObjectDetection + // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VideoObjectDetection + // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VideoObjectDetection + // arn:aws:lambda:ap-south-1:565803892007:function:ACS-VideoObjectDetection + // arn:aws:lambda:eu-central-1:203001061592:function:ACS-VideoObjectDetection + // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VideoObjectDetection + // arn:aws:lambda:eu-west-2:487402164563:function:ACS-VideoObjectDetection + // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VideoObjectDetection + // arn:aws:lambda:ca-central-1:918755190332:function:ACS-VideoObjectDetection + // + // Video Frame Object Tracking - Use this task type to have workers track the + // movement of objects in a sequence of video frames (images extracted from + // a video) using bounding boxes. For example, you can use this task to ask + // workers to track the movement of objects, such as cars, bikes, and pedestrians. + // + // * arn:aws:lambda:us-east-1:432418664414:function:ACS-VideoObjectTracking + // arn:aws:lambda:us-east-2:266458841044:function:ACS-VideoObjectTracking + // arn:aws:lambda:us-west-2:081040173940:function:ACS-VideoObjectTracking + // arn:aws:lambda:eu-west-1:568282634449:function:ACS-VideoObjectTracking + // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VideoObjectTracking + // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VideoObjectTracking + // arn:aws:lambda:ap-south-1:565803892007:function:ACS-VideoObjectTracking + // arn:aws:lambda:eu-central-1:203001061592:function:ACS-VideoObjectTracking + // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VideoObjectTracking + // arn:aws:lambda:eu-west-2:487402164563:function:ACS-VideoObjectTracking + // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VideoObjectTracking + // arn:aws:lambda:ca-central-1:918755190332:function:ACS-VideoObjectTracking + // // 3D point cloud object detection - Use this task type when you want workers // to classify objects in a 3D point cloud by drawing 3D cuboids around objects. // For example, you can use this task type to ask workers to identify different @@ -13709,6 +14085,40 @@ type AnnotationConsolidationConfig struct { // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentBoundingBox // arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentBoundingBox // + // Video Frame Object Detection Adjustment - Use this task type when you want + // workers to adjust bounding boxes that workers have added to video frames + // to classify and localize objects in a sequence of video frames. 
+ // + // * arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentVideoObjectDetection + // arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentVideoObjectDetection + // arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentVideoObjectDetection + // arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentVideoObjectDetection + // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentVideoObjectDetection + // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentVideoObjectDetection + // arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentVideoObjectDetection + // arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentVideoObjectDetection + // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentVideoObjectDetection + // arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentVideoObjectDetection + // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentVideoObjectDetection + // arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentVideoObjectDetection + // + // Video Frame Object Tracking Adjustment - Use this task type when you want + // workers to adjust bounding boxes that workers have added to video frames + // to track object movement across a sequence of video frames. + // + // * arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentVideoObjectTracking + // arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentVideoObjectTracking + // arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentVideoObjectTracking + // arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentVideoObjectTracking + // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentVideoObjectTracking + // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentVideoObjectTracking + // arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentVideoObjectTracking + // arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentVideoObjectTracking + // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentVideoObjectTracking + // arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentVideoObjectTracking + // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentVideoObjectTracking + // arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentVideoObjectTracking + // // 3D point cloud object detection adjustment - Use this task type when you // want workers to adjust 3D cuboids around objects in a 3D point cloud. // @@ -15343,6 +15753,71 @@ func (s *CodeRepositorySummary) SetLastModifiedTime(v time.Time) *CodeRepository return s } +// Use this parameter to configure your Amazon Cognito workforce. A single Cognito +// workforce is created using and corresponds to a single Amazon Cognito user +// pool (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools.html). +type CognitoConfig struct { + _ struct{} `type:"structure"` + + // The client ID for your Amazon Cognito user pool. + // + // ClientId is a required field + ClientId *string `min:"1" type:"string" required:"true"` + + // A user pool (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools.html) + // is a user directory in Amazon Cognito. With a user pool, your users can sign + // in to your web or mobile app through Amazon Cognito. Your users can also + // sign in through social identity providers like Google, Facebook, Amazon, + // or Apple, and through SAML identity providers. 
+ // + // UserPool is a required field + UserPool *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CognitoConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CognitoConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CognitoConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CognitoConfig"} + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.ClientId != nil && len(*s.ClientId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientId", 1)) + } + if s.UserPool == nil { + invalidParams.Add(request.NewErrParamRequired("UserPool")) + } + if s.UserPool != nil && len(*s.UserPool) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserPool", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientId sets the ClientId field's value. +func (s *CognitoConfig) SetClientId(v string) *CognitoConfig { + s.ClientId = &v + return s +} + +// SetUserPool sets the UserPool field's value. +func (s *CognitoConfig) SetUserPool(v string) *CognitoConfig { + s.UserPool = &v + return s +} + // Identifies a Amazon Cognito user group. A user group can be used in on or // more work teams. type CognitoMemberDefinition struct { @@ -15495,10 +15970,20 @@ type CompilationJobSummary struct { // The time when the model compilation job started. CompilationStartTime *time.Time `type:"timestamp"` - // The type of device that the model will run on after compilation has completed. - // - // CompilationTargetDevice is a required field - CompilationTargetDevice *string `type:"string" required:"true" enum:"TargetDevice"` + // The type of device that the model will run on after the compilation job has + // completed. + CompilationTargetDevice *string `type:"string" enum:"TargetDevice"` + + // The type of accelerator that the model will run on after the compilation + // job has completed. + CompilationTargetPlatformAccelerator *string `type:"string" enum:"TargetPlatformAccelerator"` + + // The type of architecture that the model will run on after the compilation + // job has completed. + CompilationTargetPlatformArch *string `type:"string" enum:"TargetPlatformArch"` + + // The type of OS that the model will run on after the compilation job has completed. + CompilationTargetPlatformOs *string `type:"string" enum:"TargetPlatformOs"` // The time when the model compilation job was created. // @@ -15555,6 +16040,24 @@ func (s *CompilationJobSummary) SetCompilationTargetDevice(v string) *Compilatio return s } +// SetCompilationTargetPlatformAccelerator sets the CompilationTargetPlatformAccelerator field's value. +func (s *CompilationJobSummary) SetCompilationTargetPlatformAccelerator(v string) *CompilationJobSummary { + s.CompilationTargetPlatformAccelerator = &v + return s +} + +// SetCompilationTargetPlatformArch sets the CompilationTargetPlatformArch field's value. +func (s *CompilationJobSummary) SetCompilationTargetPlatformArch(v string) *CompilationJobSummary { + s.CompilationTargetPlatformArch = &v + return s +} + +// SetCompilationTargetPlatformOs sets the CompilationTargetPlatformOs field's value. +func (s *CompilationJobSummary) SetCompilationTargetPlatformOs(v string) *CompilationJobSummary { + s.CompilationTargetPlatformOs = &v + return s +} + // SetCreationTime sets the CreationTime field's value. 
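Hedged sketch showing how the new TargetPlatform fields on CompilationJobSummary might be read; it assumes the pre-existing ListCompilationJobs operation and its CompilationJobSummaries output field, neither of which is shown in this patch.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := sagemaker.New(sess)

	out, err := svc.ListCompilationJobs(&sagemaker.ListCompilationJobsInput{
		MaxResults: aws.Int64(10),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, j := range out.CompilationJobSummaries {
		// Jobs compiled for a TargetPlatform report OS, architecture, and
		// accelerator instead of a TargetDevice, so either group may be empty.
		fmt.Println(
			aws.StringValue(j.CompilationJobName),
			aws.StringValue(j.CompilationTargetDevice),
			aws.StringValue(j.CompilationTargetPlatformOs),
			aws.StringValue(j.CompilationTargetPlatformArch),
			aws.StringValue(j.CompilationTargetPlatformAccelerator),
		)
	}
}
```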
func (s *CompilationJobSummary) SetCreationTime(v time.Time) *CompilationJobSummary { s.CreationTime = &v @@ -18864,7 +19367,14 @@ type CreateProcessingJobInput struct { // Sets the environment variables in the Docker container. Environment map[string]*string `type:"map"` - // Configuration for the experiment. + // Associates a SageMaker job as a trial component with an experiment and trial. + // Specified when you call the following APIs: + // + // * CreateProcessingJob + // + // * CreateTrainingJob + // + // * CreateTransformJob ExperimentConfig *ExperimentConfig `type:"structure"` // Networking options for a processing job. @@ -19136,7 +19646,14 @@ type CreateTrainingJobInput struct { // have network access. EnableNetworkIsolation *bool `type:"boolean"` - // Configuration for the experiment. + // Associates a SageMaker job as a trial component with an experiment and trial. + // Specified when you call the following APIs: + // + // * CreateProcessingJob + // + // * CreateTrainingJob + // + // * CreateTransformJob ExperimentConfig *ExperimentConfig `type:"structure"` // Algorithm-specific parameters that influence the quality of the model. You @@ -19517,7 +20034,14 @@ type CreateTransformJobInput struct { // 16 key and values entries in the map. Environment map[string]*string `type:"map"` - // Configuration for the experiment. + // Associates a SageMaker job as a trial component with an experiment and trial. + // Specified when you call the following APIs: + // + // * CreateProcessingJob + // + // * CreateTrainingJob + // + // * CreateTransformJob ExperimentConfig *ExperimentConfig `type:"structure"` // The maximum number of parallel requests that can be sent to each instance @@ -20185,6 +20709,142 @@ func (s *CreateUserProfileOutput) SetUserProfileArn(v string) *CreateUserProfile return s } +type CreateWorkforceInput struct { + _ struct{} `type:"structure"` + + // Use this parameter to configure an Amazon Cognito private workforce. A single + // Cognito workforce is created using and corresponds to a single Amazon Cognito + // user pool (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools.html). + // + // Do not use OidcConfig if you specify values for CognitoConfig. + CognitoConfig *CognitoConfig `type:"structure"` + + // Use this parameter to configure a private workforce using your own OIDC Identity + // Provider. Do not use CognitoConfig if you specify values for OidcConfig. + OidcConfig *OidcConfig `type:"structure"` + + // A list of IP address ranges (CIDRs (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html)). + // Used to create an allow list of IP addresses for a private workforce. For + // more information, see . + SourceIpConfig *SourceIpConfig `type:"structure"` + + // An array of key-value pairs that contain metadata to help you categorize + // and organize our workforce. Each tag consists of a key and a value, both + // of which you define. + Tags []*Tag `type:"list"` + + // The name of the private workforce. + // + // WorkforceName is a required field + WorkforceName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateWorkforceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateWorkforceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateWorkforceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateWorkforceInput"} + if s.WorkforceName == nil { + invalidParams.Add(request.NewErrParamRequired("WorkforceName")) + } + if s.WorkforceName != nil && len(*s.WorkforceName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WorkforceName", 1)) + } + if s.CognitoConfig != nil { + if err := s.CognitoConfig.Validate(); err != nil { + invalidParams.AddNested("CognitoConfig", err.(request.ErrInvalidParams)) + } + } + if s.OidcConfig != nil { + if err := s.OidcConfig.Validate(); err != nil { + invalidParams.AddNested("OidcConfig", err.(request.ErrInvalidParams)) + } + } + if s.SourceIpConfig != nil { + if err := s.SourceIpConfig.Validate(); err != nil { + invalidParams.AddNested("SourceIpConfig", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCognitoConfig sets the CognitoConfig field's value. +func (s *CreateWorkforceInput) SetCognitoConfig(v *CognitoConfig) *CreateWorkforceInput { + s.CognitoConfig = v + return s +} + +// SetOidcConfig sets the OidcConfig field's value. +func (s *CreateWorkforceInput) SetOidcConfig(v *OidcConfig) *CreateWorkforceInput { + s.OidcConfig = v + return s +} + +// SetSourceIpConfig sets the SourceIpConfig field's value. +func (s *CreateWorkforceInput) SetSourceIpConfig(v *SourceIpConfig) *CreateWorkforceInput { + s.SourceIpConfig = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateWorkforceInput) SetTags(v []*Tag) *CreateWorkforceInput { + s.Tags = v + return s +} + +// SetWorkforceName sets the WorkforceName field's value. +func (s *CreateWorkforceInput) SetWorkforceName(v string) *CreateWorkforceInput { + s.WorkforceName = &v + return s +} + +type CreateWorkforceOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the workforce. + // + // WorkforceArn is a required field + WorkforceArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateWorkforceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateWorkforceOutput) GoString() string { + return s.String() +} + +// SetWorkforceArn sets the WorkforceArn field's value. +func (s *CreateWorkforceOutput) SetWorkforceArn(v string) *CreateWorkforceOutput { + s.WorkforceArn = &v + return s +} + type CreateWorkteamInput struct { _ struct{} `type:"structure"` @@ -20213,6 +20873,9 @@ type CreateWorkteamInput struct { // in the AWS Billing and Cost Management User Guide. Tags []*Tag `type:"list"` + // The name of the workforce. + WorkforceName *string `min:"1" type:"string"` + // The name of the work team. Use this name to identify the work team. 
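A minimal, illustrative sketch of calling the new CreateWorkforce API with an OIDC identity provider; the workforce name, endpoint URLs, client credentials, and CIDR below are placeholders, not values from this release:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	svc := sagemaker.New(session.Must(session.NewSession()))

	// Exactly one of OidcConfig or CognitoConfig may be set; this sketch uses OIDC.
	out, err := svc.CreateWorkforce(&sagemaker.CreateWorkforceInput{
		WorkforceName: aws.String("example-oidc-workforce"), // placeholder name
		OidcConfig: &sagemaker.OidcConfig{
			ClientId:              aws.String("example-client-id"),
			ClientSecret:          aws.String("example-client-secret"),
			Issuer:                aws.String("https://idp.example.com"),
			AuthorizationEndpoint: aws.String("https://idp.example.com/authorize"),
			TokenEndpoint:         aws.String("https://idp.example.com/token"),
			UserInfoEndpoint:      aws.String("https://idp.example.com/userinfo"),
			LogoutEndpoint:        aws.String("https://idp.example.com/logout"),
			JwksUri:               aws.String("https://idp.example.com/.well-known/jwks.json"),
		},
		// Optional allow list of worker CIDR ranges.
		SourceIpConfig: &sagemaker.SourceIpConfig{
			Cidrs: aws.StringSlice([]string{"203.0.113.0/24"}),
		},
	})
	if err != nil {
		fmt.Println("CreateWorkforce failed:", err)
		return
	}
	fmt.Println("workforce ARN:", aws.StringValue(out.WorkforceArn))
}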
// // WorkteamName is a required field @@ -20244,6 +20907,9 @@ func (s *CreateWorkteamInput) Validate() error { if s.MemberDefinitions != nil && len(s.MemberDefinitions) < 1 { invalidParams.Add(request.NewErrParamMinLen("MemberDefinitions", 1)) } + if s.WorkforceName != nil && len(*s.WorkforceName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WorkforceName", 1)) + } if s.WorkteamName == nil { invalidParams.Add(request.NewErrParamRequired("WorkteamName")) } @@ -20301,6 +20967,12 @@ func (s *CreateWorkteamInput) SetTags(v []*Tag) *CreateWorkteamInput { return s } +// SetWorkforceName sets the WorkforceName field's value. +func (s *CreateWorkteamInput) SetWorkforceName(v string) *CreateWorkteamInput { + s.WorkforceName = &v + return s +} + // SetWorkteamName sets the WorkteamName field's value. func (s *CreateWorkteamInput) SetWorkteamName(v string) *CreateWorkteamInput { s.WorkteamName = &v @@ -21937,6 +22609,61 @@ func (s DeleteUserProfileOutput) GoString() string { return s.String() } +type DeleteWorkforceInput struct { + _ struct{} `type:"structure"` + + // The name of the workforce. + // + // WorkforceName is a required field + WorkforceName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteWorkforceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteWorkforceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteWorkforceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteWorkforceInput"} + if s.WorkforceName == nil { + invalidParams.Add(request.NewErrParamRequired("WorkforceName")) + } + if s.WorkforceName != nil && len(*s.WorkforceName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WorkforceName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetWorkforceName sets the WorkforceName field's value. +func (s *DeleteWorkforceInput) SetWorkforceName(v string) *DeleteWorkforceInput { + s.WorkforceName = &v + return s +} + +type DeleteWorkforceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteWorkforceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteWorkforceOutput) GoString() string { + return s.String() +} + type DeleteWorkteamInput struct { _ struct{} `type:"structure"` @@ -25605,7 +26332,14 @@ type DescribeTrainingJobOutput struct { // have network access. EnableNetworkIsolation *bool `type:"boolean"` - // Configuration for the experiment. + // Associates a SageMaker job as a trial component with an experiment and trial. + // Specified when you call the following APIs: + // + // * CreateProcessingJob + // + // * CreateTrainingJob + // + // * CreateTransformJob ExperimentConfig *ExperimentConfig `type:"structure"` // If the training job failed, the reason it failed. @@ -26071,7 +26805,14 @@ type DescribeTransformJobOutput struct { // 16 key and values entries in the map. Environment map[string]*string `type:"map"` - // Configuration for the experiment. + // Associates a SageMaker job as a trial component with an experiment and trial. 
+ // Specified when you call the following APIs: + // + // * CreateProcessingJob + // + // * CreateTrainingJob + // + // * CreateTransformJob ExperimentConfig *ExperimentConfig `type:"structure"` // If the transform job failed, FailureReason describes why it failed. A transform @@ -27467,17 +28208,26 @@ func (s *Experiment) SetTags(v []*Tag) *Experiment { return s } -// Configuration for the experiment. +// Associates a SageMaker job as a trial component with an experiment and trial. +// Specified when you call the following APIs: +// +// * CreateProcessingJob +// +// * CreateTrainingJob +// +// * CreateTransformJob type ExperimentConfig struct { _ struct{} `type:"structure"` - // The name of the experiment. + // The name of an existing experiment to associate the trial component with. ExperimentName *string `min:"1" type:"string"` - // Display name for the trial component. + // The display name for the trial component. If this key isn't specified, the + // display name is the trial component name. TrialComponentDisplayName *string `min:"1" type:"string"` - // The name of the trial. + // The name of an existing trial to associate the trial component with. If not + // specified, a new trial is created. TrialName *string `min:"1" type:"string"` } @@ -29012,6 +29762,93 @@ type HumanTaskConfig struct { // // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-NamedEntityRecognition // + // Video Classification - Use this task type when you need workers to classify + // videos using predefined labels that you specify. Workers are shown videos + // and are asked to choose one label for each video. + // + // * arn:aws:lambda:us-east-1:432418664414:function:PRE-VideoMultiClass + // + // * arn:aws:lambda:us-east-2:266458841044:function:PRE-VideoMultiClass + // + // * arn:aws:lambda:us-west-2:081040173940:function:PRE-VideoMultiClass + // + // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-VideoMultiClass + // + // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VideoMultiClass + // + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VideoMultiClass + // + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-VideoMultiClass + // + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-VideoMultiClass + // + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VideoMultiClass + // + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-VideoMultiClass + // + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VideoMultiClass + // + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-VideoMultiClass + // + // Video Frame Object Detection - Use this task type to have workers identify + // and locate objects in a sequence of video frames (images extracted from a + // video) using bounding boxes. For example, you can use this task to ask workers + // to identify and localize various objects in a series of video frames, such + // as cars, bikes, and pedestrians. 
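As a sketch of how the shared ExperimentConfig is attached to one of the three job-creation requests (all names below are placeholders, and the other required training job fields are omitted):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	// Only the experiment/trial association is shown; a real CreateTrainingJob
	// request also needs algorithm, role, data, and resource settings.
	input := &sagemaker.CreateTrainingJobInput{
		TrainingJobName: aws.String("example-training-job"), // placeholder
		ExperimentConfig: &sagemaker.ExperimentConfig{
			ExperimentName:            aws.String("example-experiment"), // must already exist
			TrialName:                 aws.String("example-trial"),      // created if it does not exist
			TrialComponentDisplayName: aws.String("training-step-1"),
		},
	}
	fmt.Println(input.String())
}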
+ // + // * arn:aws:lambda:us-east-1:432418664414:function:PRE-VideoObjectDetection + // + // * arn:aws:lambda:us-east-2:266458841044:function:PRE-VideoObjectDetection + // + // * arn:aws:lambda:us-west-2:081040173940:function:PRE-VideoObjectDetection + // + // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-VideoObjectDetection + // + // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VideoObjectDetection + // + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VideoObjectDetection + // + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-VideoObjectDetection + // + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-VideoObjectDetection + // + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VideoObjectDetection + // + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-VideoObjectDetection + // + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VideoObjectDetection + // + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-VideoObjectDetection + // + // Video Frame Object Tracking - Use this task type to have workers track the + // movement of objects in a sequence of video frames (images extracted from + // a video) using bounding boxes. For example, you can use this task to ask + // workers to track the movement of objects, such as cars, bikes, and pedestrians. + // + // * arn:aws:lambda:us-east-1:432418664414:function:PRE-VideoObjectTracking + // + // * arn:aws:lambda:us-east-2:266458841044:function:PRE-VideoObjectTracking + // + // * arn:aws:lambda:us-west-2:081040173940:function:PRE-VideoObjectTracking + // + // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-VideoObjectTracking + // + // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VideoObjectTracking + // + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VideoObjectTracking + // + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-VideoObjectTracking + // + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-VideoObjectTracking + // + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VideoObjectTracking + // + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-VideoObjectTracking + // + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VideoObjectTracking + // + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-VideoObjectTracking + // // 3D Point Cloud Modalities // // Use the following pre-annotation lambdas for 3D point cloud labeling modality @@ -29221,6 +30058,62 @@ type HumanTaskConfig struct { // // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentSemanticSegmentation // + // Video Frame Object Detection Adjustment - Use this task type when you want + // workers to adjust bounding boxes that workers have added to video frames + // to classify and localize objects in a sequence of video frames. 
+ // + // * arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentVideoObjectDetection + // + // * arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentVideoObjectDetection + // + // * arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentVideoObjectDetection + // + // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentVideoObjectDetection + // + // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentVideoObjectDetection + // + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentVideoObjectDetection + // + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentVideoObjectDetection + // + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentVideoObjectDetection + // + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentVideoObjectDetection + // + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentVideoObjectDetection + // + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentVideoObjectDetection + // + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentVideoObjectDetection + // + // Video Frame Object Tracking Adjustment - Use this task type when you want + // workers to adjust bounding boxes that workers have added to video frames + // to track object movement across a sequence of video frames. + // + // * arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentVideoObjectTracking + // + // * arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentVideoObjectTracking + // + // * arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentVideoObjectTracking + // + // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentVideoObjectTracking + // + // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentVideoObjectTracking + // + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentVideoObjectTracking + // + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentVideoObjectTracking + // + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentVideoObjectTracking + // + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentVideoObjectTracking + // + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentVideoObjectTracking + // + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentVideoObjectTracking + // + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentVideoObjectTracking + // // 3D point cloud object detection adjustment - Adjust 3D cuboids in a point // cloud frame. // @@ -31233,9 +32126,7 @@ type LabelingJobDataSource struct { _ struct{} `type:"structure"` // The Amazon S3 location of the input data objects. - // - // S3DataSource is a required field - S3DataSource *LabelingJobS3DataSource `type:"structure" required:"true"` + S3DataSource *LabelingJobS3DataSource `type:"structure"` } // String returns the string representation @@ -31251,9 +32142,6 @@ func (s LabelingJobDataSource) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *LabelingJobDataSource) Validate() error { invalidParams := request.ErrInvalidParams{Context: "LabelingJobDataSource"} - if s.S3DataSource == nil { - invalidParams.Add(request.NewErrParamRequired("S3DataSource")) - } if s.S3DataSource != nil { if err := s.S3DataSource.Validate(); err != nil { invalidParams.AddNested("S3DataSource", err.(request.ErrInvalidParams)) @@ -35956,6 +36844,116 @@ func (s *ListUserProfilesOutput) SetUserProfiles(v []*UserProfileDetails) *ListU return s } +type ListWorkforcesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of workforces returned in the response. + MaxResults *int64 `min:"1" type:"integer"` + + // A filter you can use to search for workforces using part of the workforce + // name. + NameContains *string `min:"1" type:"string"` + + // A token to resume pagination. + NextToken *string `type:"string"` + + // Sort workforces using the workforce name or creation date. + SortBy *string `type:"string" enum:"ListWorkforcesSortByOptions"` + + // Sort workforces in ascending or descending order. + SortOrder *string `type:"string" enum:"SortOrder"` +} + +// String returns the string representation +func (s ListWorkforcesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListWorkforcesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListWorkforcesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListWorkforcesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NameContains != nil && len(*s.NameContains) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NameContains", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListWorkforcesInput) SetMaxResults(v int64) *ListWorkforcesInput { + s.MaxResults = &v + return s +} + +// SetNameContains sets the NameContains field's value. +func (s *ListWorkforcesInput) SetNameContains(v string) *ListWorkforcesInput { + s.NameContains = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListWorkforcesInput) SetNextToken(v string) *ListWorkforcesInput { + s.NextToken = &v + return s +} + +// SetSortBy sets the SortBy field's value. +func (s *ListWorkforcesInput) SetSortBy(v string) *ListWorkforcesInput { + s.SortBy = &v + return s +} + +// SetSortOrder sets the SortOrder field's value. +func (s *ListWorkforcesInput) SetSortOrder(v string) *ListWorkforcesInput { + s.SortOrder = &v + return s +} + +type ListWorkforcesOutput struct { + _ struct{} `type:"structure"` + + // A token to resume pagination. + NextToken *string `type:"string"` + + // A list containing information about your workforce. + // + // Workforces is a required field + Workforces []*Workforce `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListWorkforcesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListWorkforcesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListWorkforcesOutput) SetNextToken(v string) *ListWorkforcesOutput { + s.NextToken = &v + return s +} + +// SetWorkforces sets the Workforces field's value. 
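A small sketch of the new paginated workforce listing call; the name filter and sort options are illustrative:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	svc := sagemaker.New(session.Must(session.NewSession()))

	in := &sagemaker.ListWorkforcesInput{
		NameContains: aws.String("example"), // placeholder filter
		SortBy:       aws.String(sagemaker.ListWorkforcesSortByOptionsCreateDate),
		MaxResults:   aws.Int64(10),
	}
	// ListWorkforcesPages follows NextToken automatically.
	err := svc.ListWorkforcesPages(in, func(page *sagemaker.ListWorkforcesOutput, lastPage bool) bool {
		for _, wf := range page.Workforces {
			fmt.Println(aws.StringValue(wf.WorkforceName), aws.StringValue(wf.SubDomain))
		}
		return true // keep paging
	})
	if err != nil {
		fmt.Println("ListWorkforces failed:", err)
	}
}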
+func (s *ListWorkforcesOutput) SetWorkforces(v []*Workforce) *ListWorkforcesOutput { + s.Workforces = v + return s +} + type ListWorkteamsInput struct { _ struct{} `type:"structure"` @@ -36075,6 +37073,13 @@ type MemberDefinition struct { // The Amazon Cognito user group that is part of the work team. CognitoMemberDefinition *CognitoMemberDefinition `type:"structure"` + + // A list user groups that exist in your OIDC Identity Provider (IdP). One to + // ten groups can be used to create a single private work team. When you add + // a user group to the list of Groups, you can add that user group to one or + // more private work teams. If you add a user group to a private work team, + // all workers in that user group are added to the work team. + OidcMemberDefinition *OidcMemberDefinition `type:"structure"` } // String returns the string representation @@ -36095,6 +37100,11 @@ func (s *MemberDefinition) Validate() error { invalidParams.AddNested("CognitoMemberDefinition", err.(request.ErrInvalidParams)) } } + if s.OidcMemberDefinition != nil { + if err := s.OidcMemberDefinition.Validate(); err != nil { + invalidParams.AddNested("OidcMemberDefinition", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -36108,6 +37118,12 @@ func (s *MemberDefinition) SetCognitoMemberDefinition(v *CognitoMemberDefinition return s } +// SetOidcMemberDefinition sets the OidcMemberDefinition field's value. +func (s *MemberDefinition) SetOidcMemberDefinition(v *OidcMemberDefinition) *MemberDefinition { + s.OidcMemberDefinition = v + return s +} + // The name, value, and date and time of a metric that was emitted to Amazon // CloudWatch. type MetricData struct { @@ -38143,22 +39159,349 @@ func (s *ObjectiveStatusCounters) SetSucceeded(v int64) *ObjectiveStatusCounters return s } +// Use this parameter to configure your OIDC Identity Provider (IdP). +type OidcConfig struct { + _ struct{} `type:"structure"` + + // The OIDC IdP authorization endpoint used to configure your private workforce. + // + // AuthorizationEndpoint is a required field + AuthorizationEndpoint *string `type:"string" required:"true"` + + // The OIDC IdP client ID used to configure your private workforce. + // + // ClientId is a required field + ClientId *string `min:"1" type:"string" required:"true"` + + // The OIDC IdP client secret used to configure your private workforce. + // + // ClientSecret is a required field + ClientSecret *string `min:"1" type:"string" required:"true" sensitive:"true"` + + // The OIDC IdP issuer used to configure your private workforce. + // + // Issuer is a required field + Issuer *string `type:"string" required:"true"` + + // The OIDC IdP JSON Web Key Set (Jwks) URI used to configure your private workforce. + // + // JwksUri is a required field + JwksUri *string `type:"string" required:"true"` + + // The OIDC IdP logout endpoint used to configure your private workforce. + // + // LogoutEndpoint is a required field + LogoutEndpoint *string `type:"string" required:"true"` + + // The OIDC IdP token endpoint used to configure your private workforce. + // + // TokenEndpoint is a required field + TokenEndpoint *string `type:"string" required:"true"` + + // The OIDC IdP user information endpoint used to configure your private workforce. 
+ // + // UserInfoEndpoint is a required field + UserInfoEndpoint *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s OidcConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OidcConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OidcConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OidcConfig"} + if s.AuthorizationEndpoint == nil { + invalidParams.Add(request.NewErrParamRequired("AuthorizationEndpoint")) + } + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.ClientId != nil && len(*s.ClientId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientId", 1)) + } + if s.ClientSecret == nil { + invalidParams.Add(request.NewErrParamRequired("ClientSecret")) + } + if s.ClientSecret != nil && len(*s.ClientSecret) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientSecret", 1)) + } + if s.Issuer == nil { + invalidParams.Add(request.NewErrParamRequired("Issuer")) + } + if s.JwksUri == nil { + invalidParams.Add(request.NewErrParamRequired("JwksUri")) + } + if s.LogoutEndpoint == nil { + invalidParams.Add(request.NewErrParamRequired("LogoutEndpoint")) + } + if s.TokenEndpoint == nil { + invalidParams.Add(request.NewErrParamRequired("TokenEndpoint")) + } + if s.UserInfoEndpoint == nil { + invalidParams.Add(request.NewErrParamRequired("UserInfoEndpoint")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAuthorizationEndpoint sets the AuthorizationEndpoint field's value. +func (s *OidcConfig) SetAuthorizationEndpoint(v string) *OidcConfig { + s.AuthorizationEndpoint = &v + return s +} + +// SetClientId sets the ClientId field's value. +func (s *OidcConfig) SetClientId(v string) *OidcConfig { + s.ClientId = &v + return s +} + +// SetClientSecret sets the ClientSecret field's value. +func (s *OidcConfig) SetClientSecret(v string) *OidcConfig { + s.ClientSecret = &v + return s +} + +// SetIssuer sets the Issuer field's value. +func (s *OidcConfig) SetIssuer(v string) *OidcConfig { + s.Issuer = &v + return s +} + +// SetJwksUri sets the JwksUri field's value. +func (s *OidcConfig) SetJwksUri(v string) *OidcConfig { + s.JwksUri = &v + return s +} + +// SetLogoutEndpoint sets the LogoutEndpoint field's value. +func (s *OidcConfig) SetLogoutEndpoint(v string) *OidcConfig { + s.LogoutEndpoint = &v + return s +} + +// SetTokenEndpoint sets the TokenEndpoint field's value. +func (s *OidcConfig) SetTokenEndpoint(v string) *OidcConfig { + s.TokenEndpoint = &v + return s +} + +// SetUserInfoEndpoint sets the UserInfoEndpoint field's value. +func (s *OidcConfig) SetUserInfoEndpoint(v string) *OidcConfig { + s.UserInfoEndpoint = &v + return s +} + +// Your Amazon Cognito workforce configuration. +type OidcConfigForResponse struct { + _ struct{} `type:"structure"` + + // The OIDC IdP authorization endpoint used to configure your private workforce. + AuthorizationEndpoint *string `type:"string"` + + // The OIDC IdP client ID used to configure your private workforce. + ClientId *string `min:"1" type:"string"` + + // The OIDC IdP issuer used to configure your private workforce. + Issuer *string `type:"string"` + + // The OIDC IdP JSON Web Key Set (Jwks) URI used to configure your private workforce. 
+ JwksUri *string `type:"string"`
+
+ // The OIDC IdP logout endpoint used to configure your private workforce.
+ LogoutEndpoint *string `type:"string"`
+
+ // The OIDC IdP token endpoint used to configure your private workforce.
+ TokenEndpoint *string `type:"string"`
+
+ // The OIDC IdP user information endpoint used to configure your private workforce.
+ UserInfoEndpoint *string `type:"string"`
+}
+
+// String returns the string representation
+func (s OidcConfigForResponse) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s OidcConfigForResponse) GoString() string {
+ return s.String()
+}
+
+// SetAuthorizationEndpoint sets the AuthorizationEndpoint field's value.
+func (s *OidcConfigForResponse) SetAuthorizationEndpoint(v string) *OidcConfigForResponse {
+ s.AuthorizationEndpoint = &v
+ return s
+}
+
+// SetClientId sets the ClientId field's value.
+func (s *OidcConfigForResponse) SetClientId(v string) *OidcConfigForResponse {
+ s.ClientId = &v
+ return s
+}
+
+// SetIssuer sets the Issuer field's value.
+func (s *OidcConfigForResponse) SetIssuer(v string) *OidcConfigForResponse {
+ s.Issuer = &v
+ return s
+}
+
+// SetJwksUri sets the JwksUri field's value.
+func (s *OidcConfigForResponse) SetJwksUri(v string) *OidcConfigForResponse {
+ s.JwksUri = &v
+ return s
+}
+
+// SetLogoutEndpoint sets the LogoutEndpoint field's value.
+func (s *OidcConfigForResponse) SetLogoutEndpoint(v string) *OidcConfigForResponse {
+ s.LogoutEndpoint = &v
+ return s
+}
+
+// SetTokenEndpoint sets the TokenEndpoint field's value.
+func (s *OidcConfigForResponse) SetTokenEndpoint(v string) *OidcConfigForResponse {
+ s.TokenEndpoint = &v
+ return s
+}
+
+// SetUserInfoEndpoint sets the UserInfoEndpoint field's value.
+func (s *OidcConfigForResponse) SetUserInfoEndpoint(v string) *OidcConfigForResponse {
+ s.UserInfoEndpoint = &v
+ return s
+}
+
+// A list of user groups that exist in your OIDC Identity Provider (IdP). One to
+// ten groups can be used to create a single private work team. When you add
+// a user group to the list of Groups, you can add that user group to one or
+// more private work teams. If you add a user group to a private work team,
+// all workers in that user group are added to the work team.
+type OidcMemberDefinition struct {
+ _ struct{} `type:"structure"`
+
+ // A list of comma-separated strings that identify user groups in your OIDC
+ // IdP. Each user group is made up of a group of private workers.
+ //
+ // Groups is a required field
+ Groups []*string `min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s OidcMemberDefinition) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s OidcMemberDefinition) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *OidcMemberDefinition) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "OidcMemberDefinition"}
+ if s.Groups == nil {
+ invalidParams.Add(request.NewErrParamRequired("Groups"))
+ }
+ if s.Groups != nil && len(s.Groups) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Groups", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetGroups sets the Groups field's value.
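A sketch of creating a work team from IdP user groups in a named workforce; the team, workforce, and group names are placeholders, and Description comes from the existing CreateWorkteam API:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	svc := sagemaker.New(session.Must(session.NewSession()))

	out, err := svc.CreateWorkteam(&sagemaker.CreateWorkteamInput{
		WorkteamName:  aws.String("example-video-labelers"), // placeholder
		WorkforceName: aws.String("example-oidc-workforce"), // placeholder workforce
		Description:   aws.String("Labelers drawn from IdP groups"),
		MemberDefinitions: []*sagemaker.MemberDefinition{
			{
				// One to ten IdP groups; every worker in these groups joins the team.
				OidcMemberDefinition: &sagemaker.OidcMemberDefinition{
					Groups: aws.StringSlice([]string{"labelers", "reviewers"}), // placeholder groups
				},
			},
		},
	})
	if err != nil {
		fmt.Println("CreateWorkteam failed:", err)
		return
	}
	fmt.Println("work team ARN:", aws.StringValue(out.WorkteamArn))
}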
+func (s *OidcMemberDefinition) SetGroups(v []*string) *OidcMemberDefinition { + s.Groups = v + return s +} + // Contains information about the output location for the compiled model and -// the device (target) that the model runs on. +// the target device that the model runs on. TargetDevice and TargetPlatform +// are mutually exclusive, so you need to choose one between the two to specify +// your target device or platform. If you cannot find your device you want to +// use from the TargetDevice list, use TargetPlatform to describe the platform +// of your edge device and CompilerOptions if there are specific settings that +// are required or recommended to use for particular TargetPlatform. type OutputConfig struct { _ struct{} `type:"structure"` - // Identifies the S3 path where you want Amazon SageMaker to store the model + // Specifies additional parameters for compiler options in JSON format. The + // compiler options are TargetPlatform specific. It is required for NVIDIA accelerators + // and highly recommended for CPU compliations. For any other cases, it is optional + // to specify CompilerOptions. + // + // * CPU: Compilation for CPU supports the following compiler options. mcpu: + // CPU micro-architecture. For example, {'mcpu': 'skylake-avx512'} mattr: + // CPU flags. For example, {'mattr': ['+neon', '+vfpv4']} + // + // * ARM: Details of ARM CPU compilations. NEON: NEON is an implementation + // of the Advanced SIMD extension used in ARMv7 processors. For example, + // add {'mattr': ['+neon']} to the compiler options if compiling for ARM + // 32-bit platform with the NEON support. + // + // * NVIDIA: Compilation for NVIDIA GPU supports the following compiler options. + // gpu_code: Specifies the targeted architecture. trt-ver: Specifies the + // TensorRT versions in x.y.z. format. cuda-ver: Specifies the CUDA version + // in x.y format. For example, {'gpu-code': 'sm_72', 'trt-ver': '6.0.1', + // 'cuda-ver': '10.1'} + // + // * ANDROID: Compilation for the Android OS supports the following compiler + // options: ANDROID_PLATFORM: Specifies the Android API levels. Available + // levels range from 21 to 29. For example, {'ANDROID_PLATFORM': 28}. mattr: + // Add {'mattr': ['+neon']} to compiler options if compiling for ARM 32-bit + // platform with NEON support. + CompilerOptions *string `min:"7" type:"string"` + + // Identifies the S3 bucket where you want Amazon SageMaker to store the model // artifacts. For example, s3://bucket-name/key-name-prefix. // // S3OutputLocation is a required field S3OutputLocation *string `type:"string" required:"true"` - // Identifies the device that you want to run your model on after it has been - // compiled. For example: ml_c5. + // Identifies the target device or the machine learning instance that you want + // to run your model on after the compilation has completed. Alternatively, + // you can specify OS, architecture, and accelerator using TargetPlatform fields. + // It can be used instead of TargetPlatform. + TargetDevice *string `type:"string" enum:"TargetDevice"` + + // Contains information about a target platform that you want your model to + // run on, such as OS, architecture, and accelerators. It is an alternative + // of TargetDevice. 
+ // + // The following examples show how to configure the TargetPlatform and CompilerOptions + // JSON strings for popular target platforms: + // + // * Raspberry Pi 3 Model B+ "TargetPlatform": {"Os": "LINUX", "Arch": "ARM_EABIHF"}, + // "CompilerOptions": {'mattr': ['+neon']} + // + // * Jetson TX2 "TargetPlatform": {"Os": "LINUX", "Arch": "ARM64", "Accelerator": + // "NVIDIA"}, "CompilerOptions": {'gpu-code': 'sm_62', 'trt-ver': '6.0.1', + // 'cuda-ver': '10.0'} + // + // * EC2 m5.2xlarge instance OS "TargetPlatform": {"Os": "LINUX", "Arch": + // "X86_64", "Accelerator": "NVIDIA"}, "CompilerOptions": {'mcpu': 'skylake-avx512'} // - // TargetDevice is a required field - TargetDevice *string `type:"string" required:"true" enum:"TargetDevice"` + // * RK3399 "TargetPlatform": {"Os": "LINUX", "Arch": "ARM64", "Accelerator": + // "MALI"} + // + // * ARMv7 phone (CPU) "TargetPlatform": {"Os": "ANDROID", "Arch": "ARM_EABI"}, + // "CompilerOptions": {'ANDROID_PLATFORM': 25, 'mattr': ['+neon']} + // + // * ARMv8 phone (CPU) "TargetPlatform": {"Os": "ANDROID", "Arch": "ARM64"}, + // "CompilerOptions": {'ANDROID_PLATFORM': 29} + TargetPlatform *TargetPlatform `type:"structure"` } // String returns the string representation @@ -38174,11 +39517,16 @@ func (s OutputConfig) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *OutputConfig) Validate() error { invalidParams := request.ErrInvalidParams{Context: "OutputConfig"} + if s.CompilerOptions != nil && len(*s.CompilerOptions) < 7 { + invalidParams.Add(request.NewErrParamMinLen("CompilerOptions", 7)) + } if s.S3OutputLocation == nil { invalidParams.Add(request.NewErrParamRequired("S3OutputLocation")) } - if s.TargetDevice == nil { - invalidParams.Add(request.NewErrParamRequired("TargetDevice")) + if s.TargetPlatform != nil { + if err := s.TargetPlatform.Validate(); err != nil { + invalidParams.AddNested("TargetPlatform", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -38187,6 +39535,12 @@ func (s *OutputConfig) Validate() error { return nil } +// SetCompilerOptions sets the CompilerOptions field's value. +func (s *OutputConfig) SetCompilerOptions(v string) *OutputConfig { + s.CompilerOptions = &v + return s +} + // SetS3OutputLocation sets the S3OutputLocation field's value. func (s *OutputConfig) SetS3OutputLocation(v string) *OutputConfig { s.S3OutputLocation = &v @@ -38199,6 +39553,12 @@ func (s *OutputConfig) SetTargetDevice(v string) *OutputConfig { return s } +// SetTargetPlatform sets the TargetPlatform field's value. +func (s *OutputConfig) SetTargetPlatform(v *TargetPlatform) *OutputConfig { + s.TargetPlatform = v + return s +} + // Provides information about how to store model training results (model artifacts). type OutputDataConfig struct { _ struct{} `type:"structure"` @@ -38678,7 +40038,14 @@ type ProcessingJob struct { // container when the processing job exits. ExitMessage *string `type:"string"` - // Configuration for the experiment. + // Associates a SageMaker job as a trial component with an experiment and trial. 
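A sketch of an OutputConfig that uses the new TargetPlatform and CompilerOptions fields for an ARM64 Linux device with an NVIDIA accelerator, mirroring the Jetson TX2 example above; the bucket name and option values are illustrative:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	// TargetPlatform is used here instead of TargetDevice; the two are mutually exclusive.
	cfg := &sagemaker.OutputConfig{
		S3OutputLocation: aws.String("s3://example-bucket/compiled-models/"), // placeholder bucket
		TargetPlatform: &sagemaker.TargetPlatform{
			Os:          aws.String(sagemaker.TargetPlatformOsLinux),
			Arch:        aws.String(sagemaker.TargetPlatformArchArm64),
			Accelerator: aws.String(sagemaker.TargetPlatformAcceleratorNvidia),
		},
		// NVIDIA targets also need gpu-code, trt-ver, and cuda-ver compiler options.
		CompilerOptions: aws.String(`{"gpu-code": "sm_62", "trt-ver": "6.0.1", "cuda-ver": "10.0"}`),
	}
	fmt.Println(cfg.String())
}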
+ // Specified when you call the following APIs: + // + // * CreateProcessingJob + // + // * CreateTrainingJob + // + // * CreateTransformJob ExperimentConfig *ExperimentConfig `type:"structure"` // A string, up to one KB in size, that contains the reason a processing job @@ -42245,6 +43612,93 @@ func (s *Tag) SetValue(v string) *Tag { return s } +// Contains information about a target platform that you want your model to +// run on, such as OS, architecture, and accelerators. It is an alternative +// of TargetDevice. +type TargetPlatform struct { + _ struct{} `type:"structure"` + + // Specifies a target platform accelerator (optional). + // + // * NVIDIA: Nvidia graphics processing unit. It also requires gpu-code, + // trt-ver, cuda-ver compiler options + // + // * MALI: ARM Mali graphics processor + // + // * INTEL_GRAPHICS: Integrated Intel graphics + Accelerator *string `type:"string" enum:"TargetPlatformAccelerator"` + + // Specifies a target platform architecture. + // + // * X86_64: 64-bit version of the x86 instruction set. + // + // * X86: 32-bit version of the x86 instruction set. + // + // * ARM64: ARMv8 64-bit CPU. + // + // * ARM_EABIHF: ARMv7 32-bit, Hard Float. + // + // * ARM_EABI: ARMv7 32-bit, Soft Float. Used by Android 32-bit ARM platform. + // + // Arch is a required field + Arch *string `type:"string" required:"true" enum:"TargetPlatformArch"` + + // Specifies a target platform OS. + // + // * LINUX: Linux-based operating systems. + // + // * ANDROID: Android operating systems. Android API level can be specified + // using the ANDROID_PLATFORM compiler option. For example, "CompilerOptions": + // {'ANDROID_PLATFORM': 28} + // + // Os is a required field + Os *string `type:"string" required:"true" enum:"TargetPlatformOs"` +} + +// String returns the string representation +func (s TargetPlatform) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetPlatform) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TargetPlatform) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TargetPlatform"} + if s.Arch == nil { + invalidParams.Add(request.NewErrParamRequired("Arch")) + } + if s.Os == nil { + invalidParams.Add(request.NewErrParamRequired("Os")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccelerator sets the Accelerator field's value. +func (s *TargetPlatform) SetAccelerator(v string) *TargetPlatform { + s.Accelerator = &v + return s +} + +// SetArch sets the Arch field's value. +func (s *TargetPlatform) SetArch(v string) *TargetPlatform { + s.Arch = &v + return s +} + +// SetOs sets the Os field's value. +func (s *TargetPlatform) SetOs(v string) *TargetPlatform { + s.Os = &v + return s +} + // The TensorBoard app settings. type TensorBoardAppSettings struct { _ struct{} `type:"structure"` @@ -42365,7 +43819,14 @@ type TrainingJob struct { // VPC they run in. EnableNetworkIsolation *bool `type:"boolean"` - // Configuration for the experiment. + // Associates a SageMaker job as a trial component with an experiment and trial. + // Specified when you call the following APIs: + // + // * CreateProcessingJob + // + // * CreateTrainingJob + // + // * CreateTransformJob ExperimentConfig *ExperimentConfig `type:"structure"` // If the training job failed, the reason it failed. 
@@ -43346,6 +44807,261 @@ func (s *TransformInput) SetSplitType(v string) *TransformInput { return s } +// A batch transform job. For information about SageMaker batch transform, see +// Use Batch Transform (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform.html). +type TransformJob struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the AutoML job that created the transform + // job. + AutoMLJobArn *string `min:"1" type:"string"` + + // Specifies the number of records to include in a mini-batch for an HTTP inference + // request. A record is a single unit of input data that inference can be made + // on. For example, a single line in a CSV file is a record. + BatchStrategy *string `type:"string" enum:"BatchStrategy"` + + // A timestamp that shows when the transform Job was created. + CreationTime *time.Time `type:"timestamp"` + + // The data structure used to specify the data to be used for inference in a + // batch transform job and to associate the data that is relevant to the prediction + // results in the output. The input filter provided allows you to exclude input + // data that is not needed for inference in a batch transform job. The output + // filter provided allows you to include input data relevant to interpreting + // the predictions in the output from the job. For more information, see Associate + // Prediction Results with their Corresponding Input Records (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform-data-processing.html). + DataProcessing *DataProcessing `type:"structure"` + + // The environment variables to set in the Docker container. We support up to + // 16 key and values entries in the map. + Environment map[string]*string `type:"map"` + + // Associates a SageMaker job as a trial component with an experiment and trial. + // Specified when you call the following APIs: + // + // * CreateProcessingJob + // + // * CreateTrainingJob + // + // * CreateTransformJob + ExperimentConfig *ExperimentConfig `type:"structure"` + + // If the transform job failed, the reason it failed. + FailureReason *string `type:"string"` + + // The Amazon Resource Name (ARN) of the labeling job that created the transform + // job. + LabelingJobArn *string `type:"string"` + + // The maximum number of parallel requests that can be sent to each instance + // in a transform job. If MaxConcurrentTransforms is set to 0 or left unset, + // SageMaker checks the optional execution-parameters to determine the settings + // for your chosen algorithm. If the execution-parameters endpoint is not enabled, + // the default value is 1. For built-in algorithms, you don't need to set a + // value for MaxConcurrentTransforms. + MaxConcurrentTransforms *int64 `type:"integer"` + + // The maximum allowed size of the payload, in MB. A payload is the data portion + // of a record (without metadata). The value in MaxPayloadInMB must be greater + // than, or equal to, the size of a single record. To estimate the size of a + // record in MB, divide the size of your dataset by the number of records. To + // ensure that the records fit within the maximum payload size, we recommend + // using a slightly larger value. The default value is 6 MB. For cases where + // the payload might be arbitrarily large and is transmitted using HTTP chunked + // encoding, set the value to 0. This feature works only in supported algorithms. + // Currently, SageMaker built-in algorithms do not support HTTP chunked encoding. 
+ MaxPayloadInMB *int64 `type:"integer"` + + // Configures the timeout and maximum number of retries for processing a transform + // job invocation. + ModelClientConfig *ModelClientConfig `type:"structure"` + + // The name of the model associated with the transform job. + ModelName *string `type:"string"` + + // A list of tags associated with the transform job. + Tags []*Tag `type:"list"` + + // Indicates when the transform job has been completed, or has stopped or failed. + // You are billed for the time interval between this time and the value of TransformStartTime. + TransformEndTime *time.Time `type:"timestamp"` + + // Describes the input source of a transform job and the way the transform job + // consumes it. + TransformInput *TransformInput `type:"structure"` + + // The Amazon Resource Name (ARN) of the transform job. + TransformJobArn *string `type:"string"` + + // The name of the transform job. + TransformJobName *string `min:"1" type:"string"` + + // The status of the transform job. + // + // Transform job statuses are: + // + // * InProgress - The job is in progress. + // + // * Completed - The job has completed. + // + // * Failed - The transform job has failed. To see the reason for the failure, + // see the FailureReason field in the response to a DescribeTransformJob + // call. + // + // * Stopping - The transform job is stopping. + // + // * Stopped - The transform job has stopped. + TransformJobStatus *string `type:"string" enum:"TransformJobStatus"` + + // Describes the results of a transform job. + TransformOutput *TransformOutput `type:"structure"` + + // Describes the resources, including ML instance types and ML instance count, + // to use for transform job. + TransformResources *TransformResources `type:"structure"` + + // Indicates when the transform job starts on ML instances. You are billed for + // the time interval between this time and the value of TransformEndTime. + TransformStartTime *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s TransformJob) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransformJob) GoString() string { + return s.String() +} + +// SetAutoMLJobArn sets the AutoMLJobArn field's value. +func (s *TransformJob) SetAutoMLJobArn(v string) *TransformJob { + s.AutoMLJobArn = &v + return s +} + +// SetBatchStrategy sets the BatchStrategy field's value. +func (s *TransformJob) SetBatchStrategy(v string) *TransformJob { + s.BatchStrategy = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *TransformJob) SetCreationTime(v time.Time) *TransformJob { + s.CreationTime = &v + return s +} + +// SetDataProcessing sets the DataProcessing field's value. +func (s *TransformJob) SetDataProcessing(v *DataProcessing) *TransformJob { + s.DataProcessing = v + return s +} + +// SetEnvironment sets the Environment field's value. +func (s *TransformJob) SetEnvironment(v map[string]*string) *TransformJob { + s.Environment = v + return s +} + +// SetExperimentConfig sets the ExperimentConfig field's value. +func (s *TransformJob) SetExperimentConfig(v *ExperimentConfig) *TransformJob { + s.ExperimentConfig = v + return s +} + +// SetFailureReason sets the FailureReason field's value. +func (s *TransformJob) SetFailureReason(v string) *TransformJob { + s.FailureReason = &v + return s +} + +// SetLabelingJobArn sets the LabelingJobArn field's value. 
+func (s *TransformJob) SetLabelingJobArn(v string) *TransformJob { + s.LabelingJobArn = &v + return s +} + +// SetMaxConcurrentTransforms sets the MaxConcurrentTransforms field's value. +func (s *TransformJob) SetMaxConcurrentTransforms(v int64) *TransformJob { + s.MaxConcurrentTransforms = &v + return s +} + +// SetMaxPayloadInMB sets the MaxPayloadInMB field's value. +func (s *TransformJob) SetMaxPayloadInMB(v int64) *TransformJob { + s.MaxPayloadInMB = &v + return s +} + +// SetModelClientConfig sets the ModelClientConfig field's value. +func (s *TransformJob) SetModelClientConfig(v *ModelClientConfig) *TransformJob { + s.ModelClientConfig = v + return s +} + +// SetModelName sets the ModelName field's value. +func (s *TransformJob) SetModelName(v string) *TransformJob { + s.ModelName = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TransformJob) SetTags(v []*Tag) *TransformJob { + s.Tags = v + return s +} + +// SetTransformEndTime sets the TransformEndTime field's value. +func (s *TransformJob) SetTransformEndTime(v time.Time) *TransformJob { + s.TransformEndTime = &v + return s +} + +// SetTransformInput sets the TransformInput field's value. +func (s *TransformJob) SetTransformInput(v *TransformInput) *TransformJob { + s.TransformInput = v + return s +} + +// SetTransformJobArn sets the TransformJobArn field's value. +func (s *TransformJob) SetTransformJobArn(v string) *TransformJob { + s.TransformJobArn = &v + return s +} + +// SetTransformJobName sets the TransformJobName field's value. +func (s *TransformJob) SetTransformJobName(v string) *TransformJob { + s.TransformJobName = &v + return s +} + +// SetTransformJobStatus sets the TransformJobStatus field's value. +func (s *TransformJob) SetTransformJobStatus(v string) *TransformJob { + s.TransformJobStatus = &v + return s +} + +// SetTransformOutput sets the TransformOutput field's value. +func (s *TransformJob) SetTransformOutput(v *TransformOutput) *TransformJob { + s.TransformOutput = v + return s +} + +// SetTransformResources sets the TransformResources field's value. +func (s *TransformJob) SetTransformResources(v *TransformResources) *TransformJob { + s.TransformResources = v + return s +} + +// SetTransformStartTime sets the TransformStartTime field's value. +func (s *TransformJob) SetTransformStartTime(v time.Time) *TransformJob { + s.TransformStartTime = &v + return s +} + // Defines the input needed to run a transform job using the inference specification // specified in the algorithm. type TransformJobDefinition struct { @@ -43774,7 +45490,7 @@ type TransformS3DataSource struct { // manifest is an S3 object which is a JSON file with the following format: // [ {"prefix": "s3://customer_bucket/some/prefix/"}, "relative/path/to/custdata-1", // "relative/path/custdata-2", ... "relative/path/custdata-N" ] The preceding - // JSON matches the following s3Uris: s3://customer_bucket/some/prefix/relative/path/to/custdata-1 + // JSON matches the following S3Uris: s3://customer_bucket/some/prefix/relative/path/to/custdata-1 // s3://customer_bucket/some/prefix/relative/path/custdata-2 ... s3://customer_bucket/some/prefix/relative/path/custdata-N // The complete set of S3Uris in this manifest constitutes the input data // for the channel for this datasource. The object that each S3Uris points @@ -44425,6 +46141,9 @@ type TrialComponentSourceDetail struct { // Information about a training job that's the source of a trial component. 
TrainingJob *TrainingJob `type:"structure"`
+
+ // Information about a transform job that's the source of the trial component.
+ TransformJob *TransformJob `type:"structure"`
}

// String returns the string representation
@@ -44455,6 +46174,12 @@ func (s *TrialComponentSourceDetail) SetTrainingJob(v *TrainingJob) *TrialCompon
return s
}

+// SetTransformJob sets the TransformJob field's value.
+func (s *TrialComponentSourceDetail) SetTransformJob(v *TransformJob) *TrialComponentSourceDetail {
+ s.TransformJob = v
+ return s
+}
+
// The status of the trial component.
type TrialComponentStatus struct {
_ struct{} `type:"structure"`
@@ -44805,11 +46530,13 @@ type UiConfig struct {
// for labeling job tasks.
//
// Use this parameter when you are creating a labeling job for 3D point cloud
- // labeling modalities. Use your labeling job task type to select one of the
- // following ARN's and use it with this parameter when you create a labeling
+ // and video frame labeling jobs. Use your labeling job task type to select one
+ // of the following ARNs and use it with this parameter when you create a labeling
// job. Replace aws-region with the AWS region you are creating your labeling
// job in.
//
+ // 3D Point Cloud HumanTaskUiArns
+ //
// Use this HumanTaskUiArn for 3D point cloud object detection and 3D point
// cloud object detection adjustment labeling jobs.
//
@@ -44824,6 +46551,18 @@ type UiConfig struct {
// cloud semantic segmentation adjustment labeling jobs.
//
// * arn:aws:sagemaker:aws-region:394669845002:human-task-ui/PointCloudSemanticSegmentation
+ //
+ // Video Frame HumanTaskUiArns
+ //
+ // Use this HumanTaskUiArn for video frame object detection and video frame
+ // object detection adjustment labeling jobs.
+ //
+ // * arn:aws:sagemaker:aws-region:394669845002:human-task-ui/VideoObjectDetection
+ //
+ // Use this HumanTaskUiArn for video frame object tracking and video frame object
+ // tracking adjustment labeling jobs.
+ //
+ // * arn:aws:sagemaker:aws-region:394669845002:human-task-ui/VideoObjectTracking
HumanTaskUiArn *string `type:"string"`

// The Amazon S3 bucket location of the UI template, or worker task template.
@@ -46119,6 +47858,10 @@ func (s *UpdateUserProfileOutput) SetUserProfileArn(v string) *UpdateUserProfile
type UpdateWorkforceInput struct {
_ struct{} `type:"structure"`

+ // Use this parameter to update your OIDC Identity Provider (IdP) configuration
+ // for a workforce made using your own IdP.
+ OidcConfig *OidcConfig `type:"structure"`
+
// A list of one to ten worker IP address ranges (CIDRs (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html))
// that can be used to access tasks assigned to this workforce.
//
@@ -46152,6 +47895,11 @@ func (s *UpdateWorkforceInput) Validate() error {
if s.WorkforceName != nil && len(*s.WorkforceName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("WorkforceName", 1))
}
+ if s.OidcConfig != nil {
+ if err := s.OidcConfig.Validate(); err != nil {
+ invalidParams.AddNested("OidcConfig", err.(request.ErrInvalidParams))
+ }
+ }
if s.SourceIpConfig != nil {
if err := s.SourceIpConfig.Validate(); err != nil {
invalidParams.AddNested("SourceIpConfig", err.(request.ErrInvalidParams))
@@ -46164,6 +47912,12 @@ func (s *UpdateWorkforceInput) Validate() error {
return nil
}

+// SetOidcConfig sets the OidcConfig field's value.
+func (s *UpdateWorkforceInput) SetOidcConfig(v *OidcConfig) *UpdateWorkforceInput {
+ s.OidcConfig = v
+ return s
+}
+
// SetSourceIpConfig sets the SourceIpConfig field's value.
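A sketch of pointing a video frame object detection labeling task at the worker UI and pre-annotation Lambda listed above; the ARNs use us-east-1 and a placeholder account, and the remaining required CreateLabelingJob fields are omitted:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	// Only the video-frame-specific pieces are shown; a complete CreateLabelingJob
	// request needs several more required fields (task title, worker counts,
	// timeouts, annotation consolidation, and so on).
	task := &sagemaker.HumanTaskConfig{
		WorkteamArn: aws.String("arn:aws:sagemaker:us-east-1:111122223333:workteam/private-crowd/example"), // placeholder
		UiConfig: &sagemaker.UiConfig{
			// us-east-1 shown; substitute your own region in the ARN.
			HumanTaskUiArn: aws.String("arn:aws:sagemaker:us-east-1:394669845002:human-task-ui/VideoObjectDetection"),
		},
		PreHumanTaskLambdaArn: aws.String("arn:aws:lambda:us-east-1:432418664414:function:PRE-VideoObjectDetection"),
	}
	fmt.Println(task.String())
}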
func (s *UpdateWorkforceInput) SetSourceIpConfig(v *SourceIpConfig) *UpdateWorkforceInput { s.SourceIpConfig = v @@ -46624,23 +48378,34 @@ func (s *VpcConfig) SetSubnets(v []*string) *VpcConfig { type Workforce struct { _ struct{} `type:"structure"` + // The configuration of an Amazon Cognito workforce. A single Cognito workforce + // is created using and corresponds to a single Amazon Cognito user pool (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools.html). + CognitoConfig *CognitoConfig `type:"structure"` + + // The date that the workforce is created. + CreateDate *time.Time `type:"timestamp"` + // The most recent date that was used to successfully add one or more IP address // ranges (CIDRs (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html)) // to a private workforce's allow list. LastUpdatedDate *time.Time `type:"timestamp"` + // The configuration of an OIDC Identity Provider (IdP) private workforce. + OidcConfig *OidcConfigForResponse `type:"structure"` + // A list of one to ten IP address ranges (CIDRs (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html)) // to be added to the workforce allow list. SourceIpConfig *SourceIpConfig `type:"structure"` + // The subdomain for your OIDC Identity Provider. + SubDomain *string `type:"string"` + // The Amazon Resource Name (ARN) of the private workforce. // // WorkforceArn is a required field WorkforceArn *string `type:"string" required:"true"` - // The name of the private workforce whose access you want to restrict. WorkforceName - // is automatically set to default when a workforce is created and cannot be - // modified. + // The name of the private workforce. // // WorkforceName is a required field WorkforceName *string `min:"1" type:"string" required:"true"` @@ -46656,18 +48421,42 @@ func (s Workforce) GoString() string { return s.String() } +// SetCognitoConfig sets the CognitoConfig field's value. +func (s *Workforce) SetCognitoConfig(v *CognitoConfig) *Workforce { + s.CognitoConfig = v + return s +} + +// SetCreateDate sets the CreateDate field's value. +func (s *Workforce) SetCreateDate(v time.Time) *Workforce { + s.CreateDate = &v + return s +} + // SetLastUpdatedDate sets the LastUpdatedDate field's value. func (s *Workforce) SetLastUpdatedDate(v time.Time) *Workforce { s.LastUpdatedDate = &v return s } +// SetOidcConfig sets the OidcConfig field's value. +func (s *Workforce) SetOidcConfig(v *OidcConfigForResponse) *Workforce { + s.OidcConfig = v + return s +} + // SetSourceIpConfig sets the SourceIpConfig field's value. func (s *Workforce) SetSourceIpConfig(v *SourceIpConfig) *Workforce { s.SourceIpConfig = v return s } +// SetSubDomain sets the SubDomain field's value. +func (s *Workforce) SetSubDomain(v string) *Workforce { + s.SubDomain = &v + return s +} + // SetWorkforceArn sets the WorkforceArn field's value. func (s *Workforce) SetWorkforceArn(v string) *Workforce { s.WorkforceArn = &v @@ -46711,6 +48500,9 @@ type Workteam struct { // labeling your data objects. SubDomain *string `type:"string"` + // The Amazon Resource Name (ARN) of the workforce. + WorkforceArn *string `type:"string"` + // The Amazon Resource Name (ARN) that identifies the work team. // // WorkteamArn is a required field @@ -46774,6 +48566,12 @@ func (s *Workteam) SetSubDomain(v string) *Workteam { return s } +// SetWorkforceArn sets the WorkforceArn field's value. 
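A sketch of tightening the allow list for an existing workforce with UpdateWorkforce; the workforce name and CIDR ranges are placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	svc := sagemaker.New(session.Must(session.NewSession()))

	_, err := svc.UpdateWorkforce(&sagemaker.UpdateWorkforceInput{
		WorkforceName: aws.String("example-oidc-workforce"), // placeholder
		SourceIpConfig: &sagemaker.SourceIpConfig{
			Cidrs: aws.StringSlice([]string{"203.0.113.0/24", "198.51.100.0/24"}),
		},
		// OidcConfig could also be supplied here to rotate the IdP settings
		// of an OIDC-backed workforce.
	})
	if err != nil {
		fmt.Println("UpdateWorkforce failed:", err)
	}
}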
+func (s *Workteam) SetWorkforceArn(v string) *Workteam { + s.WorkforceArn = &v + return s +} + // SetWorkteamArn sets the WorkteamArn field's value. func (s *Workteam) SetWorkteamArn(v string) *Workteam { s.WorkteamArn = &v @@ -47570,6 +49368,9 @@ const ( ) const ( + // LabelingJobStatusInitializing is a LabelingJobStatus enum value + LabelingJobStatusInitializing = "Initializing" + // LabelingJobStatusInProgress is a LabelingJobStatus enum value LabelingJobStatusInProgress = "InProgress" @@ -47602,6 +49403,14 @@ const ( ListLabelingJobsForWorkteamSortByOptionsCreationTime = "CreationTime" ) +const ( + // ListWorkforcesSortByOptionsName is a ListWorkforcesSortByOptions enum value + ListWorkforcesSortByOptionsName = "Name" + + // ListWorkforcesSortByOptionsCreateDate is a ListWorkforcesSortByOptions enum value + ListWorkforcesSortByOptionsCreateDate = "CreateDate" +) + const ( // ListWorkteamsSortByOptionsName is a ListWorkteamsSortByOptions enum value ListWorkteamsSortByOptionsName = "Name" @@ -48445,6 +50254,9 @@ const ( // TargetDeviceMlP3 is a TargetDevice enum value TargetDeviceMlP3 = "ml_p3" + // TargetDeviceMlG4dn is a TargetDevice enum value + TargetDeviceMlG4dn = "ml_g4dn" + // TargetDeviceMlInf1 is a TargetDevice enum value TargetDeviceMlInf1 = "ml_inf1" @@ -48492,6 +50304,48 @@ const ( // TargetDeviceAmbaCv22 is a TargetDevice enum value TargetDeviceAmbaCv22 = "amba_cv22" + + // TargetDeviceX86Win32 is a TargetDevice enum value + TargetDeviceX86Win32 = "x86_win32" + + // TargetDeviceX86Win64 is a TargetDevice enum value + TargetDeviceX86Win64 = "x86_win64" +) + +const ( + // TargetPlatformAcceleratorIntelGraphics is a TargetPlatformAccelerator enum value + TargetPlatformAcceleratorIntelGraphics = "INTEL_GRAPHICS" + + // TargetPlatformAcceleratorMali is a TargetPlatformAccelerator enum value + TargetPlatformAcceleratorMali = "MALI" + + // TargetPlatformAcceleratorNvidia is a TargetPlatformAccelerator enum value + TargetPlatformAcceleratorNvidia = "NVIDIA" +) + +const ( + // TargetPlatformArchX8664 is a TargetPlatformArch enum value + TargetPlatformArchX8664 = "X86_64" + + // TargetPlatformArchX86 is a TargetPlatformArch enum value + TargetPlatformArchX86 = "X86" + + // TargetPlatformArchArm64 is a TargetPlatformArch enum value + TargetPlatformArchArm64 = "ARM64" + + // TargetPlatformArchArmEabi is a TargetPlatformArch enum value + TargetPlatformArchArmEabi = "ARM_EABI" + + // TargetPlatformArchArmEabihf is a TargetPlatformArch enum value + TargetPlatformArchArmEabihf = "ARM_EABIHF" +) + +const ( + // TargetPlatformOsAndroid is a TargetPlatformOs enum value + TargetPlatformOsAndroid = "ANDROID" + + // TargetPlatformOsLinux is a TargetPlatformOs enum value + TargetPlatformOsLinux = "LINUX" ) const ( diff --git a/service/sagemaker/sagemakeriface/interface.go b/service/sagemaker/sagemakeriface/interface.go index e928be3c75a..58e42a3cc66 100644 --- a/service/sagemaker/sagemakeriface/interface.go +++ b/service/sagemaker/sagemakeriface/interface.go @@ -172,6 +172,10 @@ type SageMakerAPI interface { CreateUserProfileWithContext(aws.Context, *sagemaker.CreateUserProfileInput, ...request.Option) (*sagemaker.CreateUserProfileOutput, error) CreateUserProfileRequest(*sagemaker.CreateUserProfileInput) (*request.Request, *sagemaker.CreateUserProfileOutput) + CreateWorkforce(*sagemaker.CreateWorkforceInput) (*sagemaker.CreateWorkforceOutput, error) + CreateWorkforceWithContext(aws.Context, *sagemaker.CreateWorkforceInput, ...request.Option) (*sagemaker.CreateWorkforceOutput, error) + 
CreateWorkforceRequest(*sagemaker.CreateWorkforceInput) (*request.Request, *sagemaker.CreateWorkforceOutput) + CreateWorkteam(*sagemaker.CreateWorkteamInput) (*sagemaker.CreateWorkteamOutput, error) CreateWorkteamWithContext(aws.Context, *sagemaker.CreateWorkteamInput, ...request.Option) (*sagemaker.CreateWorkteamOutput, error) CreateWorkteamRequest(*sagemaker.CreateWorkteamInput) (*request.Request, *sagemaker.CreateWorkteamOutput) @@ -248,6 +252,10 @@ type SageMakerAPI interface { DeleteUserProfileWithContext(aws.Context, *sagemaker.DeleteUserProfileInput, ...request.Option) (*sagemaker.DeleteUserProfileOutput, error) DeleteUserProfileRequest(*sagemaker.DeleteUserProfileInput) (*request.Request, *sagemaker.DeleteUserProfileOutput) + DeleteWorkforce(*sagemaker.DeleteWorkforceInput) (*sagemaker.DeleteWorkforceOutput, error) + DeleteWorkforceWithContext(aws.Context, *sagemaker.DeleteWorkforceInput, ...request.Option) (*sagemaker.DeleteWorkforceOutput, error) + DeleteWorkforceRequest(*sagemaker.DeleteWorkforceInput) (*request.Request, *sagemaker.DeleteWorkforceOutput) + DeleteWorkteam(*sagemaker.DeleteWorkteamInput) (*sagemaker.DeleteWorkteamOutput, error) DeleteWorkteamWithContext(aws.Context, *sagemaker.DeleteWorkteamInput, ...request.Option) (*sagemaker.DeleteWorkteamOutput, error) DeleteWorkteamRequest(*sagemaker.DeleteWorkteamInput) (*request.Request, *sagemaker.DeleteWorkteamOutput) @@ -578,6 +586,13 @@ type SageMakerAPI interface { ListUserProfilesPages(*sagemaker.ListUserProfilesInput, func(*sagemaker.ListUserProfilesOutput, bool) bool) error ListUserProfilesPagesWithContext(aws.Context, *sagemaker.ListUserProfilesInput, func(*sagemaker.ListUserProfilesOutput, bool) bool, ...request.Option) error + ListWorkforces(*sagemaker.ListWorkforcesInput) (*sagemaker.ListWorkforcesOutput, error) + ListWorkforcesWithContext(aws.Context, *sagemaker.ListWorkforcesInput, ...request.Option) (*sagemaker.ListWorkforcesOutput, error) + ListWorkforcesRequest(*sagemaker.ListWorkforcesInput) (*request.Request, *sagemaker.ListWorkforcesOutput) + + ListWorkforcesPages(*sagemaker.ListWorkforcesInput, func(*sagemaker.ListWorkforcesOutput, bool) bool) error + ListWorkforcesPagesWithContext(aws.Context, *sagemaker.ListWorkforcesInput, func(*sagemaker.ListWorkforcesOutput, bool) bool, ...request.Option) error + ListWorkteams(*sagemaker.ListWorkteamsInput) (*sagemaker.ListWorkteamsOutput, error) ListWorkteamsWithContext(aws.Context, *sagemaker.ListWorkteamsInput, ...request.Option) (*sagemaker.ListWorkteamsOutput, error) ListWorkteamsRequest(*sagemaker.ListWorkteamsInput) (*request.Request, *sagemaker.ListWorkteamsOutput)
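Since the new operations are also surfaced on the SageMakerAPI interface, callers that depend on sagemakeriface can stub them in tests; a minimal sketch with a hypothetical stub type:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sagemaker"
	"github.com/aws/aws-sdk-go/service/sagemaker/sagemakeriface"
)

// countWorkforces depends only on the interface, so tests can pass a stub.
func countWorkforces(api sagemakeriface.SageMakerAPI) (int, error) {
	n := 0
	err := api.ListWorkforcesPages(&sagemaker.ListWorkforcesInput{},
		func(page *sagemaker.ListWorkforcesOutput, lastPage bool) bool {
			n += len(page.Workforces)
			return true
		})
	return n, err
}

// stubSageMaker embeds the interface and overrides only ListWorkforcesPages.
type stubSageMaker struct {
	sagemakeriface.SageMakerAPI
}

func (s *stubSageMaker) ListWorkforcesPages(in *sagemaker.ListWorkforcesInput,
	fn func(*sagemaker.ListWorkforcesOutput, bool) bool) error {
	fn(&sagemaker.ListWorkforcesOutput{
		Workforces: []*sagemaker.Workforce{{WorkforceName: aws.String("stub")}},
	}, true)
	return nil
}

func main() {
	n, _ := countWorkforces(&stubSageMaker{})
	fmt.Println("workforces:", n)
}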