From dad4b722853265d5287566f716809f575ab45a7c Mon Sep 17 00:00:00 2001 From: AWS SDK for Go v2 automation user Date: Mon, 7 Aug 2023 18:11:20 +0000 Subject: [PATCH] Update API model --- codegen/sdk-codegen/aws-models/detective.json | 346 ++++---- .../sdk-codegen/aws-models/ivs-realtime.json | 135 ++-- .../kinesis-video-archived-media.json | 697 +++++++++------- .../sdk-codegen/aws-models/kinesis-video.json | 354 ++++----- .../sdk-codegen/aws-models/rekognition.json | 745 +++++++++++------- 5 files changed, 1264 insertions(+), 1013 deletions(-) diff --git a/codegen/sdk-codegen/aws-models/detective.json b/codegen/sdk-codegen/aws-models/detective.json index 1867234c9c5..9ec12357bb0 100644 --- a/codegen/sdk-codegen/aws-models/detective.json +++ b/codegen/sdk-codegen/aws-models/detective.json @@ -366,52 +366,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -419,13 +423,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -435,224 +448,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://api.detective-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://api.detective-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" 
} ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://api.detective-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://api.detective-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://api.detective.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://api.detective.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://api.detective.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://api.detective.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] }, @@ -1988,7 +1952,7 @@ "min": 1, "max": 64 }, - "smithy.api#pattern": "^.+@(?:(?:(?!-)[A-Za-z0-9-]{1,62})?[A-Za-z0-9]{1}\\.)+[A-Za-z]{2,6}$" + "smithy.api#pattern": "^.+@(?:(?:(?!-)[A-Za-z0-9-]{1,62})?[A-Za-z0-9]{1}\\.)+[A-Za-z]{2,63}$" } }, "com.amazonaws.detective#EmailMessage": { diff --git a/codegen/sdk-codegen/aws-models/ivs-realtime.json 
b/codegen/sdk-codegen/aws-models/ivs-realtime.json index cdffbf491c0..66fc64bb869 100644 --- a/codegen/sdk-codegen/aws-models/ivs-realtime.json +++ b/codegen/sdk-codegen/aws-models/ivs-realtime.json @@ -98,7 +98,7 @@ "date" ] }, - "smithy.api#documentation": "

\n Introduction\n The Amazon Interactive Video Service (IVS) stage API is REST compatible, using a standard HTTP \n\t API and an AWS EventBridge event stream for responses. JSON is used for both requests and responses, \n\t including errors.\n Terminology: \n Resources\n The following resources contain information about your IVS live stream (see Getting Started with Amazon IVS): \n Tagging\n A tag is a metadata label that you assign to an AWS resource. A tag\n comprises a key and a value, both set by you. For\n example, you might set a tag as topic:nature to label a particular video\n category. See Tagging AWS Resources for more information, including restrictions that apply to\n tags and \"Tag naming limits and requirements\"; Amazon IVS stages has no service-specific\n constraints beyond what is documented there. Tags can help you identify and organize your AWS resources. For example, you can use the\n same tag for different resources to indicate that they are related. You can also use tags to\n manage access (see Access Tags). The Amazon IVS stage API has these tag-related endpoints: TagResource, UntagResource, and\n ListTagsForResource. The following resource supports tagging: Stage. At most 50 tags can be applied to a resource. \n Stages Endpoints\n \n Tags Endpoints\n ",
+ "smithy.api#documentation": "\n Introduction\n The Amazon Interactive Video Service (IVS) real-time API is REST compatible, using a standard HTTP \n\t API and an AWS EventBridge event stream for responses. JSON is used for both requests and responses, \n\t including errors.\n Terminology: \n Resources\n The following resources contain information about your IVS live stream (see Getting Started with Amazon IVS Real-Time Streaming): \n Tagging\n A tag is a metadata label that you assign to an AWS resource. A tag\n comprises a key and a value, both set by you. For\n example, you might set a tag as topic:nature to label a particular video\n category. See Tagging AWS Resources for more information, including restrictions that apply to\n tags and \"Tag naming limits and requirements\"; Amazon IVS stages has no service-specific\n constraints beyond what is documented there. Tags can help you identify and organize your AWS resources. For example, you can use the\n same tag for different resources to indicate that they are related. You can also use tags to\n manage access (see Access Tags). The Amazon IVS real-time API has these tag-related endpoints: TagResource, UntagResource, and\n ListTagsForResource. The following resource supports tagging: Stage. At most 50 tags can be applied to a resource. \n Stages Endpoints\n \n Tags Endpoints\n
\n ", "smithy.api#title": "Amazon Interactive Video Service RealTime", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -453,54 +453,54 @@ "smithy.rules#endpointTests": { "testCases": [ { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://ivsrealtime-fips.us-gov-east-1.api.aws" + "url": "https://ivsrealtime-fips.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-gov-east-1", "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://ivsrealtime-fips.us-gov-east-1.amazonaws.com" + "url": "https://ivsrealtime-fips.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "Region": "us-gov-east-1", "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://ivsrealtime.us-gov-east-1.api.aws" + "url": "https://ivsrealtime.us-east-1.api.aws" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-gov-east-1", "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://ivsrealtime.us-gov-east-1.amazonaws.com" + "url": "https://ivsrealtime.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "Region": "us-gov-east-1", "UseDualStack": false } }, @@ -512,8 +512,8 @@ } }, "params": { - "UseFIPS": true, "Region": "cn-north-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -525,8 +525,8 @@ } }, "params": { - "UseFIPS": true, "Region": "cn-north-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -538,8 +538,8 @@ } }, "params": { - "UseFIPS": false, "Region": "cn-north-1", + "UseFIPS": false, "UseDualStack": true } }, @@ -551,108 +551,108 @@ } }, "params": { - "UseFIPS": false, "Region": "cn-north-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://ivsrealtime-fips.us-gov-east-1.api.aws" + } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-iso-east-1", "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://ivsrealtime-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://ivsrealtime-fips.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "Region": "us-iso-east-1", "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { - "error": "DualStack is 
enabled but this partition does not support DualStack" + "endpoint": { + "url": "https://ivsrealtime.us-gov-east-1.api.aws" + } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-iso-east-1", "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://ivsrealtime.us-iso-east-1.c2s.ic.gov" + "url": "https://ivsrealtime.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "Region": "us-iso-east-1", "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://ivsrealtime-fips.us-east-1.api.aws" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-east-1", "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://ivsrealtime-fips.us-east-1.amazonaws.com" + "url": "https://ivsrealtime-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "Region": "us-east-1", "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://ivsrealtime.us-east-1.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://ivsrealtime.us-east-1.amazonaws.com" + "url": "https://ivsrealtime.us-iso-east-1.c2s.ic.gov" } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": false } }, @@ -662,8 +662,8 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", + "UseFIPS": true, "UseDualStack": true } }, @@ -675,8 +675,8 @@ } }, "params": { - "UseFIPS": true, "Region": "us-isob-east-1", + "UseFIPS": true, "UseDualStack": false } }, @@ -686,8 +686,8 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseFIPS": false, "Region": "us-isob-east-1", + "UseFIPS": false, "UseDualStack": true } }, @@ -699,21 +699,34 @@ } }, "params": { - "UseFIPS": false, "Region": "us-isob-east-1", + "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + 
"documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -724,8 +737,8 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false, "Endpoint": "https://example.com" } @@ -736,11 +749,17 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" @@ -1061,7 +1080,7 @@ "errorCode": { "target": "com.amazonaws.ivsrealtime#EventErrorCode", "traits": { - "smithy.api#documentation": "

If the event is an error event, the error code is provided to give insight into the\n specific error that occurred. If the event is not an error event, this field is null.\n INSUFFICIENT_CAPABILITIES indicates that the participant tried to take an action that the\n participant’s token is not allowed to do. For more information about participant\n capabilities, see the capabilities field in CreateParticipantToken. "
+ "smithy.api#documentation": "If the event is an error event, the error code is provided to give insight into the\n specific error that occurred. If the event is not an error event, this field is null.\n INSUFFICIENT_CAPABILITIES indicates that the participant tried to take an action that the\n participant’s token is not allowed to do. For more information about participant\n capabilities, see the capabilities field in CreateParticipantToken. \n\t\t QUOTA_EXCEEDED indicates that the number of participants who want to publish/subscribe to a \n\t\t stage exceeds the quota; for more information, see Service Quotas.\n\t\t PUBLISHER_NOT_FOUND indicates that the participant tried to subscribe to a publisher\n\t\t that doesn’t exist. \n
" } } }, @@ -1076,6 +1095,14 @@ { "value": "INSUFFICIENT_CAPABILITIES", "name": "INSUFFICIENT_CAPABILITIES" + }, + { + "value": "QUOTA_EXCEEDED", + "name": "QUOTA_EXCEEDED" + }, + { + "value": "PUBLISHER_NOT_FOUND", + "name": "PUBLISHER_NOT_FOUND" } ] } diff --git a/codegen/sdk-codegen/aws-models/kinesis-video-archived-media.json b/codegen/sdk-codegen/aws-models/kinesis-video-archived-media.json index 43a7612d231..58f4391edec 100644 --- a/codegen/sdk-codegen/aws-models/kinesis-video-archived-media.json +++ b/codegen/sdk-codegen/aws-models/kinesis-video-archived-media.json @@ -427,8 +427,8 @@ }, "params": { "Region": "af-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -440,8 +440,8 @@ }, "params": { "Region": "ap-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -453,8 +453,8 @@ }, "params": { "Region": "ap-northeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -466,8 +466,8 @@ }, "params": { "Region": "ap-northeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -479,8 +479,8 @@ }, "params": { "Region": "ap-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -492,8 +492,8 @@ }, "params": { "Region": "ap-southeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -505,8 +505,8 @@ }, "params": { "Region": "ap-southeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -518,8 +518,8 @@ }, "params": { "Region": "ca-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -531,8 +531,8 @@ }, "params": { "Region": "eu-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -544,8 +544,8 @@ }, "params": { "Region": "eu-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -557,8 +557,8 @@ }, "params": { "Region": "eu-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -570,8 +570,8 @@ }, "params": { "Region": "eu-west-3", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -583,8 +583,8 @@ }, "params": { "Region": "sa-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -596,8 +596,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -609,8 +609,8 @@ }, "params": { "Region": "us-east-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -622,8 +622,8 @@ }, "params": { "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -635,8 +635,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -648,8 +648,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -661,8 +661,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -674,8 +674,8 @@ }, "params": { "Region": 
"cn-north-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -687,8 +687,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -700,8 +700,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -713,8 +713,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -726,8 +726,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -739,8 +739,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -752,8 +752,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -765,8 +765,19 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -778,8 +789,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -791,8 +813,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -804,8 +837,19 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -817,8 +861,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -830,8 +874,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -843,8 +887,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -855,8 +899,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -867,10 +911,16 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, "UseFIPS": 
false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" @@ -909,22 +959,24 @@ } }, "traits": { - "smithy.api#documentation": "

Describes the timestamp range and timestamp origin of a range of fragments. Fragments that have duplicate producer timestamps are deduplicated. This means that if\n producers are producing a stream of fragments with producer timestamps that are\n approximately equal to the true clock time, the clip will contain all of the fragments\n within the requested timestamp range. If some fragments are ingested within the same\n time range and very different points in time, only the oldest ingested collection of\n fragments are returned. "
+ "smithy.api#documentation": "Describes the timestamp range and timestamp origin of a range of fragments. Fragments that have duplicate producer timestamps are deduplicated. This means that if\n producers are producing a stream of fragments with producer timestamps that are\n approximately equal to the true clock time, the clip will contain all of the fragments\n within the requested timestamp range. If some fragments are ingested within the same\n time range and very different points in time, only the oldest ingested collection of\n fragments are returned.
" } }, "com.amazonaws.kinesisvideoarchivedmedia#ClipFragmentSelectorType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "PRODUCER_TIMESTAMP", - "name": "PRODUCER_TIMESTAMP" - }, - { - "value": "SERVER_TIMESTAMP", - "name": "SERVER_TIMESTAMP" + "type": "enum", + "members": { + "PRODUCER_TIMESTAMP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PRODUCER_TIMESTAMP" + } + }, + "SERVER_TIMESTAMP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SERVER_TIMESTAMP" } - ] + } } }, "com.amazonaws.kinesisvideoarchivedmedia#ClipTimestampRange": { @@ -933,14 +985,14 @@ "StartTimestamp": { "target": "com.amazonaws.kinesisvideoarchivedmedia#Timestamp", "traits": { - "smithy.api#documentation": "

The starting timestamp in the range of timestamps for which to return fragments. Only fragments that start exactly at or after StartTimestamp are included\n in the session. Fragments that start before StartTimestamp and continue\n past it aren't included in the session. If FragmentSelectorType is\n SERVER_TIMESTAMP, the StartTimestamp must be later than\n the stream head. ",
+ "smithy.api#documentation": "The starting timestamp in the range of timestamps for which to return fragments. Only fragments that start exactly at or after StartTimestamp are included\n in the session. Fragments that start before StartTimestamp and continue\n past it aren't included in the session. If FragmentSelectorType is\n SERVER_TIMESTAMP, the StartTimestamp must be later than\n the stream head.
", "smithy.api#required": {} } }, "EndTimestamp": { "target": "com.amazonaws.kinesisvideoarchivedmedia#Timestamp", "traits": { - "smithy.api#documentation": "

The end of the timestamp range for the requested media. This value must be within 24 hours of the specified StartTimestamp, and\n it must be later than the StartTimestamp value. If\n FragmentSelectorType for the request is SERVER_TIMESTAMP,\n this value must be in the past. This value is inclusive. The EndTimestamp is compared to the (starting)\n timestamp of the fragment. Fragments that start before the EndTimestamp\n value and continue past it are included in the session. ",
+ "smithy.api#documentation": "The end of the timestamp range for the requested media. This value must be within 24 hours of the specified StartTimestamp, and\n it must be later than the StartTimestamp value. If\n FragmentSelectorType for the request is SERVER_TIMESTAMP,\n this value must be in the past. This value is inclusive. The EndTimestamp is compared to the (starting)\n timestamp of the fragment. Fragments that start before the EndTimestamp\n value and continue past it are included in the session.
", "smithy.api#required": {} } } @@ -950,18 +1002,20 @@ } }, "com.amazonaws.kinesisvideoarchivedmedia#ContainerFormat": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "FRAGMENTED_MP4", - "name": "FRAGMENTED_MP4" - }, - { - "value": "MPEG_TS", - "name": "MPEG_TS" + "type": "enum", + "members": { + "FRAGMENTED_MP4": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FRAGMENTED_MP4" + } + }, + "MPEG_TS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MPEG_TS" } - ] + } } }, "com.amazonaws.kinesisvideoarchivedmedia#ContentType": { @@ -975,33 +1029,37 @@ } }, "com.amazonaws.kinesisvideoarchivedmedia#DASHDisplayFragmentNumber": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "ALWAYS", - "name": "ALWAYS" - }, - { - "value": "NEVER", - "name": "NEVER" + "type": "enum", + "members": { + "ALWAYS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ALWAYS" + } + }, + "NEVER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NEVER" } - ] + } } }, "com.amazonaws.kinesisvideoarchivedmedia#DASHDisplayFragmentTimestamp": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "ALWAYS", - "name": "ALWAYS" - }, - { - "value": "NEVER", - "name": "NEVER" + "type": "enum", + "members": { + "ALWAYS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ALWAYS" } - ] + }, + "NEVER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NEVER" + } + } } }, "com.amazonaws.kinesisvideoarchivedmedia#DASHFragmentSelector": { @@ -1010,13 +1068,13 @@ "FragmentSelectorType": { "target": "com.amazonaws.kinesisvideoarchivedmedia#DASHFragmentSelectorType", "traits": { - "smithy.api#documentation": "

The source of the timestamps for the requested media. When FragmentSelectorType is set to PRODUCER_TIMESTAMP and\n GetDASHStreamingSessionURLInput$PlaybackMode is\n ON_DEMAND or LIVE_REPLAY, the first fragment ingested with\n a producer timestamp within the specified FragmentSelector$TimestampRange is included in the media playlist. In\n addition, the fragments with producer timestamps within the TimestampRange\n ingested immediately following the first fragment (up to the GetDASHStreamingSessionURLInput$MaxManifestFragmentResults value) are\n included. Fragments that have duplicate producer timestamps are deduplicated. This means that if\n producers are producing a stream of fragments with producer timestamps that are\n approximately equal to the true clock time, the MPEG-DASH manifest will contain all of\n the fragments within the requested timestamp range. If some fragments are ingested\n within the same time range and very different points in time, only the oldest ingested\n collection of fragments are returned. When FragmentSelectorType is set to PRODUCER_TIMESTAMP and\n GetDASHStreamingSessionURLInput$PlaybackMode is LIVE,\n the producer timestamps are used in the MP4 fragments and for deduplication. But the\n most recently ingested fragments based on server timestamps are included in the\n MPEG-DASH manifest. This means that even if fragments ingested in the past have producer\n timestamps with values now, they are not included in the HLS media playlist. The default is SERVER_TIMESTAMP. "
+ "smithy.api#documentation": "The source of the timestamps for the requested media. When FragmentSelectorType is set to PRODUCER_TIMESTAMP and\n GetDASHStreamingSessionURLInput$PlaybackMode is\n ON_DEMAND or LIVE_REPLAY, the first fragment ingested with\n a producer timestamp within the specified FragmentSelector$TimestampRange is included in the media playlist. In\n addition, the fragments with producer timestamps within the TimestampRange\n ingested immediately following the first fragment (up to the GetDASHStreamingSessionURLInput$MaxManifestFragmentResults value) are\n included. Fragments that have duplicate producer timestamps are deduplicated. This means that if\n producers are producing a stream of fragments with producer timestamps that are\n approximately equal to the true clock time, the MPEG-DASH manifest will contain all of\n the fragments within the requested timestamp range. If some fragments are ingested\n within the same time range and very different points in time, only the oldest ingested\n collection of fragments are returned. When FragmentSelectorType is set to PRODUCER_TIMESTAMP and\n GetDASHStreamingSessionURLInput$PlaybackMode is LIVE,\n the producer timestamps are used in the MP4 fragments and for deduplication. But the\n most recently ingested fragments based on server timestamps are included in the\n MPEG-DASH manifest. This means that even if fragments ingested in the past have producer\n timestamps with values now, they are not included in the HLS media playlist. The default is SERVER_TIMESTAMP.
" } }, "TimestampRange": { "target": "com.amazonaws.kinesisvideoarchivedmedia#DASHTimestampRange", "traits": { - "smithy.api#documentation": "

The start and end of the timestamp range for the requested media. This value should not be present if PlaybackType is\n LIVE. "
+ "smithy.api#documentation": "The start and end of the timestamp range for the requested media. This value should not be present if PlaybackType is\n LIVE.
" } } }, @@ -1025,18 +1083,20 @@ } }, "com.amazonaws.kinesisvideoarchivedmedia#DASHFragmentSelectorType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "PRODUCER_TIMESTAMP", - "name": "PRODUCER_TIMESTAMP" - }, - { - "value": "SERVER_TIMESTAMP", - "name": "SERVER_TIMESTAMP" + "type": "enum", + "members": { + "PRODUCER_TIMESTAMP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PRODUCER_TIMESTAMP" + } + }, + "SERVER_TIMESTAMP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SERVER_TIMESTAMP" } - ] + } } }, "com.amazonaws.kinesisvideoarchivedmedia#DASHMaxResults": { @@ -1049,22 +1109,26 @@ } }, "com.amazonaws.kinesisvideoarchivedmedia#DASHPlaybackMode": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "LIVE", - "name": "LIVE" - }, - { - "value": "LIVE_REPLAY", - "name": "LIVE_REPLAY" - }, - { - "value": "ON_DEMAND", - "name": "ON_DEMAND" + "type": "enum", + "members": { + "LIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LIVE" } - ] + }, + "LIVE_REPLAY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LIVE_REPLAY" + } + }, + "ON_DEMAND": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ON_DEMAND" + } + } } }, "com.amazonaws.kinesisvideoarchivedmedia#DASHStreamingSessionURL": { @@ -1076,18 +1140,18 @@ "StartTimestamp": { "target": "com.amazonaws.kinesisvideoarchivedmedia#Timestamp", "traits": { - "smithy.api#documentation": "

The start of the timestamp range for the requested media. If the DASHTimestampRange value is specified, the\n StartTimestamp value is required. Only fragments that start exactly at or after StartTimestamp are included\n in the session. Fragments that start before StartTimestamp and continue\n past it aren't included in the session. If FragmentSelectorType is\n SERVER_TIMESTAMP, the StartTimestamp must be later than\n the stream head. "
+ "smithy.api#documentation": "The start of the timestamp range for the requested media. If the DASHTimestampRange value is specified, the\n StartTimestamp value is required. Only fragments that start exactly at or after StartTimestamp are included\n in the session. Fragments that start before StartTimestamp and continue\n past it aren't included in the session. If FragmentSelectorType is\n SERVER_TIMESTAMP, the StartTimestamp must be later than\n the stream head.
" } }, "EndTimestamp": { "target": "com.amazonaws.kinesisvideoarchivedmedia#Timestamp", "traits": { - "smithy.api#documentation": "

The end of the timestamp range for the requested media. This value must be within 24\n hours of the specified StartTimestamp, and it must be later than the\n StartTimestamp value. If FragmentSelectorType for the request is SERVER_TIMESTAMP,\n this value must be in the past. The EndTimestamp value is required for ON_DEMAND mode, but\n optional for LIVE_REPLAY mode. If the EndTimestamp is not set\n for LIVE_REPLAY mode then the session will continue to include newly\n ingested fragments until the session expires. This value is inclusive. The EndTimestamp is compared to the\n (starting) timestamp of the fragment. Fragments that start before the\n EndTimestamp value and continue past it are included in the\n session. "
+ "smithy.api#documentation": "The end of the timestamp range for the requested media. This value must be within 24\n hours of the specified StartTimestamp, and it must be later than the\n StartTimestamp value. If FragmentSelectorType for the request is SERVER_TIMESTAMP,\n this value must be in the past. The EndTimestamp value is required for ON_DEMAND mode, but\n optional for LIVE_REPLAY mode. If the EndTimestamp is not set\n for LIVE_REPLAY mode then the session will continue to include newly\n ingested fragments until the session expires. This value is inclusive. The EndTimestamp is compared to the\n (starting) timestamp of the fragment. Fragments that start before the\n EndTimestamp value and continue past it are included in the\n session.
" } } }, "traits": { - "smithy.api#documentation": "

The start and end of the timestamp range for the requested media. This value should not be present if PlaybackType is\n LIVE. The values in DASHimestampRange are inclusive. Fragments that start\n exactly at or after the start time are included in the session. Fragments that start\n before the start time and continue past it are not included in the session. "
+ "smithy.api#documentation": "The start and end of the timestamp range for the requested media. This value should not be present if PlaybackType is\n LIVE. The values in DASHimestampRange are inclusive. Fragments that start\n exactly at or after the start time are included in the session. Fragments that start\n before the start time and continue past it are not included in the session.
" } }, "com.amazonaws.kinesisvideoarchivedmedia#ErrorMessage": { @@ -1103,18 +1167,20 @@ } }, "com.amazonaws.kinesisvideoarchivedmedia#Format": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "JPEG", - "name": "JPEG" - }, - { - "value": "PNG", - "name": "PNG" + "type": "enum", + "members": { + "JPEG": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "JPEG" + } + }, + "PNG": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PNG" } - ] + } } }, "com.amazonaws.kinesisvideoarchivedmedia#FormatConfig": { @@ -1133,14 +1199,14 @@ } }, "com.amazonaws.kinesisvideoarchivedmedia#FormatConfigKey": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "JPEGQuality", - "name": "JPEGQuality" + "type": "enum", + "members": { + "JPEGQuality": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "JPEGQuality" } - ] + } } }, "com.amazonaws.kinesisvideoarchivedmedia#FormatConfigValue": { @@ -1178,7 +1244,7 @@ "ServerTimestamp": { "target": "com.amazonaws.kinesisvideoarchivedmedia#Timestamp", "traits": { - "smithy.api#documentation": "

The timestamp from the AWS server corresponding to the fragment. "
+ "smithy.api#documentation": "The timestamp from the Amazon Web Services server corresponding to the fragment.
" } }, "FragmentLengthInMilliseconds": { @@ -1240,22 +1306,24 @@ } }, "traits": { - "smithy.api#documentation": "

Describes the timestamp range and timestamp origin of a range of fragments. Only fragments with a start timestamp greater than or equal to the given start time\n and less than or equal to the end time are returned. For example, if a stream contains\n fragments with the following start timestamps: A fragment selector range with a start time of 00:00:01 and end time of 00:00:04\n would return the fragments with start times of 00:00:02 and 00:00:04. "
+ "smithy.api#documentation": "Describes the timestamp range and timestamp origin of a range of fragments. Only fragments with a start timestamp greater than or equal to the given start time\n and less than or equal to the end time are returned. For example, if a stream contains\n fragments with the following start timestamps: A fragment selector range with a start time of 00:00:01 and end time of 00:00:04\n would return the fragments with start times of 00:00:02 and 00:00:04.
" } }, "com.amazonaws.kinesisvideoarchivedmedia#FragmentSelectorType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "PRODUCER_TIMESTAMP", - "name": "PRODUCER_TIMESTAMP" - }, - { - "value": "SERVER_TIMESTAMP", - "name": "SERVER_TIMESTAMP" + "type": "enum", + "members": { + "PRODUCER_TIMESTAMP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PRODUCER_TIMESTAMP" + } + }, + "SERVER_TIMESTAMP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SERVER_TIMESTAMP" } - ] + } } }, "com.amazonaws.kinesisvideoarchivedmedia#GetClip": { @@ -1296,7 +1364,7 @@ } ], "traits": { - "smithy.api#documentation": "

Downloads an MP4 file (clip) containing the archived, on-demand media from the\n specified video stream over the specified time range. Both the StreamName and the StreamARN parameters are optional, but you must specify\n either the StreamName or the StreamARN when invoking this API operation. As a prerequisite to using GetCLip API, you must obtain an endpoint using\n GetDataEndpoint, specifying GET_CLIP for the\n APIName parameter. An Amazon Kinesis video stream has the following requirements for providing data\n through MP4: You can monitor the amount of outgoing data by monitoring the\n GetClip.OutgoingBytes Amazon CloudWatch metric. For information about\n using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video\n Streams Pricing and AWS\n Pricing. Charges for outgoing AWS data apply. ",
+ "smithy.api#documentation": "Downloads an MP4 file (clip) containing the archived, on-demand media from the\n specified video stream over the specified time range. Both the StreamName and the StreamARN parameters are optional, but you must specify\n either the StreamName or the StreamARN when invoking this API operation. As a prerequisite to using GetCLip API, you must obtain an endpoint using\n GetDataEndpoint, specifying GET_CLIP for the\n APIName parameter. An Amazon Kinesis video stream has the following requirements for providing data\n through MP4: You can monitor the amount of outgoing data by monitoring the\n GetClip.OutgoingBytes Amazon CloudWatch metric. For information about\n using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video\n Streams Pricing and Amazon Web Services\n Pricing. Charges for outgoing Amazon Web Services data apply.
", "smithy.api#http": { "method": "POST", "uri": "/getClip", @@ -1310,13 +1378,13 @@ "StreamName": { "target": "com.amazonaws.kinesisvideoarchivedmedia#StreamName", "traits": { - "smithy.api#documentation": "

The name of the stream for which to retrieve the media clip. You must specify either the StreamName or the StreamARN. "
+ "smithy.api#documentation": "The name of the stream for which to retrieve the media clip. You must specify either the StreamName or the StreamARN.
" } }, "StreamARN": { "target": "com.amazonaws.kinesisvideoarchivedmedia#ResourceARN", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the stream for which to retrieve the media clip. You must specify either the StreamName or the StreamARN. "
+ "smithy.api#documentation": "The Amazon Resource Name (ARN) of the stream for which to retrieve the media clip. You must specify either the StreamName or the StreamARN.
" } }, "ClipFragmentSelector": { @@ -1326,6 +1394,9 @@ "smithy.api#required": {} } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.kinesisvideoarchivedmedia#GetClipOutput": { @@ -1346,6 +1417,9 @@ "smithy.api#httpPayload": {} } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.kinesisvideoarchivedmedia#GetDASHStreamingSessionURL": { @@ -1383,7 +1457,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves an MPEG Dynamic Adaptive Streaming over HTTP (DASH) URL for the stream. You\n can then open the URL in a media player to view the stream contents.

\n \n

Both the StreamName and the StreamARN parameters are\n optional, but you must specify either the StreamName or the\n StreamARN when invoking this API operation.

\n

An Amazon Kinesis video stream has the following requirements for providing data\n through MPEG-DASH:

\n \n\n

The following procedure shows how to use MPEG-DASH with Kinesis Video Streams:

\n
    \n
  1. \n

    Get an endpoint using GetDataEndpoint, specifying\n GET_DASH_STREAMING_SESSION_URL for the APIName\n parameter.

    \n
  2. \n
  3. \n

    Retrieve the MPEG-DASH URL using GetDASHStreamingSessionURL.\n Kinesis Video Streams creates an MPEG-DASH streaming session to be used for\n accessing content in a stream using the MPEG-DASH protocol.\n GetDASHStreamingSessionURL returns an authenticated URL (that\n includes an encrypted session token) for the session's MPEG-DASH\n manifest (the root resource needed for streaming with\n MPEG-DASH).

    \n \n

    Don't share or store this token where an unauthorized entity can access\n it. The token provides access to the content of the stream. Safeguard the\n token with the same measures that you use with your AWS credentials.

    \n
    \n

    The media that is made available through the manifest consists only of the\n requested stream, time range, and format. No other media data (such as frames\n outside the requested window or alternate bitrates) is made available.

    \n
  4. \n
  5. \n

    Provide the URL (containing the encrypted session token) for the MPEG-DASH\n manifest to a media player that supports the MPEG-DASH protocol. Kinesis Video\n Streams makes the initialization fragment and media fragments available through\n the manifest URL. The initialization fragment contains the codec private data\n for the stream, and other data needed to set up the video or audio decoder and\n renderer. The media fragments contain encoded video frames or encoded audio\n samples.

    \n
  6. \n
  7. \n

    The media player receives the authenticated URL and requests stream metadata\n and media data normally. When the media player requests data, it calls the\n following actions:

    \n
      \n
    • \n

      \n GetDASHManifest: Retrieves an MPEG DASH\n manifest, which contains the metadata for the media that you want to\n playback.

      \n
    • \n
    • \n

      \n GetMP4InitFragment: Retrieves the MP4\n initialization fragment. The media player typically loads the\n initialization fragment before loading any media fragments. This\n fragment contains the \"fytp\" and \"moov\" MP4\n atoms, and the child atoms that are needed to initialize the media\n player decoder.

      \n

      The initialization fragment does not correspond to a fragment in a\n Kinesis video stream. It contains only the codec private data for the\n stream and respective track, which the media player needs to decode the\n media frames.

      \n
    • \n
    • \n

      \n GetMP4MediaFragment: Retrieves MP4\n media fragments. These fragments contain the \"moof\" and\n \"mdat\" MP4 atoms and their child atoms, containing the\n encoded fragment's media frames and their timestamps.

      \n \n

      After the first media fragment is made available in a streaming\n session, any fragments that don't contain the same codec private\n data cause an error to be returned when those different media\n fragments are loaded. Therefore, the codec private data should not\n change between fragments in a session. This also means that the\n session fails if the fragments in a stream change from having only\n video to having both audio and video.

      \n
      \n

      Data retrieved with this action is billable. See Pricing for details.

      \n
    • \n
    \n
  8. \n
\n \n

For restrictions that apply to MPEG-DASH sessions, see Kinesis Video Streams Limits.

\n
\n

You can monitor the amount of data that the media player consumes by monitoring the\n GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For\n information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video\n Streams Pricing and AWS\n Pricing. Charges for both HLS sessions and outgoing AWS data apply.

\n

For more information about HLS, see HTTP Live Streaming on the\n Apple Developer site.

\n\n \n

If an error is thrown after invoking a Kinesis Video Streams archived media API,\n in addition to the HTTP status code and the response body, it includes the following\n pieces of information:

\n \n

Both the HTTP status code and the ErrorType header can be utilized to make\n programmatic decisions about whether errors are retry-able and under what\n conditions, as well as provide information on what actions the client programmer\n might need to take in order to successfully try again.

\n

For more information, see the Errors section at\n the bottom of this topic, as well as Common Errors.\n

\n
", + "smithy.api#documentation": "

Retrieves an MPEG Dynamic Adaptive Streaming over HTTP (DASH) URL for the stream. You\n can then open the URL in a media player to view the stream contents.

\n

Both the StreamName and the StreamARN parameters are\n optional, but you must specify either the StreamName or the\n StreamARN when invoking this API operation.

\n

An Amazon Kinesis video stream has the following requirements for providing data\n through MPEG-DASH:

\n \n

The following procedure shows how to use MPEG-DASH with Kinesis Video Streams:

\n
    \n
  1. \n

    Get an endpoint using GetDataEndpoint, specifying\n GET_DASH_STREAMING_SESSION_URL for the APIName\n parameter.

    \n
  2. \n
  3. \n

    Retrieve the MPEG-DASH URL using GetDASHStreamingSessionURL.\n Kinesis Video Streams creates an MPEG-DASH streaming session to be used for\n accessing content in a stream using the MPEG-DASH protocol.\n GetDASHStreamingSessionURL returns an authenticated URL (that\n includes an encrypted session token) for the session's MPEG-DASH\n manifest (the root resource needed for streaming with\n MPEG-DASH).

    \n \n

    Don't share or store this token where an unauthorized entity can access\n it. The token provides access to the content of the stream. Safeguard the\n token with the same measures that you use with your Amazon Web Services credentials.

    \n
    \n

    The media that is made available through the manifest consists only of the\n requested stream, time range, and format. No other media data (such as frames\n outside the requested window or alternate bitrates) is made available.

    \n
  4. \n
  5. \n

    Provide the URL (containing the encrypted session token) for the MPEG-DASH\n manifest to a media player that supports the MPEG-DASH protocol. Kinesis Video\n Streams makes the initialization fragment and media fragments available through\n the manifest URL. The initialization fragment contains the codec private data\n for the stream, and other data needed to set up the video or audio decoder and\n renderer. The media fragments contain encoded video frames or encoded audio\n samples.

    \n
  6. \n
  7. \n

    The media player receives the authenticated URL and requests stream metadata\n and media data normally. When the media player requests data, it calls the\n following actions:

    \n
      \n
    • \n

      \n GetDASHManifest: Retrieves an MPEG DASH\n manifest, which contains the metadata for the media that you want to\n playback.

      \n
    • \n
    • \n

      \n GetMP4InitFragment: Retrieves the MP4\n initialization fragment. The media player typically loads the\n initialization fragment before loading any media fragments. This\n fragment contains the \"fytp\" and \"moov\" MP4\n atoms, and the child atoms that are needed to initialize the media\n player decoder.

      \n

      The initialization fragment does not correspond to a fragment in a\n Kinesis video stream. It contains only the codec private data for the\n stream and respective track, which the media player needs to decode the\n media frames.

      \n
    • \n
    • \n

      \n GetMP4MediaFragment: Retrieves MP4\n media fragments. These fragments contain the \"moof\" and\n \"mdat\" MP4 atoms and their child atoms, containing the\n encoded fragment's media frames and their timestamps.

      \n \n

      After the first media fragment is made available in a streaming\n session, any fragments that don't contain the same codec private\n data cause an error to be returned when those different media\n fragments are loaded. Therefore, the codec private data should not\n change between fragments in a session. This also means that the\n session fails if the fragments in a stream change from having only\n video to having both audio and video.

      \n
      \n

      Data retrieved with this action is billable. See Pricing for details.

      \n
    • \n
    \n
  8. \n
\n \n

For restrictions that apply to MPEG-DASH sessions, see Kinesis Video Streams Limits.

\n
\n

You can monitor the amount of data that the media player consumes by monitoring the\n GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For\n information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video\n Streams Pricing and Amazon Web Services\n Pricing. Charges for both HLS sessions and outgoing Amazon Web Services data apply.

\n

For more information about HLS, see HTTP Live Streaming on the\n Apple Developer site.

\n \n

If an error is thrown after invoking a Kinesis Video Streams archived media API,\n in addition to the HTTP status code and the response body, it includes the following\n pieces of information:

\n \n

Both the HTTP status code and the ErrorType header can be utilized to make\n programmatic decisions about whether errors are retry-able and under what\n conditions, as well as provide information on what actions the client programmer\n might need to take in order to successfully try again.

\n

For more information, see the Errors section at\n the bottom of this topic, as well as Common Errors.\n
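As a rough end-to-end sketch of the procedure above using the AWS SDK for Go v2 (not part of this model): the stream name and Expires value are placeholders, enum values are passed as raw strings rather than assuming generated constant names, and Options.BaseEndpoint is assumed to be available in the SDK version in use.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kinesisvideo"
	kvtypes "github.com/aws/aws-sdk-go-v2/service/kinesisvideo/types"
	"github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia"
	amtypes "github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Step 1: ask Kinesis Video Streams for the archived-media data endpoint.
	kv := kinesisvideo.NewFromConfig(cfg)
	ep, err := kv.GetDataEndpoint(ctx, &kinesisvideo.GetDataEndpointInput{
		StreamName: aws.String("my-stream"), // placeholder stream name
		APIName:    kvtypes.APIName("GET_DASH_STREAMING_SESSION_URL"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Step 2: request the MPEG-DASH session URL against that endpoint.
	am := kinesisvideoarchivedmedia.NewFromConfig(cfg, func(o *kinesisvideoarchivedmedia.Options) {
		o.BaseEndpoint = ep.DataEndpoint // assumes an SDK version that exposes Options.BaseEndpoint
	})
	out, err := am.GetDASHStreamingSessionURL(ctx, &kinesisvideoarchivedmedia.GetDASHStreamingSessionURLInput{
		StreamName:   aws.String("my-stream"),
		PlaybackMode: amtypes.DASHPlaybackMode("LIVE"),
		Expires:      aws.Int32(3600), // 1-hour session
	})
	if err != nil {
		log.Fatal(err)
	}

	// Step 3: hand the manifest URL to an MPEG-DASH capable player.
	fmt.Println(aws.ToString(out.DASHStreamingSessionURL))
}
```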

\n
", "smithy.api#http": { "method": "POST", "uri": "/getDASHStreamingSessionURL", @@ -1397,51 +1471,54 @@ "StreamName": { "target": "com.amazonaws.kinesisvideoarchivedmedia#StreamName", "traits": { - "smithy.api#documentation": "

The name of the stream for which to retrieve the MPEG-DASH manifest URL.

\n

You must specify either the StreamName or the\n StreamARN.

" + "smithy.api#documentation": "

The name of the stream for which to retrieve the MPEG-DASH manifest URL.

\n

You must specify either the StreamName or the\n StreamARN.

" } }, "StreamARN": { "target": "com.amazonaws.kinesisvideoarchivedmedia#ResourceARN", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the stream for which to retrieve the MPEG-DASH\n manifest URL.

\n

You must specify either the StreamName or the\n StreamARN.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the stream for which to retrieve the MPEG-DASH\n manifest URL.

\n

You must specify either the StreamName or the\n StreamARN.

" } }, "PlaybackMode": { "target": "com.amazonaws.kinesisvideoarchivedmedia#DASHPlaybackMode", "traits": { - "smithy.api#documentation": "

Whether to retrieve live, live replay, or archived, on-demand data.

\n

Features of the three types of sessions include the following:

\n \n

In all playback modes, if FragmentSelectorType is\n PRODUCER_TIMESTAMP, and if there are multiple fragments with the same\n start timestamp, the fragment that has the larger fragment number (that is, the newer\n fragment) is included in the MPEG-DASH manifest. The other fragments are not included.\n Fragments that have different timestamps but have overlapping durations are still\n included in the MPEG-DASH manifest. This can lead to unexpected behavior in the media\n player.

\n

The default is LIVE.

" + "smithy.api#documentation": "

Whether to retrieve live, live replay, or archived, on-demand data.

\n

Features of the three types of sessions include the following:

\n \n

In all playback modes, if FragmentSelectorType is\n PRODUCER_TIMESTAMP, and if there are multiple fragments with the same\n start timestamp, the fragment that has the larger fragment number (that is, the newer\n fragment) is included in the MPEG-DASH manifest. The other fragments are not included.\n Fragments that have different timestamps but have overlapping durations are still\n included in the MPEG-DASH manifest. This can lead to unexpected behavior in the media\n player.

\n

The default is LIVE.

" } }, "DisplayFragmentTimestamp": { "target": "com.amazonaws.kinesisvideoarchivedmedia#DASHDisplayFragmentTimestamp", "traits": { - "smithy.api#documentation": "

Per the MPEG-DASH specification, the wall-clock time of fragments in the manifest file\n can be derived using attributes in the manifest itself. However, typically, MPEG-DASH\n compatible media players do not properly handle gaps in the media timeline. Kinesis\n Video Streams adjusts the media timeline in the manifest file to enable playback of\n media with discontinuities. Therefore, the wall-clock time derived from the manifest\n file may be inaccurate. If DisplayFragmentTimestamp is set to ALWAYS, the\n accurate fragment timestamp is added to each S element in the manifest file with the\n attribute name “kvs:ts”. A custom MPEG-DASH media player is necessary to leverage this\n custom attribute.

\n

The default value is NEVER. When DASHFragmentSelector\n is SERVER_TIMESTAMP, the timestamps will be the server start timestamps.\n Similarly, when DASHFragmentSelector is\n PRODUCER_TIMESTAMP, the timestamps will be the producer start\n timestamps.

" + "smithy.api#documentation": "

Per the MPEG-DASH specification, the wall-clock time of fragments in the manifest file\n can be derived using attributes in the manifest itself. However, typically, MPEG-DASH\n compatible media players do not properly handle gaps in the media timeline. Kinesis\n Video Streams adjusts the media timeline in the manifest file to enable playback of\n media with discontinuities. Therefore, the wall-clock time derived from the manifest\n file may be inaccurate. If DisplayFragmentTimestamp is set to ALWAYS, the\n accurate fragment timestamp is added to each S element in the manifest file with the\n attribute name “kvs:ts”. A custom MPEG-DASH media player is necessary to leverage this\n custom attribute.

\n

The default value is NEVER. When DASHFragmentSelector\n is SERVER_TIMESTAMP, the timestamps will be the server start timestamps.\n Similarly, when DASHFragmentSelector is\n PRODUCER_TIMESTAMP, the timestamps will be the producer start\n timestamps.

" } }, "DisplayFragmentNumber": { "target": "com.amazonaws.kinesisvideoarchivedmedia#DASHDisplayFragmentNumber", "traits": { - "smithy.api#documentation": "

Fragments are identified in the manifest file based on their sequence number in the\n session. If DisplayFragmentNumber is set to ALWAYS, the Kinesis Video\n Streams fragment number is added to each S element in the manifest file with the\n attribute name “kvs:fn”. These fragment numbers can be used for logging or for use with\n other APIs (e.g. GetMedia and GetMediaForFragmentList). A\n custom MPEG-DASH media player is necessary to leverage this custom\n attribute.

\n

The default value is NEVER.

" + "smithy.api#documentation": "

Fragments are identified in the manifest file based on their sequence number in the\n session. If DisplayFragmentNumber is set to ALWAYS, the Kinesis Video\n Streams fragment number is added to each S element in the manifest file with the\n attribute name “kvs:fn”. These fragment numbers can be used for logging or for use with\n other APIs (e.g. GetMedia and GetMediaForFragmentList). A\n custom MPEG-DASH media player is necessary to leverage this custom\n attribute.

\n

The default value is NEVER.

" } }, "DASHFragmentSelector": { "target": "com.amazonaws.kinesisvideoarchivedmedia#DASHFragmentSelector", "traits": { - "smithy.api#documentation": "

The time range of the requested fragment and the source of the timestamps.

\n

This parameter is required if PlaybackMode is ON_DEMAND or\n LIVE_REPLAY. This parameter is optional if PlaybackMode is\n LIVE. If PlaybackMode is LIVE, the\n FragmentSelectorType can be set, but the TimestampRange\n should not be set. If PlaybackMode is ON_DEMAND or\n LIVE_REPLAY, both FragmentSelectorType and\n TimestampRange must be set.

" + "smithy.api#documentation": "

The time range of the requested fragment and the source of the timestamps.

\n

This parameter is required if PlaybackMode is ON_DEMAND or\n LIVE_REPLAY. This parameter is optional if PlaybackMode is\n LIVE. If PlaybackMode is LIVE, the\n FragmentSelectorType can be set, but the TimestampRange\n should not be set. If PlaybackMode is ON_DEMAND or\n LIVE_REPLAY, both FragmentSelectorType and\n TimestampRange must be set.
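For illustration, a minimal sketch of an ON_DEMAND request built with the AWS SDK for Go v2 (field and type names mirror this model; the enum values are passed as raw strings, and the stream name and time window are supplied by the caller):

```go
package kvsexamples

import (
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia"
	"github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia/types"
)

// onDemandDASHInput builds a GetDASHStreamingSessionURL request for a fixed,
// archived time window. ON_DEMAND requires both the selector type and the
// timestamp range, as described above.
func onDemandDASHInput(stream string, start, end time.Time) *kinesisvideoarchivedmedia.GetDASHStreamingSessionURLInput {
	return &kinesisvideoarchivedmedia.GetDASHStreamingSessionURLInput{
		StreamName:   aws.String(stream),
		PlaybackMode: types.DASHPlaybackMode("ON_DEMAND"),
		DASHFragmentSelector: &types.DASHFragmentSelector{
			FragmentSelectorType: types.DASHFragmentSelectorType("SERVER_TIMESTAMP"),
			TimestampRange: &types.DASHTimestampRange{
				StartTimestamp: aws.Time(start),
				EndTimestamp:   aws.Time(end),
			},
		},
	}
}
```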

" } }, "Expires": { "target": "com.amazonaws.kinesisvideoarchivedmedia#Expires", "traits": { - "smithy.api#documentation": "

The time in seconds until the requested session expires. This value can be between 300\n (5 minutes) and 43200 (12 hours).

\n

When a session expires, no new calls to GetDASHManifest,\n GetMP4InitFragment, or GetMP4MediaFragment can be made for\n that session.

\n

The default is 300 (5 minutes).

" + "smithy.api#documentation": "

The time in seconds until the requested session expires. This value can be between 300\n (5 minutes) and 43200 (12 hours).

\n

When a session expires, no new calls to GetDASHManifest,\n GetMP4InitFragment, or GetMP4MediaFragment can be made for\n that session.

\n

The default is 300 (5 minutes).

" } }, "MaxManifestFragmentResults": { "target": "com.amazonaws.kinesisvideoarchivedmedia#DASHMaxResults", "traits": { - "smithy.api#documentation": "

The maximum number of fragments that are returned in the MPEG-DASH manifest.

\n

When the PlaybackMode is LIVE, the most recent fragments are\n returned up to this value. When the PlaybackMode is ON_DEMAND,\n the oldest fragments are returned, up to this maximum number.

\n

When a higher number of fragments is available in a live MPEG-DASH manifest,\n video players often buffer content before starting playback. Increasing the buffer size\n increases the playback latency, but it decreases the likelihood that rebuffering will\n occur during playback. We recommend that a live MPEG-DASH manifest have a minimum of 3\n fragments and a maximum of 10 fragments.

\n

The default is 5 fragments if PlaybackMode is LIVE or\n LIVE_REPLAY, and 1,000 if PlaybackMode is\n ON_DEMAND.

\n

The maximum value of 1,000 fragments corresponds to more than 16 minutes of video on\n streams with 1-second fragments, and more than 2 1/2 hours of video on streams with\n 10-second fragments.

" + "smithy.api#documentation": "

The maximum number of fragments that are returned in the MPEG-DASH manifest.

\n

When the PlaybackMode is LIVE, the most recent fragments are\n returned up to this value. When the PlaybackMode is ON_DEMAND,\n the oldest fragments are returned, up to this maximum number.

\n

When a higher number of fragments is available in a live MPEG-DASH manifest,\n video players often buffer content before starting playback. Increasing the buffer size\n increases the playback latency, but it decreases the likelihood that rebuffering will\n occur during playback. We recommend that a live MPEG-DASH manifest have a minimum of 3\n fragments and a maximum of 10 fragments.

\n

The default is 5 fragments if PlaybackMode is LIVE or\n LIVE_REPLAY, and 1,000 if PlaybackMode is\n ON_DEMAND.

\n

The maximum value of 1,000 fragments corresponds to more than 16 minutes of video on\n streams with 1-second fragments, and more than 2 1/2 hours of video on streams with\n 10-second fragments.
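Those figures are simply the fragment count multiplied by the per-fragment duration; a trivial sketch of the arithmetic in Go:

```go
package main

import (
	"fmt"
	"time"
)

// approxManifestDuration estimates how much media a manifest covers, given the
// fragment count and the per-fragment duration.
func approxManifestDuration(fragments int, fragmentLength time.Duration) time.Duration {
	return time.Duration(fragments) * fragmentLength
}

func main() {
	// 1,000 one-second fragments: a bit under 17 minutes.
	fmt.Println(approxManifestDuration(1000, time.Second))
	// 1,000 ten-second fragments: roughly 2 hours and 47 minutes.
	fmt.Println(approxManifestDuration(1000, 10*time.Second))
}
```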

" } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.kinesisvideoarchivedmedia#GetDASHStreamingSessionURLOutput": { @@ -1453,6 +1530,9 @@ "smithy.api#documentation": "

The URL (containing the session token) that a media player can use to retrieve the\n MPEG-DASH manifest.

" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.kinesisvideoarchivedmedia#GetHLSStreamingSessionURL": { @@ -1490,7 +1570,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves an HTTP Live Streaming (HLS) URL for the stream. You can then open the URL\n in a browser or media player to view the stream contents.

\n

Both the StreamName and the StreamARN parameters are\n optional, but you must specify either the StreamName or the\n StreamARN when invoking this API operation.

\n

An Amazon Kinesis video stream has the following requirements for providing data\n through HLS:

\n \n

Kinesis Video Streams HLS sessions contain fragments in the fragmented MPEG-4 form\n (also called fMP4 or CMAF) or the MPEG-2 form (also called TS chunks, which the HLS\n specification also supports). For more information about HLS fragment types, see the\n HLS\n specification.

\n

The following procedure shows how to use HLS with Kinesis Video Streams:

\n
    \n
  1. \n

    Get an endpoint using GetDataEndpoint, specifying\n GET_HLS_STREAMING_SESSION_URL for the APIName\n parameter.

    \n
  2. \n
  3. \n

    Retrieve the HLS URL using GetHLSStreamingSessionURL. Kinesis\n Video Streams creates an HLS streaming session to be used for accessing content\n in a stream using the HLS protocol. GetHLSStreamingSessionURL\n returns an authenticated URL (that includes an encrypted session token) for the\n session's HLS master playlist (the root resource needed for\n streaming with HLS).

    \n \n

    Don't share or store this token where an unauthorized entity could access\n it. The token provides access to the content of the stream. Safeguard the\n token with the same measures that you would use with your AWS\n credentials.

    \n
    \n

    The media that is made available through the playlist consists only of the\n requested stream, time range, and format. No other media data (such as frames\n outside the requested window or alternate bitrates) is made available.

    \n
  4. \n
  5. \n

    Provide the URL (containing the encrypted session token) for the HLS master\n playlist to a media player that supports the HLS protocol. Kinesis Video Streams\n makes the HLS media playlist, initialization fragment, and media fragments\n available through the master playlist URL. The initialization fragment contains\n the codec private data for the stream, and other data needed to set up the video\n or audio decoder and renderer. The media fragments contain H.264-encoded video\n frames or AAC-encoded audio samples.

    \n
  6. \n
  7. \n

    The media player receives the authenticated URL and requests stream metadata\n and media data normally. When the media player requests data, it calls the\n following actions:

    \n
      \n
    • \n

      \n GetHLSMasterPlaylist: Retrieves an HLS\n master playlist, which contains a URL for the\n GetHLSMediaPlaylist action for each track, and\n additional metadata for the media player, including estimated bitrate\n and resolution.

      \n
    • \n
    • \n

      \n GetHLSMediaPlaylist: Retrieves an HLS\n media playlist, which contains a URL to access the MP4 initialization\n fragment with the GetMP4InitFragment action, and URLs to\n access the MP4 media fragments with the GetMP4MediaFragment\n actions. The HLS media playlist also contains metadata about the stream\n that the player needs to play it, such as whether the\n PlaybackMode is LIVE or\n ON_DEMAND. The HLS media playlist is typically static\n for sessions with a PlaybackType of ON_DEMAND.\n The HLS media playlist is continually updated with new fragments for\n sessions with a PlaybackType of LIVE. There is\n a distinct HLS media playlist for the video track and the audio track\n (if applicable) that contains MP4 media URLs for the specific track.\n

      \n
    • \n
    • \n

      \n GetMP4InitFragment: Retrieves the MP4\n initialization fragment. The media player typically loads the\n initialization fragment before loading any media fragments. This\n fragment contains the \"ftyp\" and \"moov\" MP4\n atoms, and the child atoms that are needed to initialize the media\n player decoder.

      \n

      The initialization fragment does not correspond to a fragment in a\n Kinesis video stream. It contains only the codec private data for the\n stream and respective track, which the media player needs to decode the\n media frames.

      \n
    • \n
    • \n

      \n GetMP4MediaFragment: Retrieves MP4\n media fragments. These fragments contain the \"moof\" and\n \"mdat\" MP4 atoms and their child atoms, containing the\n encoded fragment's media frames and their timestamps.

      \n \n

      After the first media fragment is made available in a streaming\n session, any fragments that don't contain the same codec private\n data cause an error to be returned when those different media\n fragments are loaded. Therefore, the codec private data should not\n change between fragments in a session. This also means that the\n session fails if the fragments in a stream change from having only\n video to having both audio and video.

      \n
      \n

      Data retrieved with this action is billable. See Pricing for details.

      \n
    • \n
    • \n

      \n GetTSFragment: Retrieves MPEG TS\n fragments containing both initialization and media data for all tracks\n in the stream.

      \n \n

      If the ContainerFormat is MPEG_TS, this\n API is used instead of GetMP4InitFragment and\n GetMP4MediaFragment to retrieve stream\n media.

      \n
      \n

      Data retrieved with this action is billable. For more information, see\n Kinesis Video Streams pricing.

      \n
    • \n
    \n
  8. \n
\n

A streaming session URL must not be shared between players. The service\n might throttle a session if multiple media players are sharing it. For\n connection limits, see Kinesis Video Streams Limits.

\n

You can monitor the amount of data that the media player consumes by monitoring the\n GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For\n information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video\n Streams Pricing and AWS\n Pricing. Charges for both HLS sessions and outgoing AWS data apply.

\n

For more information about HLS, see HTTP Live Streaming on the\n Apple Developer site.

\n\n \n

If an error is thrown after invoking a Kinesis Video Streams archived media API,\n in addition to the HTTP status code and the response body, it includes the following\n pieces of information:

\n \n

Both the HTTP status code and the ErrorType header can be utilized to make\n programmatic decisions about whether errors are retry-able and under what\n conditions, as well as provide information on what actions the client programmer\n might need to take in order to successfully try again.

\n

For more information, see the Errors section at\n the bottom of this topic, as well as Common Errors.\n

\n
", + "smithy.api#documentation": "

Retrieves an HTTP Live Streaming (HLS) URL for the stream. You can then open the URL\n in a browser or media player to view the stream contents.

\n

Both the StreamName and the StreamARN parameters are\n optional, but you must specify either the StreamName or the\n StreamARN when invoking this API operation.

\n

An Amazon Kinesis video stream has the following requirements for providing data\n through HLS:

\n \n

Kinesis Video Streams HLS sessions contain fragments in the fragmented MPEG-4 form\n (also called fMP4 or CMAF) or the MPEG-2 form (also called TS chunks, which the HLS\n specification also supports). For more information about HLS fragment types, see the\n HLS\n specification.

\n

The following procedure shows how to use HLS with Kinesis Video Streams:

\n
    \n
  1. \n

    Get an endpoint using GetDataEndpoint, specifying\n GET_HLS_STREAMING_SESSION_URL for the APIName\n parameter.

    \n
  2. \n
  3. \n

    Retrieve the HLS URL using GetHLSStreamingSessionURL. Kinesis\n Video Streams creates an HLS streaming session to be used for accessing content\n in a stream using the HLS protocol. GetHLSStreamingSessionURL\n returns an authenticated URL (that includes an encrypted session token) for the\n session's HLS master playlist (the root resource needed for\n streaming with HLS).

    \n \n

    Don't share or store this token where an unauthorized entity could access\n it. The token provides access to the content of the stream. Safeguard the\n token with the same measures that you would use with your Amazon Web Services\n credentials.

    \n
    \n

    The media that is made available through the playlist consists only of the\n requested stream, time range, and format. No other media data (such as frames\n outside the requested window or alternate bitrates) is made available.

    \n
  4. \n
  5. \n

    Provide the URL (containing the encrypted session token) for the HLS master\n playlist to a media player that supports the HLS protocol. Kinesis Video Streams\n makes the HLS media playlist, initialization fragment, and media fragments\n available through the master playlist URL. The initialization fragment contains\n the codec private data for the stream, and other data needed to set up the video\n or audio decoder and renderer. The media fragments contain H.264-encoded video\n frames or AAC-encoded audio samples.

    \n
  6. \n
  7. \n

    The media player receives the authenticated URL and requests stream metadata\n and media data normally. When the media player requests data, it calls the\n following actions:

    \n
      \n
    • \n

      \n GetHLSMasterPlaylist: Retrieves an HLS\n master playlist, which contains a URL for the\n GetHLSMediaPlaylist action for each track, and\n additional metadata for the media player, including estimated bitrate\n and resolution.

      \n
    • \n
    • \n

      \n GetHLSMediaPlaylist: Retrieves an HLS\n media playlist, which contains a URL to access the MP4 initialization\n fragment with the GetMP4InitFragment action, and URLs to\n access the MP4 media fragments with the GetMP4MediaFragment\n actions. The HLS media playlist also contains metadata about the stream\n that the player needs to play it, such as whether the\n PlaybackMode is LIVE or\n ON_DEMAND. The HLS media playlist is typically static\n for sessions with a PlaybackType of ON_DEMAND.\n The HLS media playlist is continually updated with new fragments for\n sessions with a PlaybackType of LIVE. There is\n a distinct HLS media playlist for the video track and the audio track\n (if applicable) that contains MP4 media URLs for the specific track.\n

      \n
    • \n
    • \n

      \n GetMP4InitFragment: Retrieves the MP4\n initialization fragment. The media player typically loads the\n initialization fragment before loading any media fragments. This\n fragment contains the \"ftyp\" and \"moov\" MP4\n atoms, and the child atoms that are needed to initialize the media\n player decoder.

      \n

      The initialization fragment does not correspond to a fragment in a\n Kinesis video stream. It contains only the codec private data for the\n stream and respective track, which the media player needs to decode the\n media frames.

      \n
    • \n
    • \n

      \n GetMP4MediaFragment: Retrieves MP4\n media fragments. These fragments contain the \"moof\" and\n \"mdat\" MP4 atoms and their child atoms, containing the\n encoded fragment's media frames and their timestamps.

      \n \n

      For the HLS streaming session, in-track codec private data (CPD)\n changes are supported. After the first media fragment is made\n available in a streaming session, fragments can contain CPD changes\n for each track. Therefore, the fragments in a session can have a\n different resolution, bit rate, or other information in the CPD\n without interrupting playback. However, any change made in the track\n number or track codec format can return an error when those\n different media fragments are loaded. For example, streaming will\n fail if the fragments in the stream change from having only video to\n having both audio and video, or if an AAC audio track is changed to\n an ALAW audio track. For each streaming session, only 500 CPD\n changes are allowed.

      \n
      \n

      Data retrieved with this action is billable. For information, see\n Pricing.

      \n
    • \n
    • \n

      \n GetTSFragment: Retrieves MPEG TS\n fragments containing both initialization and media data for all tracks\n in the stream.

      \n \n

      If the ContainerFormat is MPEG_TS, this\n API is used instead of GetMP4InitFragment and\n GetMP4MediaFragment to retrieve stream\n media.

      \n
      \n

      Data retrieved with this action is billable. For more information, see\n Kinesis Video Streams pricing.

      \n
    • \n
    \n
  8. \n
\n

A streaming session URL must not be shared between players. The service\n might throttle a session if multiple media players are sharing it. For\n connection limits, see Kinesis Video Streams Limits.

\n

You can monitor the amount of data that the media player consumes by monitoring the\n GetMP4MediaFragment.OutgoingBytes Amazon CloudWatch metric. For\n information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video\n Streams Pricing and Amazon Web Services\n Pricing. Charges for both HLS sessions and outgoing Amazon Web Services data apply.

\n

For more information about HLS, see HTTP Live Streaming on the\n Apple Developer site.

\n \n

If an error is thrown after invoking a Kinesis Video Streams archived media API,\n in addition to the HTTP status code and the response body, it includes the following\n pieces of information:

\n \n

Both the HTTP status code and the ErrorType header can be utilized to make\n programmatic decisions about whether errors are retry-able and under what\n conditions, as well as provide information on what actions the client programmer\n might need to take in order to successfully try again.

\n

For more information, see the Errors section at\n the bottom of this topic, as well as Common Errors.\n
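A hedged sketch of this flow with the AWS SDK for Go v2, requesting a LIVE_REPLAY session that keeps following newly ingested fragments: the stream name and Expires value are placeholders, enum values are passed as raw strings rather than assuming generated constant names, and Options.BaseEndpoint is assumed to exist in the SDK version in use.

```go
package kvsexamples

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kinesisvideo"
	kvtypes "github.com/aws/aws-sdk-go-v2/service/kinesisvideo/types"
	"github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia"
	amtypes "github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia/types"
)

// liveReplayHLSURL walks the procedure above: fetch the data endpoint for
// GET_HLS_STREAMING_SESSION_URL, then request a LIVE_REPLAY session that
// starts at `since` and, with no EndTimestamp, keeps including new fragments.
func liveReplayHLSURL(ctx context.Context, cfg aws.Config, stream string, since time.Time) (string, error) {
	kv := kinesisvideo.NewFromConfig(cfg)
	ep, err := kv.GetDataEndpoint(ctx, &kinesisvideo.GetDataEndpointInput{
		StreamName: aws.String(stream),
		APIName:    kvtypes.APIName("GET_HLS_STREAMING_SESSION_URL"),
	})
	if err != nil {
		return "", err
	}

	am := kinesisvideoarchivedmedia.NewFromConfig(cfg, func(o *kinesisvideoarchivedmedia.Options) {
		o.BaseEndpoint = ep.DataEndpoint // assumes an SDK version that exposes Options.BaseEndpoint
	})
	out, err := am.GetHLSStreamingSessionURL(ctx, &kinesisvideoarchivedmedia.GetHLSStreamingSessionURLInput{
		StreamName:      aws.String(stream),
		PlaybackMode:    amtypes.HLSPlaybackMode("LIVE_REPLAY"),
		ContainerFormat: amtypes.ContainerFormat("FRAGMENTED_MP4"),
		HLSFragmentSelector: &amtypes.HLSFragmentSelector{
			FragmentSelectorType: amtypes.HLSFragmentSelectorType("PRODUCER_TIMESTAMP"),
			TimestampRange: &amtypes.HLSTimestampRange{
				StartTimestamp: aws.Time(since), // EndTimestamp omitted: keep following new fragments
			},
		},
		Expires: aws.Int32(3600), // 1-hour session
	})
	if err != nil {
		return "", err
	}
	return aws.ToString(out.HLSStreamingSessionURL), nil
}
```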

\n
", "smithy.api#http": { "method": "POST", "uri": "/getHLSStreamingSessionURL", @@ -1504,57 +1584,60 @@ "StreamName": { "target": "com.amazonaws.kinesisvideoarchivedmedia#StreamName", "traits": { - "smithy.api#documentation": "

The name of the stream for which to retrieve the HLS master playlist URL.

\n

You must specify either the StreamName or the\n StreamARN.

" + "smithy.api#documentation": "

The name of the stream for which to retrieve the HLS master playlist URL.

\n

You must specify either the StreamName or the\n StreamARN.

" } }, "StreamARN": { "target": "com.amazonaws.kinesisvideoarchivedmedia#ResourceARN", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the stream for which to retrieve the HLS master\n playlist URL.

\n

You must specify either the StreamName or the\n StreamARN.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the stream for which to retrieve the HLS master\n playlist URL.

\n

You must specify either the StreamName or the\n StreamARN.

" } }, "PlaybackMode": { "target": "com.amazonaws.kinesisvideoarchivedmedia#HLSPlaybackMode", "traits": { - "smithy.api#documentation": "

Whether to retrieve live, live replay, or archived, on-demand data.

\n

Features of the three types of sessions include the following:

\n \n

In all playback modes, if FragmentSelectorType is\n PRODUCER_TIMESTAMP, and if there are multiple fragments with the same\n start timestamp, the fragment that has the largest fragment number (that is, the newest\n fragment) is included in the HLS media playlist. The other fragments are not included.\n Fragments that have different timestamps but have overlapping durations are still\n included in the HLS media playlist. This can lead to unexpected behavior in the media\n player.

\n

The default is LIVE.

" + "smithy.api#documentation": "

Whether to retrieve live, live replay, or archived, on-demand data.

\n

Features of the three types of sessions include the following:

\n \n

In all playback modes, if FragmentSelectorType is\n PRODUCER_TIMESTAMP, and if there are multiple fragments with the same\n start timestamp, the fragment that has the largest fragment number (that is, the newest\n fragment) is included in the HLS media playlist. The other fragments are not included.\n Fragments that have different timestamps but have overlapping durations are still\n included in the HLS media playlist. This can lead to unexpected behavior in the media\n player.

\n

The default is LIVE.

" } }, "HLSFragmentSelector": { "target": "com.amazonaws.kinesisvideoarchivedmedia#HLSFragmentSelector", "traits": { - "smithy.api#documentation": "

The time range of the requested fragment and the source of the timestamps.

\n

This parameter is required if PlaybackMode is ON_DEMAND or\n LIVE_REPLAY. This parameter is optional if PlaybackMode is\n LIVE. If PlaybackMode is LIVE, the\n FragmentSelectorType can be set, but the TimestampRange\n should not be set. If PlaybackMode is ON_DEMAND or\n LIVE_REPLAY, both FragmentSelectorType and\n TimestampRange must be set.

" + "smithy.api#documentation": "

The time range of the requested fragment and the source of the timestamps.

\n

This parameter is required if PlaybackMode is ON_DEMAND or\n LIVE_REPLAY. This parameter is optional if PlaybackMode is\n LIVE. If PlaybackMode is LIVE, the\n FragmentSelectorType can be set, but the TimestampRange\n should not be set. If PlaybackMode is ON_DEMAND or\n LIVE_REPLAY, both FragmentSelectorType and\n TimestampRange must be set.

" } }, "ContainerFormat": { "target": "com.amazonaws.kinesisvideoarchivedmedia#ContainerFormat", "traits": { - "smithy.api#documentation": "

Specifies which format should be used for packaging the media. Specifying the\n FRAGMENTED_MP4 container format packages the media into MP4 fragments\n (fMP4 or CMAF). This is the recommended packaging because there is minimal packaging\n overhead. The other container format option is MPEG_TS. HLS has supported\n MPEG TS chunks since it was released and is sometimes the only supported packaging on\n older HLS players. MPEG TS typically has a 5-25 percent packaging overhead. This means\n MPEG TS typically requires 5-25 percent more bandwidth and cost than fMP4.

\n

The default is FRAGMENTED_MP4.

" + "smithy.api#documentation": "

Specifies which format should be used for packaging the media. Specifying the\n FRAGMENTED_MP4 container format packages the media into MP4 fragments\n (fMP4 or CMAF). This is the recommended packaging because there is minimal packaging\n overhead. The other container format option is MPEG_TS. HLS has supported\n MPEG TS chunks since it was released and is sometimes the only supported packaging on\n older HLS players. MPEG TS typically has a 5-25 percent packaging overhead. This means\n MPEG TS typically requires 5-25 percent more bandwidth and cost than fMP4.

\n

The default is FRAGMENTED_MP4.

" } }, "DiscontinuityMode": { "target": "com.amazonaws.kinesisvideoarchivedmedia#HLSDiscontinuityMode", "traits": { - "smithy.api#documentation": "

Specifies when flags marking discontinuities between fragments are added to the media\n playlists.

\n

Media players typically build a timeline of media content to play, based on the\n timestamps of each fragment. This means that if there is any overlap or gap between\n fragments (as is typical if HLSFragmentSelector is set to\n SERVER_TIMESTAMP), the media player timeline will also have small gaps\n between fragments in some places, and will overwrite frames in other places. Gaps in the\n media player timeline can cause playback to stall and overlaps can cause playback to be\n jittery. When there are discontinuity flags between fragments, the media player is\n expected to reset the timeline, resulting in the next fragment being played immediately\n after the previous fragment.

\n

The following modes are supported:

\n \n

The default is ALWAYS when HLSFragmentSelector is set\n to SERVER_TIMESTAMP, and NEVER when it is set to\n PRODUCER_TIMESTAMP.

" + "smithy.api#documentation": "

Specifies when flags marking discontinuities between fragments are added to the media\n playlists.

\n

Media players typically build a timeline of media content to play, based on the\n timestamps of each fragment. This means that if there is any overlap or gap between\n fragments (as is typical if HLSFragmentSelector is set to\n SERVER_TIMESTAMP), the media player timeline will also have small gaps\n between fragments in some places, and will overwrite frames in other places. Gaps in the\n media player timeline can cause playback to stall and overlaps can cause playback to be\n jittery. When there are discontinuity flags between fragments, the media player is\n expected to reset the timeline, resulting in the next fragment being played immediately\n after the previous fragment.

\n

The following modes are supported:

\n \n

The default is ALWAYS when HLSFragmentSelector is set\n to SERVER_TIMESTAMP, and NEVER when it is set to\n PRODUCER_TIMESTAMP.

" } }, "DisplayFragmentTimestamp": { "target": "com.amazonaws.kinesisvideoarchivedmedia#HLSDisplayFragmentTimestamp", "traits": { - "smithy.api#documentation": "

Specifies when the fragment start timestamps should be included in the HLS media\n playlist. Typically, media players report the playhead position as a time relative to\n the start of the first fragment in the playback session. However, when the start\n timestamps are included in the HLS media playlist, some media players might report the\n current playhead as an absolute time based on the fragment timestamps. This can be\n useful for creating a playback experience that shows viewers the wall-clock time of the\n media.

\n

The default is NEVER. When HLSFragmentSelector is\n SERVER_TIMESTAMP, the timestamps will be the server start timestamps.\n Similarly, when HLSFragmentSelector is\n PRODUCER_TIMESTAMP, the timestamps will be the producer start timestamps.\n

" + "smithy.api#documentation": "

Specifies when the fragment start timestamps should be included in the HLS media\n playlist. Typically, media players report the playhead position as a time relative to\n the start of the first fragment in the playback session. However, when the start\n timestamps are included in the HLS media playlist, some media players might report the\n current playhead as an absolute time based on the fragment timestamps. This can be\n useful for creating a playback experience that shows viewers the wall-clock time of the\n media.

\n

The default is NEVER. When HLSFragmentSelector is\n SERVER_TIMESTAMP, the timestamps will be the server start timestamps.\n Similarly, when HLSFragmentSelector is\n PRODUCER_TIMESTAMP, the timestamps will be the producer start timestamps.\n

" } }, "Expires": { "target": "com.amazonaws.kinesisvideoarchivedmedia#Expires", "traits": { - "smithy.api#documentation": "

The time in seconds until the requested session expires. This value can be between 300\n (5 minutes) and 43200 (12 hours).

\n

When a session expires, no new calls to GetHLSMasterPlaylist,\n GetHLSMediaPlaylist, GetMP4InitFragment,\n GetMP4MediaFragment, or GetTSFragment can be made for that\n session.

\n

The default is 300 (5 minutes).

" + "smithy.api#documentation": "

The time in seconds until the requested session expires. This value can be between 300\n (5 minutes) and 43200 (12 hours).

\n

When a session expires, no new calls to GetHLSMasterPlaylist,\n GetHLSMediaPlaylist, GetMP4InitFragment,\n GetMP4MediaFragment, or GetTSFragment can be made for that\n session.

\n

The default is 300 (5 minutes).

" } }, "MaxMediaPlaylistFragmentResults": { "target": "com.amazonaws.kinesisvideoarchivedmedia#HLSMaxResults", "traits": { - "smithy.api#documentation": "

The maximum number of fragments that are returned in the HLS media playlists.

\n

When the PlaybackMode is LIVE, the most recent fragments are\n returned up to this value. When the PlaybackMode is ON_DEMAND,\n the oldest fragments are returned, up to this maximum number.

\n

When a higher number of fragments is available in a live HLS media playlist,\n video players often buffer content before starting playback. Increasing the buffer size\n increases the playback latency, but it decreases the likelihood that rebuffering will\n occur during playback. We recommend that a live HLS media playlist have a minimum of 3\n fragments and a maximum of 10 fragments.

\n

The default is 5 fragments if PlaybackMode is LIVE or\n LIVE_REPLAY, and 1,000 if PlaybackMode is\n ON_DEMAND.

\n

The maximum value of 5,000 fragments corresponds to more than 80 minutes of video on\n streams with 1-second fragments, and more than 13 hours of video on streams with\n 10-second fragments.

" + "smithy.api#documentation": "

The maximum number of fragments that are returned in the HLS media playlists.

\n

When the PlaybackMode is LIVE, the most recent fragments are\n returned up to this value. When the PlaybackMode is ON_DEMAND,\n the oldest fragments are returned, up to this maximum number.

\n

When a higher number of fragments is available in a live HLS media playlist,\n video players often buffer content before starting playback. Increasing the buffer size\n increases the playback latency, but it decreases the likelihood that rebuffering will\n occur during playback. We recommend that a live HLS media playlist have a minimum of 3\n fragments and a maximum of 10 fragments.

\n

The default is 5 fragments if PlaybackMode is LIVE or\n LIVE_REPLAY, and 1,000 if PlaybackMode is\n ON_DEMAND.

\n

The maximum value of 5,000 fragments corresponds to more than 80 minutes of video on\n streams with 1-second fragments, and more than 13 hours of video on streams with\n 10-second fragments.

" } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.kinesisvideoarchivedmedia#GetHLSStreamingSessionURLOutput": { @@ -1566,6 +1649,9 @@ "smithy.api#documentation": "

The URL (containing the session token) that a media player can use to retrieve the HLS\n master playlist.

" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.kinesisvideoarchivedmedia#GetImages": { @@ -1631,15 +1717,14 @@ "EndTimestamp": { "target": "com.amazonaws.kinesisvideoarchivedmedia#Timestamp", "traits": { - "smithy.api#documentation": "

The end timestamp for the range of images to be generated.

", + "smithy.api#documentation": "

The end timestamp for the range of images to be generated. If the EndTimestamp is more than 300 seconds after the StartTimestamp, you will receive an IllegalArgumentException.

", "smithy.api#required": {} } }, "SamplingInterval": { "target": "com.amazonaws.kinesisvideoarchivedmedia#SamplingInterval", "traits": { - "smithy.api#documentation": "

The time interval in milliseconds (ms) at which the images need to be generated from the stream. The minimum value that can be provided is 3000 ms. If the timestamp range is less than the sampling interval, the Image from the startTimestamp will be returned if available.\n

\n \n

The minimum value of 3000 ms is a soft limit. If needed, a lower sampling frequency can be requested.

\n
", - "smithy.api#required": {} + "smithy.api#documentation": "

The time interval in milliseconds (ms) at which the images need to be generated from\n the stream, with a default of 3000 ms. The minimum value that can be provided is 200 ms.\n If the timestamp range is less than the sampling interval, the image from the\n StartTimestamp will be returned if available.

\n \n

The minimum value of 200 ms is a hard limit.

\n
" } }, "Format": { @@ -1670,7 +1755,7 @@ "MaxResults": { "target": "com.amazonaws.kinesisvideoarchivedmedia#GetImagesMaxResults", "traits": { - "smithy.api#documentation": "

The maximum number of images to be returned by the API.

\n \n

The default limit is 100 images per API response. The additional results will be paginated.

\n
" + "smithy.api#documentation": "

The maximum number of images to be returned by the API.

\n \n

The default limit is 25 images per API response. Providing a MaxResults greater than this value will result in a page size of 25. Any additional results will be paginated.

\n
" } }, "NextToken": { @@ -1679,6 +1764,9 @@ "smithy.api#documentation": "

A token that specifies where to start paginating the next set of Images. This is the GetImages:NextToken from a previously truncated response.
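A minimal pagination sketch with the AWS SDK for Go v2 (the selector type, format, and sampling interval shown are illustrative choices, not values mandated by this model; the client is assumed to already point at the endpoint returned by GetDataEndpoint for GET_IMAGES):

```go
package kvsexamples

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia"
	"github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia/types"
)

// collectImages pages through GetImages until NextToken comes back empty.
// Enum values are passed as raw strings to avoid guessing generated constant names.
func collectImages(ctx context.Context, am *kinesisvideoarchivedmedia.Client, stream string, start, end time.Time) ([]types.Image, error) {
	var images []types.Image
	var next *string
	for {
		out, err := am.GetImages(ctx, &kinesisvideoarchivedmedia.GetImagesInput{
			StreamName:        aws.String(stream),
			ImageSelectorType: types.ImageSelectorType("SERVER_TIMESTAMP"),
			StartTimestamp:    aws.Time(start),
			EndTimestamp:      aws.Time(end), // must be no more than 300 seconds after StartTimestamp
			Format:            types.Format("JPEG"),
			SamplingInterval:  aws.Int32(3000), // one image per 3 seconds of media
			NextToken:         next,
		})
		if err != nil {
			return nil, err
		}
		images = append(images, out.Images...)
		if out.NextToken == nil || *out.NextToken == "" {
			return images, nil
		}
		next = out.NextToken
	}
}
```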

" } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.kinesisvideoarchivedmedia#GetImagesMaxResults": { @@ -1705,6 +1793,9 @@ "smithy.api#documentation": "

The encrypted token that was used in the request to get more images.

" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.kinesisvideoarchivedmedia#GetMediaForFragmentList": { @@ -1730,7 +1821,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets media for a list of fragments (specified by fragment number) from the archived\n data in an Amazon Kinesis video stream.

\n\n \n

You must first call the GetDataEndpoint API to get an endpoint.\n Then send the GetMediaForFragmentList requests to this endpoint using\n the --endpoint-url\n parameter.

\n
\n\n

For limits, see Kinesis Video Streams Limits.

\n\n \n

If an error is thrown after invoking a Kinesis Video Streams archived media API,\n in addition to the HTTP status code and the response body, it includes the following\n pieces of information:

\n \n

Both the HTTP status code and the ErrorType header can be utilized to make\n programmatic decisions about whether errors are retry-able and under what\n conditions, as well as provide information on what actions the client programmer\n might need to take in order to successfully try again.

\n

For more information, see the Errors section at\n the bottom of this topic, as well as Common Errors.\n

\n
", + "smithy.api#documentation": "

Gets media for a list of fragments (specified by fragment number) from the archived\n data in an Amazon Kinesis video stream.

\n \n

You must first call the GetDataEndpoint API to get an endpoint.\n Then send the GetMediaForFragmentList requests to this endpoint using\n the --endpoint-url\n parameter.

\n
\n

For limits, see Kinesis Video Streams Limits.

\n \n

If an error is thrown after invoking a Kinesis Video Streams archived media API,\n in addition to the HTTP status code and the response body, it includes the following\n pieces of information:

\n \n

Both the HTTP status code and the ErrorType header can be utilized to make\n programmatic decisions about whether errors are retry-able and under what\n conditions, as well as provide information on what actions the client programmer\n might need to take in order to successfully try again.

\n

For more information, see the Errors section at\n the bottom of this topic, as well as Common Errors.\n
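A minimal sketch of draining the returned payload with the AWS SDK for Go v2: the output file path and fragment numbers are supplied by the caller (for example, numbers obtained from ListFragments), and the client is assumed to already point at the endpoint from GetDataEndpoint.

```go
package kvsexamples

import (
	"context"
	"io"
	"os"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia"
)

// saveFragments streams the MKV payload for the given fragment numbers to a
// local file.
func saveFragments(ctx context.Context, am *kinesisvideoarchivedmedia.Client, stream, path string, fragmentNumbers []string) error {
	out, err := am.GetMediaForFragmentList(ctx, &kinesisvideoarchivedmedia.GetMediaForFragmentListInput{
		StreamName: aws.String(stream),
		Fragments:  fragmentNumbers,
	})
	if err != nil {
		return err
	}
	defer out.Payload.Close()

	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	// The payload is a sequence of MKV chunks, as described above.
	_, err = io.Copy(f, out.Payload)
	return err
}
```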

\n
", "smithy.api#http": { "method": "POST", "uri": "/getMediaForFragmentList", @@ -1760,6 +1851,9 @@ "smithy.api#required": {} } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.kinesisvideoarchivedmedia#GetMediaForFragmentListOutput": { @@ -1776,44 +1870,53 @@ "target": "com.amazonaws.kinesisvideoarchivedmedia#Payload", "traits": { "smithy.api#default": "", - "smithy.api#documentation": "

The payload that Kinesis Video Streams returns is a sequence of chunks from the\n specified stream. For information about the chunks, see PutMedia. The chunks that Kinesis Video Streams returns in the\n GetMediaForFragmentList call also include the following additional\n Matroska (MKV) tags:

\n \n

The following tags will be included if an exception occurs:

\n ", + "smithy.api#documentation": "

The payload that Kinesis Video Streams returns is a sequence of chunks from the\n specified stream. For information about the chunks, see PutMedia. The chunks that Kinesis Video Streams returns in the\n GetMediaForFragmentList call also include the following additional\n Matroska (MKV) tags:

\n \n

The following tags will be included if an exception occurs:

\n ", "smithy.api#httpPayload": {} } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.kinesisvideoarchivedmedia#HLSDiscontinuityMode": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "ALWAYS", - "name": "ALWAYS" - }, - { - "value": "NEVER", - "name": "NEVER" - }, - { - "value": "ON_DISCONTINUITY", - "name": "ON_DISCONTINUITY" + "type": "enum", + "members": { + "ALWAYS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ALWAYS" + } + }, + "NEVER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NEVER" + } + }, + "ON_DISCONTINUITY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ON_DISCONTINUITY" } - ] + } } }, "com.amazonaws.kinesisvideoarchivedmedia#HLSDisplayFragmentTimestamp": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "ALWAYS", - "name": "ALWAYS" - }, - { - "value": "NEVER", - "name": "NEVER" + "type": "enum", + "members": { + "ALWAYS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ALWAYS" } - ] + }, + "NEVER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NEVER" + } + } } }, "com.amazonaws.kinesisvideoarchivedmedia#HLSFragmentSelector": { @@ -1822,13 +1925,13 @@ "FragmentSelectorType": { "target": "com.amazonaws.kinesisvideoarchivedmedia#HLSFragmentSelectorType", "traits": { - "smithy.api#documentation": "

The source of the timestamps for the requested media.

\n

When FragmentSelectorType is set to PRODUCER_TIMESTAMP and\n GetHLSStreamingSessionURLInput$PlaybackMode is\n ON_DEMAND or LIVE_REPLAY, the first fragment ingested with\n a producer timestamp within the specified FragmentSelector$TimestampRange is included in the media playlist. In\n addition, the fragments with producer timestamps within the TimestampRange\n ingested immediately following the first fragment (up to the GetHLSStreamingSessionURLInput$MaxMediaPlaylistFragmentResults value)\n are included.

\n

Fragments that have duplicate producer timestamps are deduplicated. This means that if\n producers are producing a stream of fragments with producer timestamps that are\n approximately equal to the true clock time, the HLS media playlists will contain all of\n the fragments within the requested timestamp range. If some fragments are ingested\n within the same time range and very different points in time, only the oldest ingested\n collection of fragments are returned.

\n

When FragmentSelectorType is set to PRODUCER_TIMESTAMP and\n GetHLSStreamingSessionURLInput$PlaybackMode is LIVE,\n the producer timestamps are used in the MP4 fragments and for deduplication. But the\n most recently ingested fragments based on server timestamps are included in the HLS\n media playlist. This means that even if fragments ingested in the past have producer\n timestamps with values now, they are not included in the HLS media playlist.

\n

The default is SERVER_TIMESTAMP.

" + "smithy.api#documentation": "

The source of the timestamps for the requested media.

\n

When FragmentSelectorType is set to PRODUCER_TIMESTAMP and\n GetHLSStreamingSessionURLInput$PlaybackMode is\n ON_DEMAND or LIVE_REPLAY, the first fragment ingested with\n a producer timestamp within the specified FragmentSelector$TimestampRange is included in the media playlist. In\n addition, the fragments with producer timestamps within the TimestampRange\n ingested immediately following the first fragment (up to the GetHLSStreamingSessionURLInput$MaxMediaPlaylistFragmentResults value)\n are included.

\n

Fragments that have duplicate producer timestamps are deduplicated. This means that if\n producers are producing a stream of fragments with producer timestamps that are\n approximately equal to the true clock time, the HLS media playlists will contain all of\n the fragments within the requested timestamp range. If some fragments are ingested\n within the same time range and very different points in time, only the oldest ingested\n collection of fragments are returned.

\n

When FragmentSelectorType is set to PRODUCER_TIMESTAMP and\n GetHLSStreamingSessionURLInput$PlaybackMode is LIVE,\n the producer timestamps are used in the MP4 fragments and for deduplication. But the\n most recently ingested fragments based on server timestamps are included in the HLS\n media playlist. This means that even if fragments ingested in the past have producer\n timestamps with values now, they are not included in the HLS media playlist.

\n

The default is SERVER_TIMESTAMP.

" } }, "TimestampRange": { "target": "com.amazonaws.kinesisvideoarchivedmedia#HLSTimestampRange", "traits": { - "smithy.api#documentation": "

The start and end of the timestamp range for the requested media.

\n

This value should not be present if PlaybackType is\n LIVE.

" + "smithy.api#documentation": "

The start and end of the timestamp range for the requested media.

\n

This value should not be present if PlaybackType is\n LIVE.

" } } }, @@ -1837,18 +1940,20 @@ } }, "com.amazonaws.kinesisvideoarchivedmedia#HLSFragmentSelectorType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "PRODUCER_TIMESTAMP", - "name": "PRODUCER_TIMESTAMP" - }, - { - "value": "SERVER_TIMESTAMP", - "name": "SERVER_TIMESTAMP" + "type": "enum", + "members": { + "PRODUCER_TIMESTAMP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PRODUCER_TIMESTAMP" } - ] + }, + "SERVER_TIMESTAMP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SERVER_TIMESTAMP" + } + } } }, "com.amazonaws.kinesisvideoarchivedmedia#HLSMaxResults": { @@ -1861,22 +1966,26 @@ } }, "com.amazonaws.kinesisvideoarchivedmedia#HLSPlaybackMode": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "LIVE", - "name": "LIVE" - }, - { - "value": "LIVE_REPLAY", - "name": "LIVE_REPLAY" - }, - { - "value": "ON_DEMAND", - "name": "ON_DEMAND" + "type": "enum", + "members": { + "LIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LIVE" } - ] + }, + "LIVE_REPLAY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LIVE_REPLAY" + } + }, + "ON_DEMAND": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ON_DEMAND" + } + } } }, "com.amazonaws.kinesisvideoarchivedmedia#HLSStreamingSessionURL": { @@ -1888,18 +1997,18 @@ "StartTimestamp": { "target": "com.amazonaws.kinesisvideoarchivedmedia#Timestamp", "traits": { - "smithy.api#documentation": "

The start of the timestamp range for the requested media.

\n

If the HLSTimestampRange value is specified, the\n StartTimestamp value is required.

\n

Only fragments that start exactly at or after StartTimestamp are included\n in the session. Fragments that start before StartTimestamp and continue\n past it aren't included in the session. If FragmentSelectorType is\n SERVER_TIMESTAMP, the StartTimestamp must be later than\n the stream head.

" + "smithy.api#documentation": "

The start of the timestamp range for the requested media.

\n

If the HLSTimestampRange value is specified, the\n StartTimestamp value is required.

\n

Only fragments that start exactly at or after StartTimestamp are included\n in the session. Fragments that start before StartTimestamp and continue\n past it aren't included in the session. If FragmentSelectorType is\n SERVER_TIMESTAMP, the StartTimestamp must be later than\n the stream head.

" } }, "EndTimestamp": { "target": "com.amazonaws.kinesisvideoarchivedmedia#Timestamp", "traits": { - "smithy.api#documentation": "

The end of the timestamp range for the requested media. This value must be within 24\n hours of the specified StartTimestamp, and it must be later than the\n StartTimestamp value.

\n

If FragmentSelectorType for the request is SERVER_TIMESTAMP,\n this value must be in the past.

\n

The EndTimestamp value is required for ON_DEMAND mode, but\n optional for LIVE_REPLAY mode. If the EndTimestamp is not set\n for LIVE_REPLAY mode then the session will continue to include newly\n ingested fragments until the session expires.

\n \n

This value is inclusive. The EndTimestamp is compared to the\n (starting) timestamp of the fragment. Fragments that start before the\n EndTimestamp value and continue past it are included in the\n session.

\n
" + "smithy.api#documentation": "

The end of the timestamp range for the requested media. This value must be within 24\n hours of the specified StartTimestamp, and it must be later than the\n StartTimestamp value.

\n

If FragmentSelectorType for the request is SERVER_TIMESTAMP,\n this value must be in the past.

\n

The EndTimestamp value is required for ON_DEMAND mode, but\n optional for LIVE_REPLAY mode. If the EndTimestamp is not set\n for LIVE_REPLAY mode then the session will continue to include newly\n ingested fragments until the session expires.

\n \n

This value is inclusive. The EndTimestamp is compared to the\n (starting) timestamp of the fragment. Fragments that start before the\n EndTimestamp value and continue past it are included in the\n session.
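As a small illustration, a bounded ON_DEMAND selector built with the AWS SDK for Go v2 might look like the following sketch (type names mirror this model; the window itself is up to the caller):

```go
package kvsexamples

import (
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia/types"
)

// onDemandHLSSelector builds a selector for a bounded ON_DEMAND window. Both
// timestamps are required in this mode; EndTimestamp is inclusive and must be
// within 24 hours of StartTimestamp, as noted above.
func onDemandHLSSelector(start, end time.Time) *types.HLSFragmentSelector {
	return &types.HLSFragmentSelector{
		FragmentSelectorType: types.HLSFragmentSelectorType("PRODUCER_TIMESTAMP"),
		TimestampRange: &types.HLSTimestampRange{
			StartTimestamp: aws.Time(start),
			EndTimestamp:   aws.Time(end), // fragments starting at or before this instant are included
		},
	}
}
```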

\n
" } } }, "traits": { - "smithy.api#documentation": "

The start and end of the timestamp range for the requested media.

\n

This value should not be present if PlaybackType is\n LIVE.

" + "smithy.api#documentation": "

The start and end of the timestamp range for the requested media.

\n

This value should not be present if PlaybackType is\n LIVE.

" } }, "com.amazonaws.kinesisvideoarchivedmedia#HeightPixels": { @@ -1923,7 +2032,7 @@ "Error": { "target": "com.amazonaws.kinesisvideoarchivedmedia#ImageError", "traits": { - "smithy.api#documentation": "

The error message shown when the image for the provided timestamp was not extracted due to a non-retryable error. An error will be returned if:

\n \n " + "smithy.api#documentation": "

The error message shown when the image for the provided timestamp was not extracted due to a non-retryable error. An error will be returned if:

\n \n " } }, "ImageContent": { @@ -1947,33 +2056,37 @@ } }, "com.amazonaws.kinesisvideoarchivedmedia#ImageError": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "NO_MEDIA", - "name": "NO_MEDIA" - }, - { - "value": "MEDIA_ERROR", - "name": "MEDIA_ERROR" + "type": "enum", + "members": { + "NO_MEDIA": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NO_MEDIA" + } + }, + "MEDIA_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MEDIA_ERROR" } - ] + } } }, "com.amazonaws.kinesisvideoarchivedmedia#ImageSelectorType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "PRODUCER_TIMESTAMP", - "name": "PRODUCER_TIMESTAMP" - }, - { - "value": "SERVER_TIMESTAMP", - "name": "SERVER_TIMESTAMP" + "type": "enum", + "members": { + "PRODUCER_TIMESTAMP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PRODUCER_TIMESTAMP" } - ] + }, + "SERVER_TIMESTAMP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SERVER_TIMESTAMP" + } + } } }, "com.amazonaws.kinesisvideoarchivedmedia#Images": { @@ -2044,7 +2157,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a list of Fragment objects from the specified stream and\n timestamp range within the archived data.

\n

Listing fragments is eventually consistent. This means that even if the producer\n receives an acknowledgment that a fragment is persisted, the result might not be\n returned immediately from a request to ListFragments. However, results are\n typically available in less than one second.

\n \n

You must first call the GetDataEndpoint API to get an endpoint.\n Then send the ListFragments requests to this endpoint using the --endpoint-url\n parameter.

\n
\n\n \n

If an error is thrown after invoking a Kinesis Video Streams archived media API,\n in addition to the HTTP status code and the response body, it includes the following\n pieces of information:

\n \n

Both the HTTP status code and the ErrorType header can be utilized to make\n programmatic decisions about whether errors are retry-able and under what\n conditions, as well as provide information on what actions the client programmer\n might need to take in order to successfully try again.

\n

For more information, see the Errors section at\n the bottom of this topic, as well as Common Errors.\n

\n
", + "smithy.api#documentation": "

Returns a list of Fragment objects from the specified stream and\n timestamp range within the archived data.

\n

Listing fragments is eventually consistent. This means that even if the producer\n receives an acknowledgment that a fragment is persisted, the result might not be\n returned immediately from a request to ListFragments. However, results are\n typically available in less than one second.

\n \n

You must first call the GetDataEndpoint API to get an endpoint.\n Then send the ListFragments requests to this endpoint using the --endpoint-url\n parameter.

\n
\n \n

If an error is thrown after invoking a Kinesis Video Streams archived media API,\n in addition to the HTTP status code and the response body, it includes the following\n pieces of information:

\n \n

Both the HTTP status code and the ErrorType header can be utilized to make\n programmatic decisions about whether errors are retry-able and under what\n conditions, as well as provide information on what actions the client programmer\n might need to take in order to successfully try again.

\n

For more information, see the Errors section at\n the bottom of this topic, as well as Common Errors.\n
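A hedged pagination sketch with the AWS SDK for Go v2 follows; the SERVER_TIMESTAMP selector and the MaxResults value are illustrative choices, not requirements of this model, and the client is assumed to already point at the endpoint from GetDataEndpoint.

```go
package kvsexamples

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia"
	"github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia/types"
)

// listFragmentNumbers pages through ListFragments for a time window and
// returns the fragment numbers, which can then be fed to GetMediaForFragmentList.
func listFragmentNumbers(ctx context.Context, am *kinesisvideoarchivedmedia.Client, stream string, start, end time.Time) ([]string, error) {
	var numbers []string
	var next *string
	for {
		out, err := am.ListFragments(ctx, &kinesisvideoarchivedmedia.ListFragmentsInput{
			StreamName: aws.String(stream),
			MaxResults: aws.Int64(1000),
			NextToken:  next,
			FragmentSelector: &types.FragmentSelector{
				FragmentSelectorType: types.FragmentSelectorType("SERVER_TIMESTAMP"),
				TimestampRange: &types.TimestampRange{
					StartTimestamp: aws.Time(start),
					EndTimestamp:   aws.Time(end),
				},
			},
		})
		if err != nil {
			return nil, err
		}
		for _, frag := range out.Fragments {
			numbers = append(numbers, aws.ToString(frag.FragmentNumber))
		}
		if out.NextToken == nil || *out.NextToken == "" {
			return numbers, nil
		}
		next = out.NextToken
	}
}
```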

\n
", "smithy.api#http": { "method": "POST", "uri": "/listFragments", @@ -2085,6 +2198,9 @@ "smithy.api#documentation": "

Describes the timestamp range and timestamp origin for the range of fragments to\n return.

" } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.kinesisvideoarchivedmedia#ListFragmentsMaxResults": { @@ -2111,6 +2227,9 @@ "smithy.api#documentation": "

If the returned list is truncated, the operation returns this token to use to retrieve\n the next page of results. This value is null when there are no more results\n to return.

" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.kinesisvideoarchivedmedia#Long": { @@ -2192,19 +2311,13 @@ } }, "traits": { - "smithy.api#documentation": "

\n GetMedia throws this error when Kinesis Video Streams can't find the stream\n that you specified.

\n

GetHLSStreamingSessionURL and GetDASHStreamingSessionURL throw this error if a session with a PlaybackMode of ON_DEMAND or LIVE_REPLAY is requested for a stream that has no fragments within the requested time range, or if a session with a PlaybackMode of LIVE is requested for a stream that has no fragments within the last 30 seconds.

", + "smithy.api#documentation": "

GetImages will throw this error when Kinesis Video Streams can't find the stream that you specified. GetHLSStreamingSessionURL and GetDASHStreamingSessionURL throw this error if a session with a PlaybackMode of ON_DEMAND or LIVE_REPLAY is requested for a stream that has no fragments within the requested time range, or if a session with a PlaybackMode of LIVE is requested for a stream that has no fragments within the last 30 seconds.

", "smithy.api#error": "client", "smithy.api#httpError": 404 } }, "com.amazonaws.kinesisvideoarchivedmedia#SamplingInterval": { - "type": "integer", - "traits": { - "smithy.api#range": { - "min": 3000, - "max": 20000 - } - } + "type": "integer" }, "com.amazonaws.kinesisvideoarchivedmedia#StreamName": { "type": "string", diff --git a/codegen/sdk-codegen/aws-models/kinesis-video.json b/codegen/sdk-codegen/aws-models/kinesis-video.json index b4f82b6a02d..164994cb4b4 100644 --- a/codegen/sdk-codegen/aws-models/kinesis-video.json +++ b/codegen/sdk-codegen/aws-models/kinesis-video.json @@ -1899,52 +1899,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -1952,13 +1956,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -1968,224 +1981,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://kinesisvideo-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://kinesisvideo-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + 
"argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://kinesisvideo-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://kinesisvideo-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://kinesisvideo.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://kinesisvideo.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://kinesisvideo.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://kinesisvideo.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] }, @@ -3622,13 +3586,7 @@ } }, "com.amazonaws.kinesisvideo#SamplingInterval": { - "type": "integer", - "traits": { - "smithy.api#range": { - "min": 3000, - "max": 20000 - } - } + "type": "integer" }, "com.amazonaws.kinesisvideo#ScheduleConfig": { "type": "structure", @@ -4574,7 +4532,7 @@ } ], "traits": { - "smithy.api#documentation": "

Associates a SignalingChannel to a stream to store the media. There are two signaling modes that can be specified:
", + "smithy.api#documentation": "
Associates a SignalingChannel to a stream to store the media. There are two signaling modes that can be specified:
If StorageStatus is enabled, direct peer-to-peer (master-viewer) connections no longer occur. Peers connect directly to the storage session. You must call the JoinStorageSession API to trigger an SDP offer send and establish a connection between a peer and the storage session.
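As a hedged illustration of the behavior described above (not part of this model change), a minimal AWS SDK for Go v2 sketch that enables media storage for a signaling channel; the ARNs are placeholders and the Go field names are assumed to follow the generated types.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kinesisvideo"
	"github.com/aws/aws-sdk-go-v2/service/kinesisvideo/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	kv := kinesisvideo.NewFromConfig(cfg)

	// Associate the signaling channel with a stream so media is persisted.
	// Once Status is ENABLED, peers join the storage session through
	// JoinStorageSession instead of connecting to each other directly.
	_, err = kv.UpdateMediaStorageConfiguration(ctx, &kinesisvideo.UpdateMediaStorageConfigurationInput{
		ChannelARN: aws.String("arn:aws:kinesisvideo:us-west-2:111122223333:channel/example-channel/1234567890"),
		MediaStorageConfiguration: &types.MediaStorageConfiguration{
			Status:    types.MediaStorageConfigurationStatusEnabled,
			StreamARN: aws.String("arn:aws:kinesisvideo:us-west-2:111122223333:stream/example-stream/1234567890"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}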
", "smithy.api#http": { "method": "POST", "uri": "/updateMediaStorageConfiguration", diff --git a/codegen/sdk-codegen/aws-models/rekognition.json b/codegen/sdk-codegen/aws-models/rekognition.json index d42a1cdaece..4693921e0c7 100644 --- a/codegen/sdk-codegen/aws-models/rekognition.json +++ b/codegen/sdk-codegen/aws-models/rekognition.json @@ -202,7 +202,7 @@ "ClientRequestToken": { "target": "com.amazonaws.rekognition#ClientRequestToken", "traits": { - "smithy.api#documentation": "

Idempotent token used to identify the request to AssociateFaces. If you use\n the same token with multiple AssociateFaces requests, the same response is returned.\n Use ClientRequestToken to prevent the same request from being processed more than\n once.

", + "smithy.api#documentation": "

Idempotent token used to identify the request to AssociateFaces. If you use\n the same token with multiple AssociateFaces requests, the same response is\n returned. Use ClientRequestToken to prevent the same request from being processed more than\n once.

", "smithy.api#idempotencyToken": {} } } @@ -402,7 +402,7 @@ "Bytes": { "target": "com.amazonaws.rekognition#LivenessImageBlob", "traits": { - "smithy.api#documentation": "

The Base64-encoded bytes representing an image selected from the Face Liveness video and returned for audit purposes.

" + "smithy.api#documentation": "

The Base64-encoded bytes representing an image selected from the Face Liveness video and\n returned for audit purposes.

" } }, "S3Object": { @@ -413,7 +413,7 @@ } }, "traits": { - "smithy.api#documentation": "

An image that is picked from the Face Liveness video and returned for audit trail purposes, returned as Base64-encoded bytes.

" + "smithy.api#documentation": "

An image that is picked from the Face Liveness video and returned for audit trail\n purposes, returned as Base64-encoded bytes.

" } }, "com.amazonaws.rekognition#AuditImages": { @@ -1011,7 +1011,7 @@ } }, "traits": { - "smithy.api#documentation": "

A User with the same Id already exists within the collection, or the update or deletion of the User caused an inconsistent state.
", + "smithy.api#documentation": "
A User with the same Id already exists within the collection, or the update or deletion of the User caused an inconsistent state.

", "smithy.api#error": "client" } }, @@ -1136,19 +1136,19 @@ "StartTimestampMillis": { "target": "com.amazonaws.rekognition#ULong", "traits": { - "smithy.api#documentation": "

The time in milliseconds defining the start of the timeline \n segment containing a continuously detected moderation label.

" + "smithy.api#documentation": "

The time in milliseconds defining the start of the timeline segment containing a\n continuously detected moderation label.

" } }, "EndTimestampMillis": { "target": "com.amazonaws.rekognition#ULong", "traits": { - "smithy.api#documentation": "

The time in milliseconds defining the end of the \n timeline segment containing a continuously detected moderation label.

" + "smithy.api#documentation": "

The time in milliseconds defining the end of the timeline segment containing a\n continuously detected moderation label.

" } }, "DurationMillis": { "target": "com.amazonaws.rekognition#ULong", "traits": { - "smithy.api#documentation": "

The time duration of a segment in milliseconds, i.e. the time elapsed from StartTimestampMillis to EndTimestampMillis.
", + "smithy.api#documentation": "
The time duration of a segment in milliseconds, i.e. the time elapsed from StartTimestampMillis to EndTimestampMillis.

" } } }, @@ -1221,7 +1221,7 @@ "smithy.api#examples": [ { "title": "CopyProjectVersion", - "documentation": "This operation copies a version of an Amazon Rekognition Custom Labels model from a source project to a destination project.", + "documentation": "Copies a version of an Amazon Rekognition Custom Labels model from a source project to a destination project.", "input": { "SourceProjectArn": "arn:aws:rekognition:us-east-1:111122223333:project/SourceProject/16565123456", "SourceProjectVersionArn": "arn:aws:rekognition:us-east-1:111122223333:project/SourceProject/version/model_1/1656611123456", @@ -1466,7 +1466,28 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new Amazon Rekognition Custom Labels dataset. You can create a dataset by using\n an Amazon Sagemaker format manifest file or by copying an existing Amazon Rekognition Custom Labels dataset.

\n

To create a training dataset for a project, specify train for the value of \n DatasetType. To create the test dataset for a project,\n specify test for the value of DatasetType.\n

\n

The response from CreateDataset is the Amazon Resource Name (ARN) for the dataset.\n Creating a dataset takes a while to complete. Use DescribeDataset to check the \n current status. The dataset created successfully if the value of Status is\n CREATE_COMPLETE.

\n

To check if any non-terminal errors occurred, call ListDatasetEntries\nand check for the presence of errors lists in the JSON Lines.

\n

Dataset creation fails if a terminal error occurs (Status = CREATE_FAILED). \n Currently, you can't access the terminal error information.\n \n

\n

For more information, see Creating dataset in the Amazon Rekognition Custom Labels Developer Guide.

\n

This operation requires permissions to perform the rekognition:CreateDataset action.\n If you want to copy an existing dataset, you also require permission to perform the rekognition:ListDatasetEntries action.

" + "smithy.api#documentation": "

Creates a new Amazon Rekognition Custom Labels dataset. You can create a dataset by using an Amazon SageMaker format manifest file or by copying an existing Amazon Rekognition Custom Labels dataset. To create a training dataset for a project, specify TRAIN for the value of DatasetType. To create the test dataset for a project, specify TEST for the value of DatasetType. The response from CreateDataset is the Amazon Resource Name (ARN) for the dataset. Creating a dataset takes a while to complete. Use DescribeDataset to check the current status. The dataset has been created successfully if the value of Status is CREATE_COMPLETE. To check if any non-terminal errors occurred, call ListDatasetEntries and check for the presence of errors lists in the JSON Lines. Dataset creation fails if a terminal error occurs (Status = CREATE_FAILED). Currently, you can't access the terminal error information. For more information, see Creating dataset in the Amazon Rekognition Custom Labels Developer Guide. This operation requires permissions to perform the rekognition:CreateDataset action. If you want to copy an existing dataset, you also require permission to perform the rekognition:ListDatasetEntries action.
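A minimal AWS SDK for Go v2 sketch of the workflow described above, reusing the project ARN, bucket, and manifest key from the example in this patch; the status polling loop is illustrative only, not prescribed by the model.

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/rekognition"
	"github.com/aws/aws-sdk-go-v2/service/rekognition/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	rek := rekognition.NewFromConfig(cfg)

	// Create a TRAIN dataset from a SageMaker Ground Truth manifest.
	out, err := rek.CreateDataset(ctx, &rekognition.CreateDatasetInput{
		ProjectArn:  aws.String("arn:aws:rekognition:us-east-1:111122223333:project/my-project/1690474772815"),
		DatasetType: types.DatasetTypeTrain,
		DatasetSource: &types.DatasetSource{
			GroundTruthManifest: &types.GroundTruthManifest{
				S3Object: &types.S3Object{
					Bucket: aws.String("my-bucket"),
					Name:   aws.String("datasets/flowers_training/manifests/output/output.manifest"),
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Poll DescribeDataset until the dataset leaves CREATE_IN_PROGRESS
	// (CREATE_COMPLETE on success, CREATE_FAILED on a terminal error).
	for {
		d, err := rek.DescribeDataset(ctx, &rekognition.DescribeDatasetInput{DatasetArn: out.DatasetArn})
		if err != nil {
			log.Fatal(err)
		}
		status := d.DatasetDescription.Status
		log.Printf("dataset status: %s", status)
		if status != types.DatasetStatusCreateInProgress {
			break
		}
		time.Sleep(5 * time.Second)
	}
}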

", + "smithy.api#examples": [ + { + "title": "To create an Amazon Rekognition Custom Labels dataset", + "documentation": "Creates an Amazon Rekognition Custom Labels dataset with a manifest file stored in an Amazon S3 bucket.", + "input": { + "DatasetSource": { + "GroundTruthManifest": { + "S3Object": { + "Bucket": "my-bucket", + "Name": "datasets/flowers_training/manifests/output/output.manifest" + } + } + }, + "DatasetType": "TRAIN", + "ProjectArn": "arn:aws:rekognition:us-east-1:111122223333:project/my-project/1690474772815" + }, + "output": { + "DatasetArn": "arn:aws:rekognition:us-east-1:111122223333:project/my-project/dataset/train/1690476084535" + } + } + ] } }, "com.amazonaws.rekognition#CreateDatasetRequest": { @@ -1481,7 +1502,7 @@ "DatasetType": { "target": "com.amazonaws.rekognition#DatasetType", "traits": { - "smithy.api#documentation": "

\nThe type of the dataset. Specify train to create a training dataset. Specify test \n to create a test dataset.\n

", + "smithy.api#documentation": "

\nThe type of the dataset. Specify TRAIN to create a training dataset. Specify TEST \n to create a test dataset.\n

", "smithy.api#required": {} } }, @@ -1537,7 +1558,7 @@ } ], "traits": { - "smithy.api#documentation": "

This API operation initiates a Face Liveness session. It returns a SessionId,\n which you can use to start streaming Face Liveness video and get the results for a Face\n Liveness session. You can use the OutputConfig option in the Settings parameter\n to provide an Amazon S3 bucket location. The Amazon S3 bucket stores reference images and audit images.\n You can use AuditImagesLimit to limit the number of audit images returned. This\n number is between 0 and 4. By default, it is set to 0. The limit is best effort and based on\n the duration of the selfie-video.

", + "smithy.api#documentation": "

This API operation initiates a Face Liveness session. It returns a SessionId, which you can use to start streaming Face Liveness video and get the results for a Face Liveness session. You can use the OutputConfig option in the Settings parameter to provide an Amazon S3 bucket location. The Amazon S3 bucket stores reference images and audit images. If no Amazon S3 bucket is defined, raw bytes are sent instead. You can use AuditImagesLimit to limit the number of audit images returned when GetFaceLivenessSessionResults is called. This number is between 0 and 4. By default, it is set to 0. The limit is best effort and based on the duration of the selfie-video.
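A hedged AWS SDK for Go v2 sketch of the session setup described above; the bucket, key prefix, and audit image limit are assumptions, not part of this patch.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/rekognition"
	"github.com/aws/aws-sdk-go-v2/service/rekognition/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	rek := rekognition.NewFromConfig(cfg)

	// Start a Face Liveness session. With no OutputConfig, audit and
	// reference images would come back as raw bytes instead of S3 objects.
	out, err := rek.CreateFaceLivenessSession(ctx, &rekognition.CreateFaceLivenessSessionInput{
		Settings: &types.CreateFaceLivenessSessionRequestSettings{
			AuditImagesLimit: aws.Int32(2), // allowed range is 0-4; defaults to 0
			OutputConfig: &types.LivenessOutputConfig{
				S3Bucket:    aws.String("my-liveness-audit-bucket"),
				S3KeyPrefix: aws.String("liveness-sessions/"),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("session id: %s", aws.ToString(out.SessionId))
}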

", "smithy.api#idempotent": {} } }, @@ -1547,7 +1568,7 @@ "KmsKeyId": { "target": "com.amazonaws.rekognition#KmsKeyId", "traits": { - "smithy.api#documentation": "

The identifier for your AWS Key Management Service key (AWS KMS key). \n Used to encrypt audit images and reference images.

" + "smithy.api#documentation": "

The identifier for your AWS Key Management Service key (AWS KMS key). Used to encrypt\n audit images and reference images.

" } }, "Settings": { @@ -1584,7 +1605,7 @@ } }, "traits": { - "smithy.api#documentation": "

A session settings object. It contains settings for the operation \n to be performed. It accepts arguments for OutputConfig and AuditImagesLimit.

" + "smithy.api#documentation": "

A session settings object. It contains settings for the operation to be performed. It\n accepts arguments for OutputConfig and AuditImagesLimit.

" } }, "com.amazonaws.rekognition#CreateFaceLivenessSessionResponse": { @@ -1634,7 +1655,19 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new Amazon Rekognition Custom Labels project. A project is a group of resources (datasets, model versions) \n that you use to create and manage Amazon Rekognition Custom Labels models.

\n

This operation requires permissions to perform the rekognition:CreateProject action.

" + "smithy.api#documentation": "

Creates a new Amazon Rekognition Custom Labels project. A project is a group of resources (datasets, model versions) \n that you use to create and manage Amazon Rekognition Custom Labels models.

\n

This operation requires permissions to perform the rekognition:CreateProject action.

", + "smithy.api#examples": [ + { + "title": "To create an Amazon Rekognition Custom Labels project", + "documentation": "Creates an Amazon Rekognition Custom Labels project.", + "input": { + "ProjectName": "my-project" + }, + "output": { + "ProjectArn": "arn:aws:rekognition:us-east-1:111122223333:project/my-project/1690405809285" + } + } + ] } }, "com.amazonaws.rekognition#CreateProjectRequest": { @@ -1704,7 +1737,24 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new version of a model and begins training. \n Models are managed as part of an Amazon Rekognition Custom Labels project. \n The response from CreateProjectVersion\n is an Amazon Resource Name (ARN) for the version of the model.

\n

Training uses the training and test datasets associated with the project. \n For more information, see Creating training and test dataset in the Amazon Rekognition Custom Labels Developer Guide.\n

\n \n

You can train a model in a project that doesn't have associated datasets by specifying manifest files in the\n TrainingData and TestingData fields.\n

\n

If you open the console after training a model with manifest files, Amazon Rekognition Custom Labels creates\n the datasets for you using the most recent manifest files. You can no longer train\n a model version for the project by specifying manifest files.

\n

Instead of training with a project without associated datasets,\n we recommend that you use the manifest\n files to create training and test datasets for the project.

\n
\n

Training takes a while to complete. You can get the current status by calling\n DescribeProjectVersions. Training completed successfully if\n the value of the Status field is TRAINING_COMPLETED.

\n

If training \n fails, see Debugging a failed model training in the Amazon Rekognition Custom Labels developer guide.

\n

Once training has successfully completed, call DescribeProjectVersions to\n get the training results and evaluate the model. For more information, see Improving a trained Amazon Rekognition Custom Labels model\n in the Amazon Rekognition Custom Labels developers guide.\n

\n

After evaluating the model, you start the model\n by calling StartProjectVersion.

\n

This operation requires permissions to perform the rekognition:CreateProjectVersion action.

" + "smithy.api#documentation": "

Creates a new version of a model and begins training. Models are managed as part of an Amazon Rekognition Custom Labels project. The response from CreateProjectVersion is an Amazon Resource Name (ARN) for the version of the model. Training uses the training and test datasets associated with the project. For more information, see Creating training and test dataset in the Amazon Rekognition Custom Labels Developer Guide. You can train a model in a project that doesn't have associated datasets by specifying manifest files in the TrainingData and TestingData fields. If you open the console after training a model with manifest files, Amazon Rekognition Custom Labels creates the datasets for you using the most recent manifest files. You can no longer train a model version for the project by specifying manifest files. Instead of training with a project without associated datasets, we recommend that you use the manifest files to create training and test datasets for the project. Training takes a while to complete. You can get the current status by calling DescribeProjectVersions. Training has completed successfully if the value of the Status field is TRAINING_COMPLETED. If training fails, see Debugging a failed model training in the Amazon Rekognition Custom Labels Developer Guide. Once training has successfully completed, call DescribeProjectVersions to get the training results and evaluate the model. For more information, see Improving a trained Amazon Rekognition Custom Labels model in the Amazon Rekognition Custom Labels Developer Guide. After evaluating the model, you start the model by calling StartProjectVersion. This operation requires permissions to perform the rekognition:CreateProjectVersion action.
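An illustrative AWS SDK for Go v2 sketch of the train-then-describe workflow described above, reusing the project ARN, version name, and output location from the example in this patch; it is not part of the model change itself.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/rekognition"
	"github.com/aws/aws-sdk-go-v2/service/rekognition/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	rek := rekognition.NewFromConfig(cfg)

	projectArn := aws.String("arn:aws:rekognition:us-east-1:111122223333:project/my-project/1690474772815")

	// Start training a new model version; training results land in the given S3 location.
	out, err := rek.CreateProjectVersion(ctx, &rekognition.CreateProjectVersionInput{
		ProjectArn:  projectArn,
		VersionName: aws.String("1"),
		OutputConfig: &types.OutputConfig{
			S3Bucket:    aws.String("output_bucket"),
			S3KeyPrefix: aws.String("output_folder"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Check training status; look for TRAINING_COMPLETED before evaluating the model.
	desc, err := rek.DescribeProjectVersions(ctx, &rekognition.DescribeProjectVersionsInput{
		ProjectArn:   projectArn,
		VersionNames: []string{"1"},
	})
	if err != nil {
		log.Fatal(err)
	}
	if len(desc.ProjectVersionDescriptions) > 0 {
		log.Printf("model %s status: %s",
			aws.ToString(out.ProjectVersionArn), desc.ProjectVersionDescriptions[0].Status)
	}
}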

", + "smithy.api#examples": [ + { + "title": "To train an Amazon Rekognition Custom Labels model", + "documentation": "Trains a version of an Amazon Rekognition Custom Labels model.", + "input": { + "ProjectArn": "arn:aws:rekognition:us-east-1:111122223333:project/my-project/1690474772815", + "VersionName": "1", + "OutputConfig": { + "S3Bucket": "output_bucket", + "S3KeyPrefix": "output_folder" + } + }, + "output": { + "ProjectVersionArn": "arn:aws:rekognition:us-east-1:111122223333:project/my-project/version/1/1690556751958" + } + } + ] } }, "com.amazonaws.rekognition#CreateProjectVersionRequest": { @@ -1934,7 +1984,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new User within a collection specified by CollectionId. Takes\n UserId as a parameter, which is a user provided ID which should be unique\n within the collection. The provided UserId will alias the system generated\n UUID to make the UserId more user friendly.

\n

Uses a ClientToken, an idempotency token that ensures a call to\n CreateUser completes only once. If the value is not supplied, the AWS SDK\n generates an idempotency token for the requests. This prevents retries after a network\n error results from making multiple CreateUser calls.

", + "smithy.api#documentation": "

Creates a new User within a collection specified by CollectionId. Takes UserId as a parameter, which is a user-provided ID that should be unique within the collection. The provided UserId will alias the system generated UUID to make the UserId more user friendly. Uses a ClientToken, an idempotency token that ensures a call to CreateUser completes only once. If the value is not supplied, the AWS SDK generates an idempotency token for the request. This prevents retries after a network error results from making multiple CreateUser calls.
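A minimal AWS SDK for Go v2 sketch of the idempotent CreateUser call described above; the collection ID, user ID, and token value are assumptions.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/rekognition"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	rek := rekognition.NewFromConfig(cfg)

	// Supplying our own ClientRequestToken makes retries of this call idempotent;
	// if it is omitted, the SDK generates one automatically.
	_, err = rek.CreateUser(ctx, &rekognition.CreateUserInput{
		CollectionId:       aws.String("MyCollection"),
		UserId:             aws.String("DemoUser"),
		ClientRequestToken: aws.String("550e8400-e29b-41d4-a716-446655440000"),
	})
	if err != nil {
		log.Fatal(err)
	}
}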

", "smithy.api#examples": [ { "title": "CreateUser", @@ -1967,7 +2017,7 @@ "ClientRequestToken": { "target": "com.amazonaws.rekognition#ClientRequestToken", "traits": { - "smithy.api#documentation": "

Idempotent token used to identify the request to CreateUser. If you use the\n same token with multiple CreateUser requests, the same response is returned. \n Use ClientRequestToken to prevent the same request from being processed more than\n once.

", + "smithy.api#documentation": "

Idempotent token used to identify the request to CreateUser. If you use the\n same token with multiple CreateUser requests, the same response is returned. Use\n ClientRequestToken to prevent the same request from being processed more than once.

", "smithy.api#idempotencyToken": {} } } @@ -2474,7 +2524,17 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes an existing Amazon Rekognition Custom Labels dataset. Deleting a dataset might take a while. Use DescribeDataset to check the current status. The dataset is still deleting if the value of Status is DELETE_IN_PROGRESS. If you try to access the dataset after it is deleted, you get a ResourceNotFoundException exception.

\n

You can't delete a dataset while it is creating (Status = CREATE_IN_PROGRESS)\n or if the dataset is updating (Status = UPDATE_IN_PROGRESS).

\n

This operation requires permissions to perform the rekognition:DeleteDataset action.

" + "smithy.api#documentation": "

Deletes an existing Amazon Rekognition Custom Labels dataset. Deleting a dataset might take a while. Use DescribeDataset to check the current status. The dataset is still deleting if the value of Status is DELETE_IN_PROGRESS. If you try to access the dataset after it is deleted, you get a ResourceNotFoundException exception. You can't delete a dataset while it is creating (Status = CREATE_IN_PROGRESS) or if the dataset is updating (Status = UPDATE_IN_PROGRESS). This operation requires permissions to perform the rekognition:DeleteDataset action.
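For illustration only: a small AWS SDK for Go v2 sketch that deletes a dataset and then checks its status as described above, using the dataset ARN from the example in this patch.

package main

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/rekognition"
	"github.com/aws/aws-sdk-go-v2/service/rekognition/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	rek := rekognition.NewFromConfig(cfg)

	arn := aws.String("arn:aws:rekognition:us-east-1:111122223333:project/my-project/dataset/test/1690556733321")
	if _, err := rek.DeleteDataset(ctx, &rekognition.DeleteDatasetInput{DatasetArn: arn}); err != nil {
		log.Fatal(err)
	}

	// While deletion is in progress DescribeDataset reports DELETE_IN_PROGRESS;
	// once it finishes, the same call returns ResourceNotFoundException.
	d, err := rek.DescribeDataset(ctx, &rekognition.DescribeDatasetInput{DatasetArn: arn})
	var rnf *types.ResourceNotFoundException
	switch {
	case errors.As(err, &rnf):
		log.Println("dataset already deleted")
	case err != nil:
		log.Fatal(err)
	default:
		log.Printf("dataset status: %s", d.DatasetDescription.Status)
	}
}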

", + "smithy.api#examples": [ + { + "title": "To delete an Amazon Rekognition Custom Labels dataset", + "documentation": "Deletes an Amazon Rekognition Custom Labels dataset.", + "input": { + "DatasetArn": "arn:aws:rekognition:us-east-1:111122223333:project/my-project/dataset/test/1690556733321" + }, + "output": {} + } + ] } }, "com.amazonaws.rekognition#DeleteDatasetRequest": { @@ -2622,7 +2682,19 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes an Amazon Rekognition Custom Labels project. To delete a project you must first delete all models associated \n with the project. To delete a model, see DeleteProjectVersion.

\n

\n DeleteProject is an asynchronous operation. To check if the project is\n deleted, call DescribeProjects. The project is deleted when the project\n no longer appears in the response. Be aware that deleting a given project will also delete\n any ProjectPolicies associated with that project.

\n

This operation requires permissions to perform the\n rekognition:DeleteProject action.

" + "smithy.api#documentation": "

Deletes an Amazon Rekognition Custom Labels project. To delete a project you must first delete all models associated \n with the project. To delete a model, see DeleteProjectVersion.

\n

\n DeleteProject is an asynchronous operation. To check if the project is\n deleted, call DescribeProjects. The project is deleted when the project\n no longer appears in the response. Be aware that deleting a given project will also delete\n any ProjectPolicies associated with that project.

\n

This operation requires permissions to perform the\n rekognition:DeleteProject action.

", + "smithy.api#examples": [ + { + "title": "To delete an Amazon Rekognition Custom Labels project", + "documentation": "Deletes an Amazon Rekognition Custom Labels projects.", + "input": { + "ProjectArn": "arn:aws:rekognition:us-east-1:111122223333:project/my-project/1690405809285" + }, + "output": { + "Status": "DELETING" + } + } + ] } }, "com.amazonaws.rekognition#DeleteProjectPolicy": { @@ -2768,7 +2840,19 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes an Amazon Rekognition Custom Labels model.

\n

You can't delete a model if it is running or if it is training. \n To check the status of a model, use the Status field returned\n from DescribeProjectVersions.\n To stop a running model call StopProjectVersion. If the model\n is training, wait until it finishes.

\n

This operation requires permissions to perform the\n rekognition:DeleteProjectVersion action.

" + "smithy.api#documentation": "

Deletes an Amazon Rekognition Custom Labels model.

\n

You can't delete a model if it is running or if it is training. \n To check the status of a model, use the Status field returned\n from DescribeProjectVersions.\n To stop a running model call StopProjectVersion. If the model\n is training, wait until it finishes.

\n

This operation requires permissions to perform the\n rekognition:DeleteProjectVersion action.

", + "smithy.api#examples": [ + { + "title": "To delete an Amazon Rekognition Custom Labels model", + "documentation": "Deletes a version of an Amazon Rekognition Custom Labels model.", + "input": { + "ProjectVersionArn": "arn:aws:rekognition:us-east-1:111122223333:project/my-project/version/1/1690556751958" + }, + "output": { + "Status": "DELETING" + } + } + ] } }, "com.amazonaws.rekognition#DeleteProjectVersionRequest": { @@ -2926,7 +3010,7 @@ "ClientRequestToken": { "target": "com.amazonaws.rekognition#ClientRequestToken", "traits": { - "smithy.api#documentation": "

Idempotent token used to identify the request to DeleteUser. If you use the\n same token with multiple DeleteUser requests, the same response is returned. \n Use ClientRequestToken to prevent the same request from being processed more than\n once.

", + "smithy.api#documentation": "

Idempotent token used to identify the request to DeleteUser. If you use the\n same token with multiple DeleteUser requests, the same response is returned. Use\n ClientRequestToken to prevent the same request from being processed more than once.

", "smithy.api#idempotencyToken": {} } } @@ -3500,7 +3584,36 @@ } ], "traits": { - "smithy.api#documentation": "

Detects custom labels in a supplied image by using an Amazon Rekognition Custom Labels model.

\n

You specify which version of a model version to use by using the ProjectVersionArn input\n parameter.

\n

You pass the input image as base64-encoded image bytes or as a reference to an image in\n an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing\n image bytes is not supported. The image must be either a PNG or JPEG formatted file.

\n

For each object that the model version detects on an image, the API returns a \n (CustomLabel) object in an array (CustomLabels).\n Each CustomLabel object provides the label name (Name), the level\n of confidence that the image contains the object (Confidence), and \n object location information, if it exists, for the label on the image (Geometry).

\n

To filter labels that are returned, specify a value for MinConfidence.\n DetectCustomLabelsLabels only returns labels with a confidence that's higher than\n the specified value.\n\n The value of MinConfidence maps to the assumed threshold values\n created during training. For more information, see Assumed threshold\n in the Amazon Rekognition Custom Labels Developer Guide. \n Amazon Rekognition Custom Labels metrics expresses an assumed threshold as a floating point value between 0-1. The range of\n MinConfidence normalizes the threshold value to a percentage value (0-100). Confidence\n responses from DetectCustomLabels are also returned as a percentage. \n You can use MinConfidence to change the precision and recall or your model. \n For more information, see \n Analyzing an image in the Amazon Rekognition Custom Labels Developer Guide.

\n

If you don't specify a value for MinConfidence, DetectCustomLabels\n returns labels based on the assumed threshold of each label.

\n

This is a stateless API operation. That is, the operation does not persist any\n data.

\n

This operation requires permissions to perform the\n rekognition:DetectCustomLabels action.

\n

For more information, see \n Analyzing an image in the Amazon Rekognition Custom Labels Developer Guide.

" + "smithy.api#documentation": "

Detects custom labels in a supplied image by using an Amazon Rekognition Custom Labels model.

\n

You specify which version of a model version to use by using the ProjectVersionArn input\n parameter.

\n

You pass the input image as base64-encoded image bytes or as a reference to an image in\n an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing\n image bytes is not supported. The image must be either a PNG or JPEG formatted file.

\n

For each object that the model version detects on an image, the API returns a \n (CustomLabel) object in an array (CustomLabels).\n Each CustomLabel object provides the label name (Name), the level\n of confidence that the image contains the object (Confidence), and \n object location information, if it exists, for the label on the image (Geometry).

\n

To filter labels that are returned, specify a value for MinConfidence. DetectCustomLabels only returns labels with a confidence that's higher than the specified value. The value of MinConfidence maps to the assumed threshold values created during training. For more information, see Assumed threshold in the Amazon Rekognition Custom Labels Developer Guide. Amazon Rekognition Custom Labels metrics express an assumed threshold as a floating point value between 0-1. The range of MinConfidence normalizes the threshold value to a percentage value (0-100). Confidence responses from DetectCustomLabels are also returned as a percentage. You can use MinConfidence to change the precision and recall of your model. For more information, see Analyzing an image in the Amazon Rekognition Custom Labels Developer Guide.

\n

If you don't specify a value for MinConfidence, DetectCustomLabels\n returns labels based on the assumed threshold of each label.

\n

This is a stateless API operation. That is, the operation does not persist any\n data.

\n

This operation requires permissions to perform the\n rekognition:DetectCustomLabels action.

\n

For more information, see \n Analyzing an image in the Amazon Rekognition Custom Labels Developer Guide.
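A hedged AWS SDK for Go v2 sketch of the stateless DetectCustomLabels call described above, reusing the model version ARN, bucket, and key from the example in this patch.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/rekognition"
	"github.com/aws/aws-sdk-go-v2/service/rekognition/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	rek := rekognition.NewFromConfig(cfg)

	// Analyze an S3-hosted image with a running model version; MinConfidence
	// overrides the per-label assumed thresholds (values are placeholders).
	out, err := rek.DetectCustomLabels(ctx, &rekognition.DetectCustomLabelsInput{
		ProjectVersionArn: aws.String("arn:aws:rekognition:us-east-1:111122223333:project/my-project/version/my-project.2023-07-31T11.49.37/1690829378219"),
		Image: &types.Image{
			S3Object: &types.S3Object{
				Bucket: aws.String("custom-labels-console-us-east-1-1111111111"),
				Name:   aws.String("assets/flowers_1_test_dataset/camellia4.jpg"),
			},
		},
		MaxResults:    aws.Int32(100),
		MinConfidence: aws.Float32(50),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, l := range out.CustomLabels {
		log.Printf("%s (%.2f%%)", aws.ToString(l.Name), aws.ToFloat32(l.Confidence))
	}
}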

", + "smithy.api#examples": [ + { + "title": "To detect custom labels in an image with an Amazon Rekognition Custom Labels model", + "documentation": "Detects custom labels in an image with an Amazon Rekognition Custom Labels model", + "input": { + "ProjectVersionArn": "arn:aws:rekognition:us-east-1:111122223333:project/my-project/version/my-project.2023-07-31T11.49.37/1690829378219", + "Image": { + "S3Object": { + "Bucket": "custom-labels-console-us-east-1-1111111111", + "Name": "assets/flowers_1_test_dataset/camellia4.jpg" + } + }, + "MaxResults": 100, + "MinConfidence": 50 + }, + "output": { + "CustomLabels": [ + { + "Name": "with_leaves", + "Confidence": 67.56399536132812 + }, + { + "Name": "without_leaves", + "Confidence": 50.65699768066406 + } + ] + } + } + ] } }, "com.amazonaws.rekognition#DetectCustomLabelsRequest": { @@ -3665,7 +3778,7 @@ "Attributes": { "target": "com.amazonaws.rekognition#Attributes", "traits": { - "smithy.api#documentation": "

An array of facial attributes you want to be returned. A DEFAULT subset of\n facial attributes - BoundingBox, Confidence, Pose,\n Quality, and Landmarks - will always be returned. You can request\n for specific facial attributes (in addition to the default list) - by using [\"DEFAULT\",\n \"FACE_OCCLUDED\"] or just [\"FACE_OCCLUDED\"]. You can request for all\n facial attributes by using [\"ALL\"]. Requesting more attributes may increase\n response time.

\n

If you provide both, [\"ALL\", \"DEFAULT\"], the service uses a logical \"AND\"\n operator to determine which attributes to return (in this case, all attributes).

" + "smithy.api#documentation": "

An array of facial attributes you want to be returned. A DEFAULT subset of\n facial attributes - BoundingBox, Confidence, Pose,\n Quality, and Landmarks - will always be returned. You can request\n for specific facial attributes (in addition to the default list) - by using [\"DEFAULT\",\n \"FACE_OCCLUDED\"] or just [\"FACE_OCCLUDED\"]. You can request for all\n facial attributes by using [\"ALL\"]. Requesting more attributes may increase\n response time.

\n

If you provide both, [\"ALL\", \"DEFAULT\"], the service uses a logical \"AND\"\n operator to determine which attributes to return (in this case, all attributes).

\n

Note that while the FaceOccluded and EyeDirection attributes are supported when using\n DetectFaces, they aren't supported when analyzing videos with\n StartFaceDetection and GetFaceDetection.

" } } }, @@ -3728,7 +3841,7 @@ } ], "traits": { - "smithy.api#documentation": "

Detects instances of real-world entities within an image (JPEG or PNG) provided as\n input. This includes objects like flower, tree, and table; events like wedding, graduation,\n and birthday party; and concepts like landscape, evening, and nature.

\n

For an example, see Analyzing images stored in an Amazon S3 bucket in the\n Amazon Rekognition Developer Guide.

\n

You pass the input image as base64-encoded image bytes or as a reference to an image in\n an Amazon S3 bucket. If you use the\n AWS\n CLI to call Amazon Rekognition operations, passing image bytes is not\n supported. The image must be either a PNG or JPEG formatted file.

\n

\n Optional Parameters\n

\n

You can specify one or both of the GENERAL_LABELS and\n IMAGE_PROPERTIES feature types when calling the DetectLabels API. Including\n GENERAL_LABELS will ensure the response includes the labels detected in the\n input image, while including IMAGE_PROPERTIES will ensure the response includes\n information about the image quality and color.

\n

When using GENERAL_LABELS and/or IMAGE_PROPERTIES you can\n provide filtering criteria to the Settings parameter. You can filter with sets of individual\n labels or with label categories. You can specify inclusive filters, exclusive filters, or a\n combination of inclusive and exclusive filters. For more information on filtering see Detecting\n Labels in an Image.

\n

You can specify MinConfidence to control the confidence threshold for the\n labels returned. The default is 55%. You can also add the MaxLabels parameter to\n limit the number of labels returned. The default and upper limit is 1000 labels.

\n

\n Response Elements\n

\n

For each object, scene, and concept the API returns one or more labels. The API\n returns the following types of information about labels:

\n \n

The API returns the following information regarding the image, as part of the\n ImageProperties structure:

\n \n

The list of returned labels will include at least one label for every detected object,\n along with information about that label. In the following example, suppose the input image has\n a lighthouse, the sea, and a rock. The response includes all three labels, one for each\n object, as well as the confidence in the label:

\n

\n {Name: lighthouse, Confidence: 98.4629}\n

\n

\n {Name: rock,Confidence: 79.2097}\n

\n

\n {Name: sea,Confidence: 75.061}\n

\n

The list of labels can include multiple labels for the same object. For example, if the\n input image shows a flower (for example, a tulip), the operation might return the following\n three labels.

\n

\n {Name: flower,Confidence: 99.0562}\n

\n

\n {Name: plant,Confidence: 99.0562}\n

\n

\n {Name: tulip,Confidence: 99.0562}\n

\n

In this example, the detection algorithm more precisely identifies the flower as a\n tulip.

\n \n

If the object detected is a person, the operation doesn't provide the same facial\n details that the DetectFaces operation provides.

\n
\n

This is a stateless API operation that doesn't return any data.

\n

This operation requires permissions to perform the\n rekognition:DetectLabels action.

", + "smithy.api#documentation": "

Detects instances of real-world entities within an image (JPEG or PNG) provided as\n input. This includes objects like flower, tree, and table; events like wedding, graduation,\n and birthday party; and concepts like landscape, evening, and nature.

\n

For an example, see Analyzing images stored in an Amazon S3 bucket in the\n Amazon Rekognition Developer Guide.

\n

You pass the input image as base64-encoded image bytes or as a reference to an image in\n an Amazon S3 bucket. If you use the\n AWS\n CLI to call Amazon Rekognition operations, passing image bytes is not\n supported. The image must be either a PNG or JPEG formatted file.

\n

\n Optional Parameters\n

\n

You can specify one or both of the GENERAL_LABELS and\n IMAGE_PROPERTIES feature types when calling the DetectLabels API. Including\n GENERAL_LABELS will ensure the response includes the labels detected in the\n input image, while including IMAGE_PROPERTIES will ensure the response includes\n information about the image quality and color.

\n

When using GENERAL_LABELS and/or IMAGE_PROPERTIES you can\n provide filtering criteria to the Settings parameter. You can filter with sets of individual\n labels or with label categories. You can specify inclusive filters, exclusive filters, or a\n combination of inclusive and exclusive filters. For more information on filtering see Detecting\n Labels in an Image.

\n

When getting labels, you can specify MinConfidence to control the\n confidence threshold for the labels returned. The default is 55%. You can also add the\n MaxLabels parameter to limit the number of labels returned. The default and\n upper limit is 1000 labels. These arguments are only valid when supplying GENERAL_LABELS as a\n feature type.

\n

\n Response Elements\n

\n

For each object, scene, and concept the API returns one or more labels. The API\n returns the following types of information about labels:

\n \n

The API returns the following information regarding the image, as part of the\n ImageProperties structure:

\n \n

The list of returned labels will include at least one label for every detected object,\n along with information about that label. In the following example, suppose the input image has\n a lighthouse, the sea, and a rock. The response includes all three labels, one for each\n object, as well as the confidence in the label:

\n

\n {Name: lighthouse, Confidence: 98.4629}\n

\n

\n {Name: rock,Confidence: 79.2097}\n

\n

\n {Name: sea,Confidence: 75.061}\n

\n

The list of labels can include multiple labels for the same object. For example, if the\n input image shows a flower (for example, a tulip), the operation might return the following\n three labels.

\n

\n {Name: flower,Confidence: 99.0562}\n

\n

\n {Name: plant,Confidence: 99.0562}\n

\n

\n {Name: tulip,Confidence: 99.0562}\n

\n

In this example, the detection algorithm more precisely identifies the flower as a\n tulip.

\n \n

If the object detected is a person, the operation doesn't provide the same facial\n details that the DetectFaces operation provides.

\n
\n

This is a stateless API operation that doesn't return any data.

\n

This operation requires permissions to perform the\n rekognition:DetectLabels action.
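An illustrative AWS SDK for Go v2 sketch of a DetectLabels request combining GENERAL_LABELS and IMAGE_PROPERTIES as described above; the bucket, key, and filter values are assumptions, not part of this patch.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/rekognition"
	"github.com/aws/aws-sdk-go-v2/service/rekognition/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	rek := rekognition.NewFromConfig(cfg)

	// Request both label detection and image properties; MaxLabels and
	// MinConfidence apply only to GENERAL_LABELS.
	out, err := rek.DetectLabels(ctx, &rekognition.DetectLabelsInput{
		Image: &types.Image{
			S3Object: &types.S3Object{
				Bucket: aws.String("my-bucket"),
				Name:   aws.String("photos/lighthouse.jpg"),
			},
		},
		Features: []types.DetectLabelsFeatureName{
			types.DetectLabelsFeatureNameGeneralLabels,
			types.DetectLabelsFeatureNameImageProperties,
		},
		MaxLabels:     aws.Int32(10),
		MinConfidence: aws.Float32(70),
		Settings: &types.DetectLabelsSettings{
			GeneralLabels:   &types.GeneralLabelsSettings{LabelCategoryInclusionFilters: []string{"Buildings and Architecture"}},
			ImageProperties: &types.DetectLabelsImagePropertiesSettings{MaxDominantColors: 5},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, l := range out.Labels {
		log.Printf("%s: %.2f", aws.ToString(l.Name), aws.ToFloat32(l.Confidence))
	}
}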

", "smithy.api#examples": [ { "title": "To detect labels", @@ -3800,7 +3913,7 @@ "DominantColors": { "target": "com.amazonaws.rekognition#DominantColors", "traits": { - "smithy.api#documentation": "

The dominant colors found in the background of an image, defined with RGB values, \n CSS color name, simplified color name, and PixelPercentage (the percentage of \n image pixels that have a particular color).

" + "smithy.api#documentation": "

The dominant colors found in the background of an image, defined with RGB values, CSS\n color name, simplified color name, and PixelPercentage (the percentage of image pixels that\n have a particular color).

" } } }, @@ -3820,7 +3933,7 @@ "DominantColors": { "target": "com.amazonaws.rekognition#DominantColors", "traits": { - "smithy.api#documentation": "

The dominant colors found in the foreground of an image, defined with RGB values, \n CSS color name, simplified color name, and PixelPercentage (the percentage of image \n pixels that have a particular color).

" + "smithy.api#documentation": "

The dominant colors found in the foreground of an image, defined with RGB values, CSS\n color name, simplified color name, and PixelPercentage (the percentage of image pixels that\n have a particular color).

" } } }, @@ -3834,30 +3947,30 @@ "Quality": { "target": "com.amazonaws.rekognition#DetectLabelsImageQuality", "traits": { - "smithy.api#documentation": "

Information about the quality of the image foreground as defined by brightness, \n sharpness, and contrast. The higher the value the greater the brightness, \n sharpness, and contrast respectively.

" + "smithy.api#documentation": "

Information about the quality of the image foreground as defined by brightness, sharpness,\n and contrast. The higher the value the greater the brightness, sharpness, and contrast\n respectively.

" } }, "DominantColors": { "target": "com.amazonaws.rekognition#DominantColors", "traits": { - "smithy.api#documentation": "

Information about the dominant colors found in an image, described with RGB values, \n CSS color name, simplified color name, and PixelPercentage (the percentage of image pixels \n that have a particular color).

" + "smithy.api#documentation": "

Information about the dominant colors found in an image, described with RGB values, CSS\n color name, simplified color name, and PixelPercentage (the percentage of image pixels that\n have a particular color).

" } }, "Foreground": { "target": "com.amazonaws.rekognition#DetectLabelsImageForeground", "traits": { - "smithy.api#documentation": "

Information about the properties of an image’s foreground, including the \n foreground’s quality and dominant colors, including the quality and dominant colors of the image.

" + "smithy.api#documentation": "

Information about the properties of an image’s foreground, including the foreground’s\n quality and dominant colors, including the quality and dominant colors of the image.

" } }, "Background": { "target": "com.amazonaws.rekognition#DetectLabelsImageBackground", "traits": { - "smithy.api#documentation": "

Information about the properties of an image’s background, including \n the background’s quality and dominant colors, including the quality \n and dominant colors of the image.

" + "smithy.api#documentation": "

Information about the properties of an image’s background, including the background’s\n quality and dominant colors, including the quality and dominant colors of the image.

" } } }, "traits": { - "smithy.api#documentation": "

Information about the quality and dominant colors of an input image. \n Quality and color information is returned for the entire image, foreground, and background.

" + "smithy.api#documentation": "

Information about the quality and dominant colors of an input image. Quality and color\n information is returned for the entire image, foreground, and background.

" } }, "com.amazonaws.rekognition#DetectLabelsImagePropertiesSettings": { @@ -3867,7 +3980,7 @@ "target": "com.amazonaws.rekognition#DetectLabelsMaxDominantColors", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The maximum number of dominant colors to return when detecting labels in an image. The default value is 10.

" + "smithy.api#documentation": "

The maximum number of dominant colors to return when detecting labels in an image. The\n default value is 10.

" } } }, @@ -3898,7 +4011,7 @@ } }, "traits": { - "smithy.api#documentation": "

The quality of an image provided for label detection, with regard to brightness, sharpness, and contrast.

" + "smithy.api#documentation": "

The quality of an image provided for label detection, with regard to brightness,\n sharpness, and contrast.

" } }, "com.amazonaws.rekognition#DetectLabelsMaxDominantColors": { @@ -3924,25 +4037,25 @@ "MaxLabels": { "target": "com.amazonaws.rekognition#UInteger", "traits": { - "smithy.api#documentation": "

Maximum number of labels you want the service to return in the response. The service\n returns the specified number of highest confidence labels.

" + "smithy.api#documentation": "

Maximum number of labels you want the service to return in the response. The service\n returns the specified number of highest confidence labels. Only valid when GENERAL_LABELS is\n specified as a feature type in the Feature input parameter.

" } }, "MinConfidence": { "target": "com.amazonaws.rekognition#Percent", "traits": { - "smithy.api#documentation": "

Specifies the minimum confidence level for the labels to return. Amazon Rekognition doesn't\n return any labels with confidence lower than this specified value.

\n

If MinConfidence is not specified, the operation returns labels with a confidence value greater than or equal to 55 percent.

" + "smithy.api#documentation": "

Specifies the minimum confidence level for the labels to return. Amazon Rekognition doesn't\n return any labels with confidence lower than this specified value.

\n

If MinConfidence is not specified, the operation returns labels with a confidence value greater than or equal to 55 percent. Only valid when GENERAL_LABELS is specified as a feature type in the Feature input parameter.

" } }, "Features": { "target": "com.amazonaws.rekognition#DetectLabelsFeatureList", "traits": { - "smithy.api#documentation": "

A list of the types of analysis to perform. Specifying GENERAL_LABELS uses the label detection \n feature, while specifying IMAGE_PROPERTIES returns information regarding image color and quality. \n If no option is specified GENERAL_LABELS is used by default.

" + "smithy.api#documentation": "

A list of the types of analysis to perform. Specifying GENERAL_LABELS uses the label\n detection feature, while specifying IMAGE_PROPERTIES returns information regarding image color\n and quality. If no option is specified GENERAL_LABELS is used by default.

" } }, "Settings": { "target": "com.amazonaws.rekognition#DetectLabelsSettings", "traits": { - "smithy.api#documentation": "

A list of the filters to be applied to returned detected labels and image properties. Specified \n filters can be inclusive, exclusive, or a combination of both. Filters can be used for individual \n labels or label categories. The exact label names or label categories must be supplied. For \n a full list of labels and label categories, see Detecting labels.

" + "smithy.api#documentation": "

A list of the filters to be applied to returned detected labels and image properties.\n Specified filters can be inclusive, exclusive, or a combination of both. Filters can be used\n for individual labels or label categories. The exact label names or label categories must be\n supplied. For a full list of labels and label categories, see Detecting labels.

" } } }, @@ -3974,7 +4087,7 @@ "ImageProperties": { "target": "com.amazonaws.rekognition#DetectLabelsImageProperties", "traits": { - "smithy.api#documentation": "

Information about the properties of the input image, such as brightness, sharpness, contrast, and dominant colors.

" + "smithy.api#documentation": "

Information about the properties of the input image, such as brightness, sharpness,\n contrast, and dominant colors.

" } } }, @@ -3999,7 +4112,7 @@ } }, "traits": { - "smithy.api#documentation": "

Settings for the DetectLabels request. Settings can include \n filters for both GENERAL_LABELS and IMAGE_PROPERTIES. GENERAL_LABELS filters can be inclusive \n or exclusive and applied to individual labels or label categories. IMAGE_PROPERTIES filters \n allow specification of a maximum number of dominant colors.

" + "smithy.api#documentation": "

Settings for the DetectLabels request. Settings can include filters for both\n GENERAL_LABELS and IMAGE_PROPERTIES. GENERAL_LABELS filters can be inclusive or exclusive and\n applied to individual labels or label categories. IMAGE_PROPERTIES filters allow specification\n of a maximum number of dominant colors.

" } }, "com.amazonaws.rekognition#DetectModerationLabels": { @@ -4427,7 +4540,7 @@ "UserStatus": { "target": "com.amazonaws.rekognition#UserStatus", "traits": { - "smithy.api#documentation": "

The status of an update made to a User. Reflects if the User has been updated for every requested change.

" + "smithy.api#documentation": "

The status of an update made to a User. Reflects if the User has been updated for every\n requested change.

" } } }, @@ -4508,7 +4621,24 @@ } ], "traits": { - "smithy.api#documentation": "

Distributes the entries (images) in a training dataset across the training dataset and the test dataset for a project.\n DistributeDatasetEntries moves 20% of the training dataset images to the test dataset.\n An entry is a JSON Line that describes an image.\n

\n

You supply the Amazon Resource Names (ARN) of a project's training dataset and test dataset. \n The training dataset must contain the images that you want to split. The test dataset \n must be empty. The datasets must belong to the same project. To create training and test datasets for a project, call CreateDataset.

\n

Distributing a dataset takes a while to complete. To check the status call DescribeDataset. The operation\n is complete when the Status field for the training dataset and the test dataset is UPDATE_COMPLETE. \n If the dataset split fails, the value of Status is UPDATE_FAILED.

\n

This operation requires permissions to perform the rekognition:DistributeDatasetEntries action.

" + "smithy.api#documentation": "

Distributes the entries (images) in a training dataset across the training dataset and the test dataset for a project. DistributeDatasetEntries moves 20% of the training dataset images to the test dataset. An entry is a JSON Line that describes an image. You supply the Amazon Resource Names (ARN) of a project's training dataset and test dataset. The training dataset must contain the images that you want to split. The test dataset must be empty. The datasets must belong to the same project. To create training and test datasets for a project, call CreateDataset. Distributing a dataset takes a while to complete. To check the status, call DescribeDataset. The operation is complete when the Status field for the training dataset and the test dataset is UPDATE_COMPLETE. If the dataset split fails, the value of Status is UPDATE_FAILED. This operation requires permissions to perform the rekognition:DistributeDatasetEntries action.
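A minimal AWS SDK for Go v2 sketch of the dataset split described above, reusing the training and test dataset ARNs from the example in this patch; progress would then be checked with DescribeDataset as the documentation notes.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/rekognition"
	"github.com/aws/aws-sdk-go-v2/service/rekognition/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	rek := rekognition.NewFromConfig(cfg)

	// Move roughly 20% of the training entries into the (empty) test dataset
	// of the same project.
	_, err = rek.DistributeDatasetEntries(ctx, &rekognition.DistributeDatasetEntriesInput{
		Datasets: []types.DistributeDataset{
			{Arn: aws.String("arn:aws:rekognition:us-east-1:111122223333:project/my-proj-2/dataset/train/1690564858106")},
			{Arn: aws.String("arn:aws:rekognition:us-east-1:111122223333:project/my-proj-2/dataset/test/1690564858106")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}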

", + "smithy.api#examples": [ + { + "title": "To distribute an Amazon Rekognition Custom Labels dataset", + "documentation": "Distributes an Amazon Rekognition Custom Labels training dataset to a test dataset.", + "input": { + "Datasets": [ + { + "Arn": "arn:aws:rekognition:us-east-1:111122223333:project/my-proj-2/dataset/train/1690564858106" + }, + { + "Arn": "arn:aws:rekognition:us-east-1:111122223333:project/my-proj-2/dataset/test/1690564858106" + } + ] + }, + "output": {} + } + ] } }, "com.amazonaws.rekognition#DistributeDatasetEntriesRequest": { @@ -4786,7 +4916,7 @@ } }, "traits": { - "smithy.api#documentation": "

Indicates the direction the eyes are gazing in (independent of the head pose) as determined by its pitch and yaw.

" + "smithy.api#documentation": "

Indicates the direction the eyes are gazing in (independent of the head pose) as\n determined by its pitch and yaw.

" } }, "com.amazonaws.rekognition#EyeOpen": { @@ -5281,7 +5411,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains filters for the object labels returned by DetectLabels. Filters can be inclusive, \n exclusive, or a combination of both and can be applied to individual labels or entire label categories.\n To see a list of label categories, see Detecting Labels.

" + "smithy.api#documentation": "

Contains filters for the object labels returned by DetectLabels. Filters can be inclusive,\n exclusive, or a combination of both and can be applied to individual labels or entire label\n categories. To see a list of label categories, see Detecting Labels.

" } }, "com.amazonaws.rekognition#Geometry": { @@ -5486,7 +5616,7 @@ "JobId": { "target": "com.amazonaws.rekognition#JobId", "traits": { - "smithy.api#documentation": "

Job identifier for the celebrity recognition operation for which you \n want to obtain results. The job identifer is returned by an initial call \n to StartCelebrityRecognition.

" + "smithy.api#documentation": "

Job identifier for the celebrity recognition operation for which you want to obtain\n results. The job identifier is returned by an initial call to StartCelebrityRecognition.

" } }, "Video": { @@ -5495,7 +5625,7 @@ "JobTag": { "target": "com.amazonaws.rekognition#JobTag", "traits": { - "smithy.api#documentation": "

A job identifier specified in the call to StartCelebrityRecognition and \n returned in the job completion notification sent to your \n Amazon Simple Notification Service topic.

" + "smithy.api#documentation": "

A job identifier specified in the call to StartCelebrityRecognition and returned in the\n job completion notification sent to your Amazon Simple Notification Service topic.

" } } }, @@ -5574,7 +5704,7 @@ "AggregateBy": { "target": "com.amazonaws.rekognition#ContentModerationAggregateBy", "traits": { - "smithy.api#documentation": "

Defines how to aggregate results of the StartContentModeration request. \n Default aggregation option is TIMESTAMPS. \n SEGMENTS mode aggregates moderation labels over time.

" + "smithy.api#documentation": "

Defines how to aggregate results of the StartContentModeration request. Default\n aggregation option is TIMESTAMPS. SEGMENTS mode aggregates moderation labels over time.

" } } }, @@ -5599,7 +5729,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains metadata about a content moderation request, \n including the SortBy and AggregateBy options.

" + "smithy.api#documentation": "

Contains metadata about a content moderation request, including the SortBy and AggregateBy\n options.

" } }, "com.amazonaws.rekognition#GetContentModerationResponse": { @@ -5644,7 +5774,7 @@ "JobId": { "target": "com.amazonaws.rekognition#JobId", "traits": { - "smithy.api#documentation": "

Job identifier for the content moderation operation for which you \n want to obtain results. The job identifer is returned by an initial call \n to StartContentModeration.

" + "smithy.api#documentation": "

Job identifier for the content moderation operation for which you want to obtain results.\n The job identifier is returned by an initial call to StartContentModeration.

" } }, "Video": { @@ -5653,13 +5783,13 @@ "JobTag": { "target": "com.amazonaws.rekognition#JobTag", "traits": { - "smithy.api#documentation": "

A job identifier specified in the call to StartContentModeration and \n returned in the job completion notification sent to your \n Amazon Simple Notification Service topic.

" + "smithy.api#documentation": "

A job identifier specified in the call to StartContentModeration and returned in the job\n completion notification sent to your Amazon Simple Notification Service topic.

" } }, "GetRequestMetadata": { "target": "com.amazonaws.rekognition#GetContentModerationRequestMetadata", "traits": { - "smithy.api#documentation": "

Information about the paramters used when getting a response. Includes \n information on aggregation and sorting methods.

" + "smithy.api#documentation": "

Information about the parameters used when getting a response. Includes information on\n aggregation and sorting methods.

" } } }, @@ -5699,7 +5829,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets face detection results for a Amazon Rekognition Video analysis started by StartFaceDetection.

\n

Face detection with Amazon Rekognition Video is an asynchronous operation. You start face detection by calling StartFaceDetection\n which returns a job identifier (JobId). When the face detection operation finishes, Amazon Rekognition Video publishes a completion status to\n the Amazon Simple Notification Service topic registered in the initial call to StartFaceDetection. To get the results\n of the face detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED.\n If so, call GetFaceDetection and pass the job identifier\n (JobId) from the initial call to StartFaceDetection.

\n

\n GetFaceDetection returns an array of detected faces (Faces) sorted by the time the faces were detected.

\n

Use MaxResults parameter to limit the number of labels returned. If there are more results than\n specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set\n of results. To get the next page of results, call GetFaceDetection and populate the NextToken request parameter with the token\n value returned from the previous call to GetFaceDetection.

", + "smithy.api#documentation": "

Gets face detection results for an Amazon Rekognition Video analysis started by StartFaceDetection.

\n

Face detection with Amazon Rekognition Video is an asynchronous operation. You start face detection by calling StartFaceDetection\n which returns a job identifier (JobId). When the face detection operation finishes, Amazon Rekognition Video publishes a completion status to\n the Amazon Simple Notification Service topic registered in the initial call to StartFaceDetection. To get the results\n of the face detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED.\n If so, call GetFaceDetection and pass the job identifier\n (JobId) from the initial call to StartFaceDetection.

\n

\n GetFaceDetection returns an array of detected faces (Faces) sorted by the time the faces were detected.

\n

Use MaxResults parameter to limit the number of labels returned. If there are more results than\n specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set\n of results. To get the next page of results, call GetFaceDetection and populate the NextToken request parameter with the token\n value returned from the previous call to GetFaceDetection.

\n

Note that for the GetFaceDetection operation, the returned values for\n FaceOccluded and EyeDirection will always be \"null\".

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -5770,7 +5900,7 @@ "JobId": { "target": "com.amazonaws.rekognition#JobId", "traits": { - "smithy.api#documentation": "

Job identifier for the face detection operation for which you \n want to obtain results. The job identifer is returned by an initial call \n to StartFaceDetection.

" + "smithy.api#documentation": "

Job identifier for the face detection operation for which you want to obtain results. The\n job identifier is returned by an initial call to StartFaceDetection.

" } }, "Video": { @@ -5779,7 +5909,7 @@ "JobTag": { "target": "com.amazonaws.rekognition#JobTag", "traits": { - "smithy.api#documentation": "

A job identifier specified in the call to StartFaceDetection and \n returned in the job completion notification sent to your \n Amazon Simple Notification Service topic.

" + "smithy.api#documentation": "

A job identifier specified in the call to StartFaceDetection and returned in the job\n completion notification sent to your Amazon Simple Notification Service topic.

" } } }, @@ -5816,7 +5946,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves the results of a specific Face Liveness session. It requires the\n sessionId as input, which was created using\n CreateFaceLivenessSession. Returns the corresponding Face Liveness confidence\n score, a reference image that includes a face bounding box, and audit images that also contain\n face bounding boxes. The Face Liveness confidence score ranges from 0 to 100. The reference\n image can optionally be returned.

" + "smithy.api#documentation": "

Retrieves the results of a specific Face Liveness session. It requires the\n sessionId as input, which was created using\n CreateFaceLivenessSession. Returns the corresponding Face Liveness confidence\n score, a reference image that includes a face bounding box, and audit images that also contain\n face bounding boxes. The Face Liveness confidence score ranges from 0 to 100.

\n

The number of audit images returned by GetFaceLivenessSessionResults is\n defined by the AuditImagesLimit parameter when calling\n CreateFaceLivenessSession. Reference images are always returned when\n possible.

" } }, "com.amazonaws.rekognition#GetFaceLivenessSessionResultsRequest": { @@ -5854,7 +5984,7 @@ "Confidence": { "target": "com.amazonaws.rekognition#Percent", "traits": { - "smithy.api#documentation": "

Probabalistic confidence score for if the person in the given video was live, represented as a\n float value between 0 to 100.

" + "smithy.api#documentation": "

Probabilistic confidence score for whether the person in the given video was live, represented\n as a float value between 0 and 100.

" } }, "ReferenceImage": { @@ -5866,7 +5996,7 @@ "AuditImages": { "target": "com.amazonaws.rekognition#AuditImages", "traits": { - "smithy.api#documentation": "

A set of images from the Face Liveness video that can be used for audit purposes. It\n includes a bounding box of the face and the Base64-encoded bytes that return an image. If the\n CreateFaceLivenessSession request included an OutputConfig argument, the image will be\n uploaded to an S3Object specified in the output configuration.

" + "smithy.api#documentation": "

A set of images from the Face Liveness video that can be used for audit purposes. It\n includes a bounding box of the face and the Base64-encoded bytes that return an image. If the\n CreateFaceLivenessSession request included an OutputConfig argument, the image will be\n uploaded to an S3Object specified in the output configuration. If no Amazon S3 bucket is defined,\n raw bytes are sent instead.

" } } }, @@ -5983,7 +6113,7 @@ "JobId": { "target": "com.amazonaws.rekognition#JobId", "traits": { - "smithy.api#documentation": "

Job identifier for the face search operation for which you \n want to obtain results. The job identifer is returned by an initial call \n to StartFaceSearch.

" + "smithy.api#documentation": "

Job identifier for the face search operation for which you want to obtain results. The job\n identifier is returned by an initial call to StartFaceSearch.

" } }, "Video": { @@ -5992,7 +6122,7 @@ "JobTag": { "target": "com.amazonaws.rekognition#JobTag", "traits": { - "smithy.api#documentation": "

A job identifier specified in the call to StartFaceSearch and \n returned in the job completion notification sent to your \n Amazon Simple Notification Service topic.

" + "smithy.api#documentation": "

A job identifier specified in the call to StartFaceSearch and returned in the job\n completion notification sent to your Amazon Simple Notification Service topic.

" } } }, @@ -6096,7 +6226,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains metadata about a label detection request, \n including the SortBy and AggregateBy options.

" + "smithy.api#documentation": "

Contains metadata about a label detection request, including the SortBy and AggregateBy\n options.

" } }, "com.amazonaws.rekognition#GetLabelDetectionResponse": { @@ -6141,7 +6271,7 @@ "JobId": { "target": "com.amazonaws.rekognition#JobId", "traits": { - "smithy.api#documentation": "

Job identifier for the label detection operation for which you \n want to obtain results. The job identifer is returned by an initial call \n to StartLabelDetection.

" + "smithy.api#documentation": "

Job identifier for the label detection operation for which you want to obtain results. The\n job identifier is returned by an initial call to StartLabelDetection.

" } }, "Video": { @@ -6150,13 +6280,13 @@ "JobTag": { "target": "com.amazonaws.rekognition#JobTag", "traits": { - "smithy.api#documentation": "

A job identifier specified in the call to StartLabelDetection and \n returned in the job completion notification sent to your \n Amazon Simple Notification Service topic.

" + "smithy.api#documentation": "

A job identifier specified in the call to StartLabelDetection and returned in the job\n completion notification sent to your Amazon Simple Notification Service topic.

" } }, "GetRequestMetadata": { "target": "com.amazonaws.rekognition#GetLabelDetectionRequestMetadata", "traits": { - "smithy.api#documentation": "

Information about the paramters used when getting a response. Includes \n information on aggregation and sorting methods.

" + "smithy.api#documentation": "

Information about the parameters used when getting a response. Includes information on\n aggregation and sorting methods.

" } } }, @@ -6273,7 +6403,7 @@ "JobId": { "target": "com.amazonaws.rekognition#JobId", "traits": { - "smithy.api#documentation": "

Job identifier for the person tracking operation for which you \n want to obtain results. The job identifer is returned by an initial call \n to StartPersonTracking.

" + "smithy.api#documentation": "

Job identifier for the person tracking operation for which you want to obtain results. The\n job identifier is returned by an initial call to StartPersonTracking.

" } }, "Video": { @@ -6282,7 +6412,7 @@ "JobTag": { "target": "com.amazonaws.rekognition#JobTag", "traits": { - "smithy.api#documentation": "

A job identifier specified in the call to StartCelebrityRecognition and \n returned in the job completion notification sent to your \n Amazon Simple Notification Service topic.

" + "smithy.api#documentation": "

A job identifier specified in the call to StartPersonTracking and returned in the\n job completion notification sent to your Amazon Simple Notification Service topic.

" } } }, @@ -6405,7 +6535,7 @@ "JobId": { "target": "com.amazonaws.rekognition#JobId", "traits": { - "smithy.api#documentation": "

Job identifier for the segment detection operation for which you \n want to obtain results. The job identifer is returned by an initial call \n to StartSegmentDetection.

" + "smithy.api#documentation": "

Job identifier for the segment detection operation for which you want to obtain results.\n The job identifier is returned by an initial call to StartSegmentDetection.

" } }, "Video": { @@ -6414,7 +6544,7 @@ "JobTag": { "target": "com.amazonaws.rekognition#JobTag", "traits": { - "smithy.api#documentation": "

A job identifier specified in the call to StartSegmentDetection and \n returned in the job completion notification sent to your \n Amazon Simple Notification Service topic.

" + "smithy.api#documentation": "

A job identifier specified in the call to StartSegmentDetection and returned in the job\n completion notification sent to your Amazon Simple Notification Service topic.

" } } }, @@ -6454,7 +6584,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets the text detection results of a Amazon Rekognition Video analysis started by StartTextDetection.

\n

Text detection with Amazon Rekognition Video is an asynchronous operation. You start text detection by \n calling StartTextDetection which returns a job identifier (JobId)\n When the text detection operation finishes, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service\n topic registered in the initial call to StartTextDetection. To get the results\n of the text detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. \n if so, call GetTextDetection and pass the job identifier (JobId) from the initial call\n of StartLabelDetection.

\n

\n GetTextDetection returns an array of detected text (TextDetections) sorted by \n the time the text was detected, up to 50 words per frame of video.

\n

Each element of the array includes the detected text, the precentage confidence in the acuracy \n of the detected text, the time the text was detected, bounding box information for where the text\n was located, and unique identifiers for words and their lines.

\n

Use MaxResults parameter to limit the number of text detections returned. If there are more results than \n specified in MaxResults, the value of NextToken in the operation response contains\n a pagination token for getting the next set of results. To get the next page of results, call GetTextDetection\n and populate the NextToken request parameter with the token value returned from the previous \n call to GetTextDetection.

", + "smithy.api#documentation": "

Gets the text detection results of an Amazon Rekognition Video analysis started by StartTextDetection.

\n

Text detection with Amazon Rekognition Video is an asynchronous operation. You start text detection by \n calling StartTextDetection which returns a job identifier (JobId).\n When the text detection operation finishes, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service\n topic registered in the initial call to StartTextDetection. To get the results\n of the text detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. \n If so, call GetTextDetection and pass the job identifier (JobId) from the initial call\n to StartTextDetection.

\n

\n GetTextDetection returns an array of detected text (TextDetections) sorted by \n the time the text was detected, up to 100 words per frame of video.

\n

Each element of the array includes the detected text, the percentage confidence in the accuracy \n of the detected text, the time the text was detected, bounding box information for where the text\n was located, and unique identifiers for words and their lines.

\n

Use MaxResults parameter to limit the number of text detections returned. If there are more results than \n specified in MaxResults, the value of NextToken in the operation response contains\n a pagination token for getting the next set of results. To get the next page of results, call GetTextDetection\n and populate the NextToken request parameter with the token value returned from the previous \n call to GetTextDetection.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -6528,7 +6658,7 @@ "JobId": { "target": "com.amazonaws.rekognition#JobId", "traits": { - "smithy.api#documentation": "

Job identifier for the text detection operation for which you \n want to obtain results. The job identifer is returned by an initial call \n to StartTextDetection.

" + "smithy.api#documentation": "

Job identifier for the text detection operation for which you want to obtain results. The\n job identifier is returned by an initial call to StartTextDetection.

" } }, "Video": { @@ -6537,7 +6667,7 @@ "JobTag": { "target": "com.amazonaws.rekognition#JobTag", "traits": { - "smithy.api#documentation": "

A job identifier specified in the call to StartTextDetection and \n returned in the job completion notification sent to your \n Amazon Simple Notification Service topic.

" + "smithy.api#documentation": "

A job identifier specified in the call to StartTextDetection and returned in the job\n completion notification sent to your Amazon Simple Notification Service topic.

" } } }, @@ -6736,7 +6866,7 @@ "Bytes": { "target": "com.amazonaws.rekognition#ImageBlob", "traits": { - "smithy.api#documentation": "

Blob of image bytes up to 5 MBs. Note that the maximum image size you can pass to \n DetectCustomLabels is 4MB.

" + "smithy.api#documentation": "

Blob of image bytes up to 5 MB. Note that the maximum image size you can\n pass to DetectCustomLabels is 4 MB.

" } }, "S3Object": { @@ -7405,7 +7535,7 @@ "Instances": { "target": "com.amazonaws.rekognition#Instances", "traits": { - "smithy.api#documentation": "

If Label represents an object, Instances contains the bounding boxes for each instance of the detected object.\n Bounding boxes are returned for common object labels such as people, cars, furniture, apparel or pets.

" + "smithy.api#documentation": "

If Label represents an object, Instances contains the bounding\n boxes for each instance of the detected object. Bounding boxes are returned for common object\n labels such as people, cars, furniture, apparel or pets.

" } }, "Parents": { @@ -7428,7 +7558,7 @@ } }, "traits": { - "smithy.api#documentation": "

Structure containing details about the detected label, including the name, detected instances, parent labels, and level of\n confidence.

\n

\n

" + "smithy.api#documentation": "

Structure containing details about the detected label, including the name, detected\n instances, parent labels, and level of confidence.

\n

" } }, "com.amazonaws.rekognition#LabelAlias": { @@ -7965,6 +8095,29 @@ ], "traits": { "smithy.api#documentation": "

\nLists the entries (images) within a dataset. An entry is a\nJSON Line that contains the information for a single image, including\nthe image location, assigned labels, and object location bounding boxes. For \nmore information, see Creating a manifest file.

\n

JSON Lines in the response include information about non-terminal\n errors found in the dataset. \n Non terminal errors are reported in errors lists within each JSON Line. The\n same information is reported in the training and testing validation result manifests that\n Amazon Rekognition Custom Labels creates during model training.\n

\n

You can filter the response in a variety of ways, such as choosing which labels to return and returning JSON Lines created after a specific date.\n 

\n

This operation requires permissions to perform the rekognition:ListDatasetEntries action.

", + "smithy.api#examples": [ + { + "title": "To list the entries in an Amazon Rekognition Custom Labels dataset", + "documentation": "Lists the JSON line entries in an Amazon Rekognition Custom Labels dataset.", + "input": { + "DatasetArn": "arn:aws:rekognition:us-east-1:111122223333:project/my-proj-2/dataset/train/1690564858106", + "ContainsLabels": [ + "camellia" + ], + "Labeled": true, + "SourceRefContains": "camellia4.jpg", + "HasErrors": true, + "NextToken": "", + "MaxResults": 100 + }, + "output": { + "DatasetEntries": [ + "{\"source-ref\":\"s3://custom-labels-console-us-east-1-1111111111/assets/flowers_1_train_dataset/camellia4.jpg\",\"camellia\":1,\"camellia-metadata\":{\"confidence\":1,\"job-name\":\"labeling-job/camellia\",\"class-name\":\"camellia\",\"human-annotated\":\"yes\",\"creation-date\":\"2021-07-11T03:32:13.456Z\",\"type\":\"groundtruth/image-classification\"},\"with_leaves\":1,\"with_leaves-metadata\":{\"confidence\":1,\"job-name\":\"labeling-job/with_leaves\",\"class-name\":\"with_leaves\",\"human-annotated\":\"yes\",\"creation-date\":\"2021-07-11T03:32:13.456Z\",\"type\":\"groundtruth/image-classification\"},\"cl-metadata\":{\"is_labeled\":true}}" + ], + "NextToken": "" + } + } + ], "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -8092,6 +8245,39 @@ ], "traits": { "smithy.api#documentation": "

Lists the labels in a dataset. Amazon Rekognition Custom Labels uses labels to describe images. For more information, see \n Labeling images.\n

\n

\n Lists the labels in a dataset. Amazon Rekognition Custom Labels uses labels to describe images. For more information, see Labeling images\n in the Amazon Rekognition Custom Labels Developer Guide.

", + "smithy.api#examples": [ + { + "title": "To list the entries in an Amazon Rekognition Custom Labels dataset", + "documentation": "Lists the JSON line entries in an Amazon Rekognition Custom Labels dataset.", + "input": { + "DatasetArn": "arn:aws:rekognition:us-east-1:111122223333:project/my-proj-2/dataset/train/1690564858106", + "NextToken": "", + "MaxResults": 100 + }, + "output": { + "DatasetLabelDescriptions": [ + { + "LabelName": "camellia", + "LabelStats": { + "EntryCount": 1 + } + }, + { + "LabelName": "with_leaves", + "LabelStats": { + "EntryCount": 2 + } + }, + { + "LabelName": "mediterranean_spurge", + "LabelStats": { + "EntryCount": 1 + } + } + ] + } + } + ], "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -8274,13 +8460,13 @@ "UserId": { "target": "com.amazonaws.rekognition#UserId", "traits": { - "smithy.api#documentation": "

An array of user IDs to match when listing faces in a collection.

" + "smithy.api#documentation": "

An array of user IDs to filter results with when listing faces in a collection.

" } }, "FaceIds": { "target": "com.amazonaws.rekognition#FaceIdList", "traits": { - "smithy.api#documentation": "

An array of face IDs to match when listing faces in a collection.

" + "smithy.api#documentation": "

An array of face IDs to filter results with when listing faces in a collection.

" } } }, @@ -8651,7 +8837,7 @@ "NextToken": { "target": "com.amazonaws.rekognition#PaginationToken", "traits": { - "smithy.api#documentation": "

A pagination token to be used with the subsequent request if the response is truncated.

" + "smithy.api#documentation": "

A pagination token to be used with the subsequent request if the response is\n truncated.

" } } }, @@ -8687,7 +8873,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains settings that specify the location of an Amazon S3 bucket used \n to store the output of a Face Liveness session. Note that the S3 bucket must be located \n in the caller's AWS account and in the same region as the Face Liveness end-point. Additionally, the Amazon S3 object keys are \n auto-generated by the Face Liveness system.

" + "smithy.api#documentation": "

Contains settings that specify the location of an Amazon S3 bucket used to store the output of\n a Face Liveness session. Note that the S3 bucket must be located in the caller's AWS account\n and in the same region as the Face Liveness end-point. Additionally, the Amazon S3 object keys are\n auto-generated by the Face Liveness system.

" } }, "com.amazonaws.rekognition#LivenessS3KeyPrefix": { @@ -10366,52 +10552,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -10419,13 +10609,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -10435,224 +10634,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://rekognition-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://rekognition-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - 
"conditions": [], - "endpoint": { - "url": "https://rekognition-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://rekognition-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://rekognition.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://rekognition.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://rekognition.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://rekognition.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] }, @@ -11803,7 +11953,7 @@ } ], "traits": { - "smithy.api#documentation": "

Searches for UserIDs using a supplied image. It first detects the largest face in the\n image, and then searches a specified collection for matching UserIDs.

\n

The operation returns an array of UserIDs that match the face in the supplied image,\n ordered by similarity score with the highest similarity first. It also returns a bounding box\n for the face found in the input image.

\n

Information about faces detected in the supplied image, but not used for the search, is\n returned in an array of UnsearchedFace objects. If no valid face is detected\n in the image, the response will contain an empty UserMatches list and no\n SearchedFace object.

", + "smithy.api#documentation": "

Searches for UserIDs using a supplied image. It first detects the largest face in the\n image, and then searches a specified collection for matching UserIDs.

\n

The operation returns an array of UserIDs that match the face in the supplied image,\n ordered by similarity score with the highest similarity first. It also returns a bounding box\n for the face found in the input image.

\n

Information about faces detected in the supplied image, but not used for the search, is\n returned in an array of UnsearchedFace objects. If no valid face is detected in\n the image, the response will contain an empty UserMatches list and no\n SearchedFace object.

", "smithy.api#examples": [ { "title": "SearchUsersByImage", @@ -11931,13 +12081,13 @@ "SearchedFace": { "target": "com.amazonaws.rekognition#SearchedFaceDetails", "traits": { - "smithy.api#documentation": "

A list of FaceDetail objects containing the BoundingBox for the largest face in image,\n as well as the confidence in the bounding box, that was searched for matches. If no valid\n face is detected in the image the response will contain no SearchedFace object.

" + "smithy.api#documentation": "

A list of FaceDetail objects containing the BoundingBox for the largest face in image, as\n well as the confidence in the bounding box, that was searched for matches. If no valid face is\n detected in the image the response will contain no SearchedFace object.

" } }, "UnsearchedFaces": { "target": "com.amazonaws.rekognition#UnsearchedFacesList", "traits": { - "smithy.api#documentation": "

List of UnsearchedFace objects. Contains the face details infered from the specified\n image but not used for search. Contains reasons that describe why a face wasn't used for\n Search.

" + "smithy.api#documentation": "

List of UnsearchedFace objects. Contains the face details inferred from the specified image\n but not used for search. Contains reasons that describe why a face wasn't used for Search.\n 

" } } }, @@ -11990,13 +12140,13 @@ "UserMatches": { "target": "com.amazonaws.rekognition#UserMatchList", "traits": { - "smithy.api#documentation": "

An array of UserMatch objects that matched the input face along with the confidence in\n the match. Array will be empty if there are no matches.

" + "smithy.api#documentation": "

An array of UserMatch objects that matched the input face along with the confidence in the\n match. Array will be empty if there are no matches.

" } }, "FaceModelVersion": { "target": "com.amazonaws.rekognition#String", "traits": { - "smithy.api#documentation": "

Version number of the face detection model associated with the input\n CollectionId.

" + "smithy.api#documentation": "

Version number of the face detection model associated with the input CollectionId.

" } }, "SearchedFace": { @@ -12027,7 +12177,7 @@ } }, "traits": { - "smithy.api#documentation": "

Provides face metadata such as FaceId, BoundingBox, Confidence of the input face used for search.

" + "smithy.api#documentation": "

Provides face metadata such as FaceId, BoundingBox, Confidence of the input face used for\n search.

" } }, "com.amazonaws.rekognition#SearchedFaceDetails": { @@ -12904,7 +13054,21 @@ } ], "traits": { - "smithy.api#documentation": "

Starts the running of the version of a model. Starting a model takes a while\n to complete. To check the current state of the model, use DescribeProjectVersions.

\n

Once the model is running, you can detect custom labels in new images by calling \n DetectCustomLabels.

\n \n

You are charged for the amount of time that the model is running. To stop a running\n model, call StopProjectVersion.

\n
\n

For more information, see Running a trained Amazon Rekognition Custom Labels model in the Amazon Rekognition Custom Labels Guide.

\n

This operation requires permissions to perform the \n rekognition:StartProjectVersion action.

" + "smithy.api#documentation": "

Starts the running of the version of a model. Starting a model takes a while\n to complete. To check the current state of the model, use DescribeProjectVersions.

\n

Once the model is running, you can detect custom labels in new images by calling \n DetectCustomLabels.

\n \n

You are charged for the amount of time that the model is running. To stop a running\n model, call StopProjectVersion.

\n
\n

For more information, see Running a trained Amazon Rekognition Custom Labels model in the Amazon Rekognition Custom Labels Guide.

\n

This operation requires permissions to perform the \n rekognition:StartProjectVersion action.

", + "smithy.api#examples": [ + { + "title": "To start an Amazon Rekognition Custom Labels model", + "documentation": "Starts a version of an Amazon Rekognition Custom Labels model.", + "input": { + "ProjectVersionArn": "arn:aws:rekognition:us-east-1:111122223333:project/my-project/version/1/1690556751958", + "MinInferenceUnits": 1, + "MaxInferenceUnits": 1 + }, + "output": { + "Status": "STARTING" + } + } + ] } }, "com.amazonaws.rekognition#StartProjectVersionRequest": { @@ -13329,7 +13493,19 @@ } ], "traits": { - "smithy.api#documentation": "

Stops a running model. The operation might take a while to complete. To\n check the current status, call DescribeProjectVersions.

\n

This operation requires permissions to perform the rekognition:StopProjectVersion action.

" + "smithy.api#documentation": "

Stops a running model. The operation might take a while to complete. To\n check the current status, call DescribeProjectVersions.

\n

This operation requires permissions to perform the rekognition:StopProjectVersion action.

", + "smithy.api#examples": [ + { + "title": "To stop an Amazon Rekognition Custom Labels model.", + "documentation": "Stops a version of an Amazon Rekognition Custom Labels model.", + "input": { + "ProjectVersionArn": "arn:aws:rekognition:us-east-1:111122223333:project/my-project/version/1/1690556751958" + }, + "output": { + "Status": "STOPPING" + } + } + ] } }, "com.amazonaws.rekognition#StopProjectVersionRequest": { @@ -14235,7 +14411,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains metadata like FaceId, UserID, and Reasons, for a face that was unsuccessfully associated.

" + "smithy.api#documentation": "

Contains metadata like FaceId, UserID, and Reasons, for a face that was unsuccessfully\n associated.

" } }, "com.amazonaws.rekognition#UnsuccessfulFaceAssociationList": { @@ -14302,7 +14478,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains metadata like FaceId, UserID, and Reasons, for a face that was unsuccessfully deleted.

" + "smithy.api#documentation": "

Contains metadata like FaceId, UserID, and Reasons, for a face that was unsuccessfully\n deleted.

" } }, "com.amazonaws.rekognition#UnsuccessfulFaceDeletionReason": { @@ -14363,7 +14539,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains metadata like FaceId, UserID, and Reasons, for a face that was unsuccessfully disassociated.

" + "smithy.api#documentation": "

Contains metadata like FaceId, UserID, and Reasons, for a face that was unsuccessfully\n disassociated.

" } }, "com.amazonaws.rekognition#UnsuccessfulFaceDisassociationList": { @@ -14497,7 +14673,20 @@ } ], "traits": { - "smithy.api#documentation": "

Adds or updates one or more entries (images) in a dataset. An entry is a JSON Line which contains the\n information for a single image, including\n the image location, assigned labels, and object location bounding boxes. For more information, \n see Image-Level labels in manifest files and Object localization in manifest files in the Amazon Rekognition Custom Labels Developer Guide.\n

\n

If the source-ref field in the JSON line references an existing image, the existing image in the dataset\n is updated. \n If source-ref field doesn't reference an existing image, the image is added as a new image to the dataset.

\n

You specify the changes that you want to make in the Changes input parameter. \n There isn't a limit to the number JSON Lines that you can change, but the size of Changes must be less\nthan 5MB.

\n

\n UpdateDatasetEntries returns immediatly, but the dataset update might take a while to complete.\n Use DescribeDataset to check the \n current status. The dataset updated successfully if the value of Status is\n UPDATE_COMPLETE.

\n

To check if any non-terminal errors occured, call ListDatasetEntries\n and check for the presence of errors lists in the JSON Lines.

\n

Dataset update fails if a terminal error occurs (Status = UPDATE_FAILED). \n Currently, you can't access the terminal error information from the Amazon Rekognition Custom Labels SDK.\n

\n

This operation requires permissions to perform the rekognition:UpdateDatasetEntries action.

" + "smithy.api#documentation": "

Adds or updates one or more entries (images) in a dataset. An entry is a JSON Line which contains the\n information for a single image, including\n the image location, assigned labels, and object location bounding boxes. For more information, \n see Image-Level labels in manifest files and Object localization in manifest files in the Amazon Rekognition Custom Labels Developer Guide.\n

\n

If the source-ref field in the JSON line references an existing image, the existing image in the dataset\n is updated. \n If the source-ref field doesn't reference an existing image, the image is added as a new image to the dataset.

\n

You specify the changes that you want to make in the Changes input parameter. \n There isn't a limit to the number of JSON Lines that you can change, but the size of Changes must be less\nthan 5 MB.

\n

\n UpdateDatasetEntries returns immediately, but the dataset update might take a while to complete.\n Use DescribeDataset to check the \n current status. The dataset has been updated successfully if the value of Status is\n UPDATE_COMPLETE.

\n

To check if any non-terminal errors occurred, call ListDatasetEntries\n and check for the presence of errors lists in the JSON Lines.

\n

Dataset update fails if a terminal error occurs (Status = UPDATE_FAILED). \n Currently, you can't access the terminal error information from the Amazon Rekognition Custom Labels SDK.\n

\n

This operation requires permissions to perform the rekognition:UpdateDatasetEntries action.

", + "smithy.api#examples": [ + { + "title": "To-add dataset entries to an Amazon Rekognition Custom Labels dataset", + "documentation": "Adds dataset entries to an Amazon Rekognition Custom Labels dataset.", + "input": { + "DatasetArn": "arn:aws:rekognition:us-east-1:111122223333:project/my-proj-2/dataset/train/1690564858106", + "Changes": { + "GroundTruth": "{\"source-ref\":\"s3://custom-labels-console-us-east-1-111111111/assets/flowers_1_test_dataset/mediterranean_spurge4.jpg\",\"mediterranean_spurge\":1,\"mediterranean_spurge-metadata\":{\"confidence\":1,\"job-name\":\"labeling-job/mediterranean_spurge\",\"class-name\":\"mediterranean_spurge\",\"human-annotated\":\"yes\",\"creation-date\":\"2021-07-11T03:33:42.025Z\",\"type\":\"groundtruth/image-classification\"},\"with_leaves\":1,\"with_leaves-metadata\":{\"confidence\":1,\"job-name\":\"labeling-job/with_leaves\",\"class-name\":\"with_leaves\",\"human-annotated\":\"yes\",\"creation-date\":\"2021-07-11T03:33:42.025Z\",\"type\":\"groundtruth/image-classification\"}}" + } + }, + "output": {} + } + ] } }, "com.amazonaws.rekognition#UpdateDatasetEntriesRequest": {